Updated the Chopstix patch to include a user-space memory probe.
[linux-2.6.git] / linux-2.6-590-chopstix-intern.patch
1 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/Kconfig linux-2.6.22-590/arch/i386/Kconfig
2 --- linux-2.6.22-580/arch/i386/Kconfig  2009-02-18 09:56:02.000000000 -0500
3 +++ linux-2.6.22-590/arch/i386/Kconfig  2009-02-18 09:57:23.000000000 -0500
4 @@ -1217,6 +1217,14 @@
5  
6  source "arch/i386/oprofile/Kconfig"
7  
8 +config CHOPSTIX
9 +       bool "Chopstix (PlanetLab)"
10 +       depends on MODULES && OPROFILE
11 +       help
12 +         Chopstix allows you to monitor various events by summarizing them
13 +         in lossy data structures and transferring these data structures
14 +         into user space. If in doubt, say "N".
15 +
16  config KPROBES
17         bool "Kprobes (EXPERIMENTAL)"
18         depends on KALLSYMS && EXPERIMENTAL && MODULES
19 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/kernel/asm-offsets.c linux-2.6.22-590/arch/i386/kernel/asm-offsets.c
20 --- linux-2.6.22-580/arch/i386/kernel/asm-offsets.c     2007-07-08 19:32:17.000000000 -0400
21 +++ linux-2.6.22-590/arch/i386/kernel/asm-offsets.c     2009-02-18 09:57:23.000000000 -0500
22 @@ -9,6 +9,7 @@
23  #include <linux/signal.h>
24  #include <linux/personality.h>
25  #include <linux/suspend.h>
26 +#include <linux/arrays.h>
27  #include <asm/ucontext.h>
28  #include "sigframe.h"
29  #include <asm/pgtable.h>
30 @@ -25,9 +26,19 @@
31  #define OFFSET(sym, str, mem) \
32         DEFINE(sym, offsetof(struct str, mem));
33  
34 +#define STACKOFFSET(sym, str, mem) \
35 +       DEFINE(sym, offsetof(struct str, mem)-sizeof(struct str));
36 +
37  /* workaround for a warning with -Wmissing-prototypes */
38  void foo(void);
39  
40 +struct event_spec {
41 +       unsigned long pc;
42 +       unsigned long dcookie;
43 +       unsigned count;
44 +       unsigned int number;
45 +};
46 +
47  void foo(void)
48  {
49         OFFSET(SIGCONTEXT_eax, sigcontext, eax);
50 @@ -51,7 +62,16 @@
51         OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
52         BLANK();
53  
54 -       OFFSET(TI_task, thread_info, task);
55 +    STACKOFFSET(TASK_thread, task_struct, thread);
56 +    STACKOFFSET(THREAD_esp, thread_struct, esp);
57 +    STACKOFFSET(EVENT_event_data, event, event_data);
58 +    STACKOFFSET(EVENT_task, event, task);
59 +    STACKOFFSET(EVENT_event_type, event, event_type);
60 +    STACKOFFSET(SPEC_number, event_spec, number);
61 +    DEFINE(EVENT_SIZE, sizeof(struct event));
62 +    DEFINE(SPEC_SIZE, sizeof(struct event_spec));
63 +    DEFINE(SPEC_EVENT_SIZE, sizeof(struct event_spec)+sizeof(struct event));
64 +
65         OFFSET(TI_exec_domain, thread_info, exec_domain);
66         OFFSET(TI_flags, thread_info, flags);
67         OFFSET(TI_status, thread_info, status);
68 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/kernel/entry.S linux-2.6.22-590/arch/i386/kernel/entry.S
69 --- linux-2.6.22-580/arch/i386/kernel/entry.S  1969-12-31 19:00:00.000000000 -0500
70 +++ linux-2.6.22-590/arch/i386/kernel/entry.S  2009-02-18 09:57:23.000000000 -0500
71 @@ -0,0 +1,1027 @@
72 +/*
73 + *  linux/arch/i386/entry.S
74 + *
75 + *  Copyright (C) 1991, 1992  Linus Torvalds
76 + */
77 +
78 +/*
79 + * entry.S contains the system-call and fault low-level handling routines.
80 + * This also contains the timer-interrupt handler, as well as all interrupts
81 + * and faults that can result in a task-switch.
82 + *
83 + * NOTE: This code handles signal-recognition, which happens every time
84 + * after a timer-interrupt and after each system call.
85 + *
86 + * I changed all the .align's to 4 (16 byte alignment), as that's faster
87 + * on a 486.
88 + *
89 + * Stack layout in 'syscall_exit':
90 + *     ptrace needs to have all regs on the stack.
91 + *     if the order here is changed, it needs to be
92 + *     updated in fork.c:copy_process, signal.c:do_signal,
93 + *     ptrace.c and ptrace.h
94 + *
95 + *      0(%esp) - %ebx
96 + *      4(%esp) - %ecx
97 + *      8(%esp) - %edx
98 + *       C(%esp) - %esi
99 + *     10(%esp) - %edi
100 + *     14(%esp) - %ebp
101 + *     18(%esp) - %eax
102 + *     1C(%esp) - %ds
103 + *     20(%esp) - %es
104 + *     24(%esp) - %fs
105 + *     28(%esp) - orig_eax
106 + *     2C(%esp) - %eip
107 + *     30(%esp) - %cs
108 + *     34(%esp) - %eflags
109 + *     38(%esp) - %oldesp
110 + *     3C(%esp) - %oldss
111 + *
112 + * "current" is in register %ebx during any slow entries.
113 + */
114 +
115 +#include <linux/linkage.h>
116 +#include <asm/thread_info.h>
117 +#include <asm/irqflags.h>
118 +#include <asm/errno.h>
119 +#include <asm/segment.h>
120 +#include <asm/smp.h>
121 +#include <asm/page.h>
122 +#include <asm/desc.h>
123 +#include <asm/percpu.h>
124 +#include <asm/dwarf2.h>
125 +#include "irq_vectors.h"
126 +
127 +/*
128 + * We use macros for low-level operations which need to be overridden
129 + * for paravirtualization.  The following will never clobber any registers:
130 + *   INTERRUPT_RETURN (aka. "iret")
131 + *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
132 + *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
133 + *
134 + * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
135 + * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
136 + * Allowing a register to be clobbered can shrink the paravirt replacement
137 + * enough to patch inline, increasing performance.
138 + */
139 +
140 +#define nr_syscalls ((syscall_table_size)/4)
141 +
142 +CF_MASK                = 0x00000001
143 +TF_MASK                = 0x00000100
144 +IF_MASK                = 0x00000200
145 +DF_MASK                = 0x00000400 
146 +NT_MASK                = 0x00004000
147 +VM_MASK                = 0x00020000
148 +
149 +#ifdef CONFIG_PREEMPT
150 +#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
151 +#else
152 +#define preempt_stop(clobbers)
153 +#define resume_kernel          restore_nocheck
154 +#endif
155 +
156 +.macro TRACE_IRQS_IRET
157 +#ifdef CONFIG_TRACE_IRQFLAGS
158 +       testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
159 +       jz 1f
160 +       TRACE_IRQS_ON
161 +1:
162 +#endif
163 +.endm
164 +
165 +#ifdef CONFIG_VM86
166 +#define resume_userspace_sig   check_userspace
167 +#else
168 +#define resume_userspace_sig   resume_userspace
169 +#endif
170 +
171 +#define SAVE_ALL \
172 +       cld; \
173 +       pushl %fs; \
174 +       CFI_ADJUST_CFA_OFFSET 4;\
175 +       /*CFI_REL_OFFSET fs, 0;*/\
176 +       pushl %es; \
177 +       CFI_ADJUST_CFA_OFFSET 4;\
178 +       /*CFI_REL_OFFSET es, 0;*/\
179 +       pushl %ds; \
180 +       CFI_ADJUST_CFA_OFFSET 4;\
181 +       /*CFI_REL_OFFSET ds, 0;*/\
182 +       pushl %eax; \
183 +       CFI_ADJUST_CFA_OFFSET 4;\
184 +       CFI_REL_OFFSET eax, 0;\
185 +       pushl %ebp; \
186 +       CFI_ADJUST_CFA_OFFSET 4;\
187 +       CFI_REL_OFFSET ebp, 0;\
188 +       pushl %edi; \
189 +       CFI_ADJUST_CFA_OFFSET 4;\
190 +       CFI_REL_OFFSET edi, 0;\
191 +       pushl %esi; \
192 +       CFI_ADJUST_CFA_OFFSET 4;\
193 +       CFI_REL_OFFSET esi, 0;\
194 +       pushl %edx; \
195 +       CFI_ADJUST_CFA_OFFSET 4;\
196 +       CFI_REL_OFFSET edx, 0;\
197 +       pushl %ecx; \
198 +       CFI_ADJUST_CFA_OFFSET 4;\
199 +       CFI_REL_OFFSET ecx, 0;\
200 +       pushl %ebx; \
201 +       CFI_ADJUST_CFA_OFFSET 4;\
202 +       CFI_REL_OFFSET ebx, 0;\
203 +       movl $(__USER_DS), %edx; \
204 +       movl %edx, %ds; \
205 +       movl %edx, %es; \
206 +       movl $(__KERNEL_PERCPU), %edx; \
207 +       movl %edx, %fs
208 +
209 +#define RESTORE_INT_REGS \
210 +       popl %ebx;      \
211 +       CFI_ADJUST_CFA_OFFSET -4;\
212 +       CFI_RESTORE ebx;\
213 +       popl %ecx;      \
214 +       CFI_ADJUST_CFA_OFFSET -4;\
215 +       CFI_RESTORE ecx;\
216 +       popl %edx;      \
217 +       CFI_ADJUST_CFA_OFFSET -4;\
218 +       CFI_RESTORE edx;\
219 +       popl %esi;      \
220 +       CFI_ADJUST_CFA_OFFSET -4;\
221 +       CFI_RESTORE esi;\
222 +       popl %edi;      \
223 +       CFI_ADJUST_CFA_OFFSET -4;\
224 +       CFI_RESTORE edi;\
225 +       popl %ebp;      \
226 +       CFI_ADJUST_CFA_OFFSET -4;\
227 +       CFI_RESTORE ebp;\
228 +       popl %eax;      \
229 +       CFI_ADJUST_CFA_OFFSET -4;\
230 +       CFI_RESTORE eax
231 +
232 +#define RESTORE_REGS   \
233 +       RESTORE_INT_REGS; \
234 +1:     popl %ds;       \
235 +       CFI_ADJUST_CFA_OFFSET -4;\
236 +       /*CFI_RESTORE ds;*/\
237 +2:     popl %es;       \
238 +       CFI_ADJUST_CFA_OFFSET -4;\
239 +       /*CFI_RESTORE es;*/\
240 +3:     popl %fs;       \
241 +       CFI_ADJUST_CFA_OFFSET -4;\
242 +       /*CFI_RESTORE fs;*/\
243 +.pushsection .fixup,"ax";      \
244 +4:     movl $0,(%esp); \
245 +       jmp 1b;         \
246 +5:     movl $0,(%esp); \
247 +       jmp 2b;         \
248 +6:     movl $0,(%esp); \
249 +       jmp 3b;         \
250 +.section __ex_table,"a";\
251 +       .align 4;       \
252 +       .long 1b,4b;    \
253 +       .long 2b,5b;    \
254 +       .long 3b,6b;    \
255 +.popsection
256 +
257 +#define RING0_INT_FRAME \
258 +       CFI_STARTPROC simple;\
259 +       CFI_SIGNAL_FRAME;\
260 +       CFI_DEF_CFA esp, 3*4;\
261 +       /*CFI_OFFSET cs, -2*4;*/\
262 +       CFI_OFFSET eip, -3*4
263 +
264 +#define RING0_EC_FRAME \
265 +       CFI_STARTPROC simple;\
266 +       CFI_SIGNAL_FRAME;\
267 +       CFI_DEF_CFA esp, 4*4;\
268 +       /*CFI_OFFSET cs, -2*4;*/\
269 +       CFI_OFFSET eip, -3*4
270 +
271 +#define RING0_PTREGS_FRAME \
272 +       CFI_STARTPROC simple;\
273 +       CFI_SIGNAL_FRAME;\
274 +       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
275 +       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
276 +       CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
277 +       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
278 +       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
279 +       CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
280 +       CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
281 +       CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
282 +       CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
283 +       CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
284 +       CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
285 +       CFI_OFFSET ebx, PT_EBX-PT_OLDESP
286 +
287 +ENTRY(ret_from_fork)
288 +       CFI_STARTPROC
289 +       pushl %eax
290 +       CFI_ADJUST_CFA_OFFSET 4
291 +       call schedule_tail
292 +       GET_THREAD_INFO(%ebp)
293 +       popl %eax
294 +       CFI_ADJUST_CFA_OFFSET -4
295 +       pushl $0x0202                   # Reset kernel eflags
296 +       CFI_ADJUST_CFA_OFFSET 4
297 +       popfl
298 +       CFI_ADJUST_CFA_OFFSET -4
299 +       jmp syscall_exit
300 +       CFI_ENDPROC
301 +END(ret_from_fork)
302 +
303 +/*
304 + * Return to user mode is not as complex as all this looks,
305 + * but we want the default path for a system call return to
306 + * go as quickly as possible which is why some of this is
307 + * less clear than it otherwise should be.
308 + */
309 +
310 +       # userspace resumption stub bypassing syscall exit tracing
311 +       ALIGN
312 +       RING0_PTREGS_FRAME
313 +ret_from_exception:
314 +       preempt_stop(CLBR_ANY)
315 +ret_from_intr:
316 +       GET_THREAD_INFO(%ebp)
317 +check_userspace:
318 +       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
319 +       movb PT_CS(%esp), %al
320 +       andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
321 +       cmpl $USER_RPL, %eax
322 +       jb resume_kernel                # not returning to v8086 or userspace
323 +
324 +ENTRY(resume_userspace)
325 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
326 +                                       # setting need_resched or sigpending
327 +                                       # between sampling and the iret
328 +       movl TI_flags(%ebp), %ecx
329 +       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
330 +                                       # int/exception return?
331 +       jne work_pending
332 +       jmp restore_all
333 +END(ret_from_exception)
334 +
335 +#ifdef CONFIG_PREEMPT
336 +ENTRY(resume_kernel)
337 +       DISABLE_INTERRUPTS(CLBR_ANY)
338 +       cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
339 +       jnz restore_nocheck
340 +need_resched:
341 +       movl TI_flags(%ebp), %ecx       # need_resched set ?
342 +       testb $_TIF_NEED_RESCHED, %cl
343 +       jz restore_all
344 +       testl $IF_MASK,PT_EFLAGS(%esp)  # interrupts off (exception path) ?
345 +       jz restore_all
346 +       call preempt_schedule_irq
347 +       jmp need_resched
348 +END(resume_kernel)
349 +#endif
350 +       CFI_ENDPROC
351 +
352 +/* SYSENTER_RETURN points to after the "sysenter" instruction in
353 +   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
354 +
355 +       # sysenter call handler stub
356 +ENTRY(sysenter_entry)
357 +       CFI_STARTPROC simple
358 +       CFI_SIGNAL_FRAME
359 +       CFI_DEF_CFA esp, 0
360 +       CFI_REGISTER esp, ebp
361 +       movl TSS_sysenter_esp0(%esp),%esp
362 +sysenter_past_esp:
363 +       /*
364 +        * No need to follow this irqs on/off section: the syscall
365 +        * disabled irqs and here we enable it straight after entry:
366 +        */
367 +       ENABLE_INTERRUPTS(CLBR_NONE)
368 +       pushl $(__USER_DS)
369 +       CFI_ADJUST_CFA_OFFSET 4
370 +       /*CFI_REL_OFFSET ss, 0*/
371 +       pushl %ebp
372 +       CFI_ADJUST_CFA_OFFSET 4
373 +       CFI_REL_OFFSET esp, 0
374 +       pushfl
375 +       CFI_ADJUST_CFA_OFFSET 4
376 +       pushl $(__USER_CS)
377 +       CFI_ADJUST_CFA_OFFSET 4
378 +       /*CFI_REL_OFFSET cs, 0*/
379 +       /*
380 +        * Push current_thread_info()->sysenter_return to the stack.
381 +        * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
382 +        * pushed above; +8 corresponds to copy_thread's esp0 setting.
383 +        */
384 +       pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
385 +       CFI_ADJUST_CFA_OFFSET 4
386 +       CFI_REL_OFFSET eip, 0
387 +
388 +/*
389 + * Load the potential sixth argument from user stack.
390 + * Careful about security.
391 + */
392 +       cmpl $__PAGE_OFFSET-3,%ebp
393 +       jae syscall_fault
394 +1:     movl (%ebp),%ebp
395 +.section __ex_table,"a"
396 +       .align 4
397 +       .long 1b,syscall_fault
398 +.previous
399 +
400 +       pushl %eax
401 +       CFI_ADJUST_CFA_OFFSET 4
402 +       SAVE_ALL
403 +       GET_THREAD_INFO(%ebp)
404 +
405 +       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
406 +       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
407 +       jnz syscall_trace_entry
408 +       cmpl $(nr_syscalls), %eax
409 +       jae syscall_badsys
410 +       call *sys_call_table(,%eax,4)
411 +       movl %eax,PT_EAX(%esp)
412 +       DISABLE_INTERRUPTS(CLBR_ANY)
413 +       TRACE_IRQS_OFF
414 +       movl TI_flags(%ebp), %ecx
415 +       testw $_TIF_ALLWORK_MASK, %cx
416 +       jne syscall_exit_work
417 +/* if something modifies registers it must also disable sysexit */
418 +       movl PT_EIP(%esp), %edx
419 +       movl PT_OLDESP(%esp), %ecx
420 +       xorl %ebp,%ebp
421 +       TRACE_IRQS_ON
422 +1:     mov  PT_FS(%esp), %fs
423 +       ENABLE_INTERRUPTS_SYSEXIT
424 +       CFI_ENDPROC
425 +.pushsection .fixup,"ax"
426 +2:     movl $0,PT_FS(%esp)
427 +       jmp 1b
428 +.section __ex_table,"a"
429 +       .align 4
430 +       .long 1b,2b
431 +.popsection
432 +ENDPROC(sysenter_entry)
433 +
434 +       # system call handler stub
435 +ENTRY(system_call)
436 +       RING0_INT_FRAME                 # can't unwind into user space anyway
437 +       pushl %eax                      # save orig_eax
438 +       CFI_ADJUST_CFA_OFFSET 4
439 +       SAVE_ALL
440 +       GET_THREAD_INFO(%ebp)
441 +                                       # system call tracing in operation / emulation
442 +       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
443 +       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
444 +       jnz syscall_trace_entry
445 +       cmpl $(nr_syscalls), %eax
446 +       jae syscall_badsys
447 +syscall_call:
448 +       call *sys_call_table(,%eax,4)
449 +       movl %eax,PT_EAX(%esp)          # store the return value
450 +syscall_exit:
451 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
452 +                                       # setting need_resched or sigpending
453 +                                       # between sampling and the iret
454 +       TRACE_IRQS_OFF
455 +       testl $TF_MASK,PT_EFLAGS(%esp)  # If tracing set singlestep flag on exit
456 +       jz no_singlestep
457 +       orl $_TIF_SINGLESTEP,TI_flags(%ebp)
458 +no_singlestep:
459 +       movl TI_flags(%ebp), %ecx
460 +       testw $_TIF_ALLWORK_MASK, %cx   # current->work
461 +       jne syscall_exit_work
462 +
463 +restore_all:
464 +       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
465 +       # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
466 +       # are returning to the kernel.
467 +       # See comments in process.c:copy_thread() for details.
468 +       movb PT_OLDSS(%esp), %ah
469 +       movb PT_CS(%esp), %al
470 +       andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
471 +       cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
472 +       CFI_REMEMBER_STATE
473 +       je ldt_ss                       # returning to user-space with LDT SS
474 +restore_nocheck:
475 +       TRACE_IRQS_IRET
476 +restore_nocheck_notrace:
477 +       RESTORE_REGS
478 +       addl $4, %esp                   # skip orig_eax/error_code
479 +       CFI_ADJUST_CFA_OFFSET -4
480 +1:     INTERRUPT_RETURN
481 +.section .fixup,"ax"
482 +iret_exc:
483 +       pushl $0                        # no error code
484 +       pushl $do_iret_error
485 +       jmp error_code
486 +.previous
487 +.section __ex_table,"a"
488 +       .align 4
489 +       .long 1b,iret_exc
490 +.previous
491 +
492 +       CFI_RESTORE_STATE
493 +ldt_ss:
494 +       larl PT_OLDSS(%esp), %eax
495 +       jnz restore_nocheck
496 +       testl $0x00400000, %eax         # returning to 32bit stack?
497 +       jnz restore_nocheck             # allright, normal return
498 +
499 +#ifdef CONFIG_PARAVIRT
500 +       /*
501 +        * The kernel can't run on a non-flat stack if paravirt mode
502 +        * is active.  Rather than try to fixup the high bits of
503 +        * ESP, bypass this code entirely.  This may break DOSemu
504 +        * and/or Wine support in a paravirt VM, although the option
505 +        * is still available to implement the setting of the high
506 +        * 16-bits in the INTERRUPT_RETURN paravirt-op.
507 +        */
508 +       cmpl $0, paravirt_ops+PARAVIRT_enabled
509 +       jne restore_nocheck
510 +#endif
511 +
512 +       /* If returning to userspace with 16bit stack,
513 +        * try to fix the higher word of ESP, as the CPU
514 +        * won't restore it.
515 +        * This is an "official" bug of all the x86-compatible
516 +        * CPUs, which we can try to work around to make
517 +        * dosemu and wine happy. */
518 +       movl PT_OLDESP(%esp), %eax
519 +       movl %esp, %edx
520 +       call patch_espfix_desc
521 +       pushl $__ESPFIX_SS
522 +       CFI_ADJUST_CFA_OFFSET 4
523 +       pushl %eax
524 +       CFI_ADJUST_CFA_OFFSET 4
525 +       DISABLE_INTERRUPTS(CLBR_EAX)
526 +       TRACE_IRQS_OFF
527 +       lss (%esp), %esp
528 +       CFI_ADJUST_CFA_OFFSET -8
529 +       jmp restore_nocheck
530 +       CFI_ENDPROC
531 +ENDPROC(system_call)
532 +
533 +       # perform work that needs to be done immediately before resumption
534 +       ALIGN
535 +       RING0_PTREGS_FRAME              # can't unwind into user space anyway
536 +work_pending:
537 +       testb $_TIF_NEED_RESCHED, %cl
538 +       jz work_notifysig
539 +work_resched:
540 +       call schedule
541 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
542 +                                       # setting need_resched or sigpending
543 +                                       # between sampling and the iret
544 +       TRACE_IRQS_OFF
545 +       movl TI_flags(%ebp), %ecx
546 +       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
547 +                                       # than syscall tracing?
548 +       jz restore_all
549 +       testb $_TIF_NEED_RESCHED, %cl
550 +       jnz work_resched
551 +
552 +work_notifysig:                                # deal with pending signals and
553 +                                       # notify-resume requests
554 +#ifdef CONFIG_VM86
555 +       testl $VM_MASK, PT_EFLAGS(%esp)
556 +       movl %esp, %eax
557 +       jne work_notifysig_v86          # returning to kernel-space or
558 +                                       # vm86-space
559 +       xorl %edx, %edx
560 +       call do_notify_resume
561 +       jmp resume_userspace_sig
562 +
563 +       ALIGN
564 +work_notifysig_v86:
565 +       pushl %ecx                      # save ti_flags for do_notify_resume
566 +       CFI_ADJUST_CFA_OFFSET 4
567 +       call save_v86_state             # %eax contains pt_regs pointer
568 +       popl %ecx
569 +       CFI_ADJUST_CFA_OFFSET -4
570 +       movl %eax, %esp
571 +#else
572 +       movl %esp, %eax
573 +#endif
574 +       xorl %edx, %edx
575 +       call do_notify_resume
576 +       jmp resume_userspace_sig
577 +END(work_pending)
578 +
579 +       # perform syscall exit tracing
580 +       ALIGN
581 +syscall_trace_entry:
582 +       movl $-ENOSYS,PT_EAX(%esp)
583 +       movl %esp, %eax
584 +       xorl %edx,%edx
585 +       call do_syscall_trace
586 +       cmpl $0, %eax
587 +       jne resume_userspace            # ret != 0 -> running under PTRACE_SYSEMU,
588 +                                       # so must skip actual syscall
589 +       movl PT_ORIG_EAX(%esp), %eax
590 +       cmpl $(nr_syscalls), %eax
591 +       jnae syscall_call
592 +       jmp syscall_exit
593 +END(syscall_trace_entry)
594 +
595 +       # perform syscall exit tracing
596 +       ALIGN
597 +syscall_exit_work:
598 +       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
599 +       jz work_pending
600 +       TRACE_IRQS_ON
601 +       ENABLE_INTERRUPTS(CLBR_ANY)     # could let do_syscall_trace() call
602 +                                       # schedule() instead
603 +       movl %esp, %eax
604 +       movl $1, %edx
605 +       call do_syscall_trace
606 +       jmp resume_userspace
607 +END(syscall_exit_work)
608 +       CFI_ENDPROC
609 +
610 +       RING0_INT_FRAME                 # can't unwind into user space anyway
611 +syscall_fault:
612 +       pushl %eax                      # save orig_eax
613 +       CFI_ADJUST_CFA_OFFSET 4
614 +       SAVE_ALL
615 +       GET_THREAD_INFO(%ebp)
616 +       movl $-EFAULT,PT_EAX(%esp)
617 +       jmp resume_userspace
618 +END(syscall_fault)
619 +
620 +syscall_badsys:
621 +       movl $-ENOSYS,PT_EAX(%esp)
622 +       jmp resume_userspace
623 +END(syscall_badsys)
624 +       CFI_ENDPROC
625 +
626 +#define FIXUP_ESPFIX_STACK \
627 +       /* since we are on a wrong stack, we cant make it a C code :( */ \
628 +       PER_CPU(gdt_page, %ebx); \
629 +       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
630 +       addl %esp, %eax; \
631 +       pushl $__KERNEL_DS; \
632 +       CFI_ADJUST_CFA_OFFSET 4; \
633 +       pushl %eax; \
634 +       CFI_ADJUST_CFA_OFFSET 4; \
635 +       lss (%esp), %esp; \
636 +       CFI_ADJUST_CFA_OFFSET -8;
637 +#define UNWIND_ESPFIX_STACK \
638 +       movl %ss, %eax; \
639 +       /* see if on espfix stack */ \
640 +       cmpw $__ESPFIX_SS, %ax; \
641 +       jne 27f; \
642 +       movl $__KERNEL_DS, %eax; \
643 +       movl %eax, %ds; \
644 +       movl %eax, %es; \
645 +       /* switch to normal stack */ \
646 +       FIXUP_ESPFIX_STACK; \
647 +27:;
648 +
649 +/*
650 + * Build the entry stubs and pointer table with
651 + * some assembler magic.
652 + */
653 +.data
654 +ENTRY(interrupt)
655 +.text
656 +
657 +ENTRY(irq_entries_start)
658 +       RING0_INT_FRAME
659 +vector=0
660 +.rept NR_IRQS
661 +       ALIGN
662 + .if vector
663 +       CFI_ADJUST_CFA_OFFSET -4
664 + .endif
665 +1:     pushl $~(vector)
666 +       CFI_ADJUST_CFA_OFFSET 4
667 +       jmp common_interrupt
668 + .previous
669 +       .long 1b
670 + .text
671 +vector=vector+1
672 +.endr
673 +END(irq_entries_start)
674 +
675 +.previous
676 +END(interrupt)
677 +.previous
678 +
679 +/*
680 + * the CPU automatically disables interrupts when executing an IRQ vector,
681 + * so IRQ-flags tracing has to follow that:
682 + */
683 +       ALIGN
684 +common_interrupt:
685 +       SAVE_ALL
686 +       TRACE_IRQS_OFF
687 +       movl %esp,%eax
688 +       call do_IRQ
689 +       jmp ret_from_intr
690 +ENDPROC(common_interrupt)
691 +       CFI_ENDPROC
692 +
693 +#define BUILD_INTERRUPT(name, nr)      \
694 +ENTRY(name)                            \
695 +       RING0_INT_FRAME;                \
696 +       pushl $~(nr);                   \
697 +       CFI_ADJUST_CFA_OFFSET 4;        \
698 +       SAVE_ALL;                       \
699 +       TRACE_IRQS_OFF                  \
700 +       movl %esp,%eax;                 \
701 +       call smp_##name;                \
702 +       jmp ret_from_intr;              \
703 +       CFI_ENDPROC;                    \
704 +ENDPROC(name)
705 +
706 +/* The include is where all of the SMP etc. interrupts come from */
707 +#include "entry_arch.h"
708 +
709 +KPROBE_ENTRY(page_fault)
710 +       RING0_EC_FRAME
711 +       pushl $do_page_fault
712 +       CFI_ADJUST_CFA_OFFSET 4
713 +       ALIGN
714 +error_code:
715 +       /* the function address is in %fs's slot on the stack */
716 +       pushl %es
717 +       CFI_ADJUST_CFA_OFFSET 4
718 +       /*CFI_REL_OFFSET es, 0*/
719 +       pushl %ds
720 +       CFI_ADJUST_CFA_OFFSET 4
721 +       /*CFI_REL_OFFSET ds, 0*/
722 +       pushl %eax
723 +       CFI_ADJUST_CFA_OFFSET 4
724 +       CFI_REL_OFFSET eax, 0
725 +       pushl %ebp
726 +       CFI_ADJUST_CFA_OFFSET 4
727 +       CFI_REL_OFFSET ebp, 0
728 +       pushl %edi
729 +       CFI_ADJUST_CFA_OFFSET 4
730 +       CFI_REL_OFFSET edi, 0
731 +       pushl %esi
732 +       CFI_ADJUST_CFA_OFFSET 4
733 +       CFI_REL_OFFSET esi, 0
734 +       pushl %edx
735 +       CFI_ADJUST_CFA_OFFSET 4
736 +       CFI_REL_OFFSET edx, 0
737 +       pushl %ecx
738 +       CFI_ADJUST_CFA_OFFSET 4
739 +       CFI_REL_OFFSET ecx, 0
740 +       pushl %ebx
741 +       CFI_ADJUST_CFA_OFFSET 4
742 +       CFI_REL_OFFSET ebx, 0
743 +       cld
744 +       pushl %fs
745 +       CFI_ADJUST_CFA_OFFSET 4
746 +       /*CFI_REL_OFFSET fs, 0*/
747 +       movl $(__KERNEL_PERCPU), %ecx
748 +       movl %ecx, %fs
749 +       UNWIND_ESPFIX_STACK
750 +       popl %ecx
751 +       CFI_ADJUST_CFA_OFFSET -4
752 +       /*CFI_REGISTER es, ecx*/
753 +       movl PT_FS(%esp), %edi          # get the function address
754 +       movl PT_ORIG_EAX(%esp), %edx    # get the error code
755 +       movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
756 +       mov  %ecx, PT_FS(%esp)
757 +       /*CFI_REL_OFFSET fs, ES*/
758 +       movl $(__USER_DS), %ecx
759 +       movl %ecx, %ds
760 +       movl %ecx, %es
761 +       movl %esp,%eax                  # pt_regs pointer
762 +       call *%edi
763 +       jmp ret_from_exception
764 +       CFI_ENDPROC
765 +KPROBE_END(page_fault)
766 +
767 +ENTRY(coprocessor_error)
768 +       RING0_INT_FRAME
769 +       pushl $0
770 +       CFI_ADJUST_CFA_OFFSET 4
771 +       pushl $do_coprocessor_error
772 +       CFI_ADJUST_CFA_OFFSET 4
773 +       jmp error_code
774 +       CFI_ENDPROC
775 +END(coprocessor_error)
776 +
777 +ENTRY(simd_coprocessor_error)
778 +       RING0_INT_FRAME
779 +       pushl $0
780 +       CFI_ADJUST_CFA_OFFSET 4
781 +       pushl $do_simd_coprocessor_error
782 +       CFI_ADJUST_CFA_OFFSET 4
783 +       jmp error_code
784 +       CFI_ENDPROC
785 +END(simd_coprocessor_error)
786 +
787 +ENTRY(device_not_available)
788 +       RING0_INT_FRAME
789 +       pushl $-1                       # mark this as an int
790 +       CFI_ADJUST_CFA_OFFSET 4
791 +       SAVE_ALL
792 +       GET_CR0_INTO_EAX
793 +       testl $0x4, %eax                # EM (math emulation bit)
794 +       jne device_not_available_emulate
795 +       preempt_stop(CLBR_ANY)
796 +       call math_state_restore
797 +       jmp ret_from_exception
798 +device_not_available_emulate:
799 +       pushl $0                        # temporary storage for ORIG_EIP
800 +       CFI_ADJUST_CFA_OFFSET 4
801 +       call math_emulate
802 +       addl $4, %esp
803 +       CFI_ADJUST_CFA_OFFSET -4
804 +       jmp ret_from_exception
805 +       CFI_ENDPROC
806 +END(device_not_available)
807 +
808 +/*
809 + * Debug traps and NMI can happen at the one SYSENTER instruction
810 + * that sets up the real kernel stack. Check here, since we can't
811 + * allow the wrong stack to be used.
812 + *
813 + * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
814 + * already pushed 3 words if it hits on the sysenter instruction:
815 + * eflags, cs and eip.
816 + *
817 + * We just load the right stack, and push the three (known) values
818 + * by hand onto the new stack - while updating the return eip past
819 + * the instruction that would have done it for sysenter.
820 + */
821 +#define FIX_STACK(offset, ok, label)           \
822 +       cmpw $__KERNEL_CS,4(%esp);              \
823 +       jne ok;                                 \
824 +label:                                         \
825 +       movl TSS_sysenter_esp0+offset(%esp),%esp;       \
826 +       CFI_DEF_CFA esp, 0;                     \
827 +       CFI_UNDEFINED eip;                      \
828 +       pushfl;                                 \
829 +       CFI_ADJUST_CFA_OFFSET 4;                \
830 +       pushl $__KERNEL_CS;                     \
831 +       CFI_ADJUST_CFA_OFFSET 4;                \
832 +       pushl $sysenter_past_esp;               \
833 +       CFI_ADJUST_CFA_OFFSET 4;                \
834 +       CFI_REL_OFFSET eip, 0
835 +
836 +KPROBE_ENTRY(debug)
837 +       RING0_INT_FRAME
838 +       cmpl $sysenter_entry,(%esp)
839 +       jne debug_stack_correct
840 +       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
841 +debug_stack_correct:
842 +       pushl $-1                       # mark this as an int
843 +       CFI_ADJUST_CFA_OFFSET 4
844 +       SAVE_ALL
845 +       xorl %edx,%edx                  # error code 0
846 +       movl %esp,%eax                  # pt_regs pointer
847 +       call do_debug
848 +       jmp ret_from_exception
849 +       CFI_ENDPROC
850 +KPROBE_END(debug)
851 +
852 +/*
853 + * NMI is doubly nasty. It can happen _while_ we're handling
854 + * a debug fault, and the debug fault hasn't yet been able to
855 + * clear up the stack. So we first check whether we got  an
856 + * NMI on the sysenter entry path, but after that we need to
857 + * check whether we got an NMI on the debug path where the debug
858 + * fault happened on the sysenter path.
859 + */
860 +KPROBE_ENTRY(nmi)
861 +       RING0_INT_FRAME
862 +       pushl %eax
863 +       CFI_ADJUST_CFA_OFFSET 4
864 +       movl %ss, %eax
865 +       cmpw $__ESPFIX_SS, %ax
866 +       popl %eax
867 +       CFI_ADJUST_CFA_OFFSET -4
868 +       je nmi_espfix_stack
869 +       cmpl $sysenter_entry,(%esp)
870 +       je nmi_stack_fixup
871 +       pushl %eax
872 +       CFI_ADJUST_CFA_OFFSET 4
873 +       movl %esp,%eax
874 +       /* Do not access memory above the end of our stack page,
875 +        * it might not exist.
876 +        */
877 +       andl $(THREAD_SIZE-1),%eax
878 +       cmpl $(THREAD_SIZE-20),%eax
879 +       popl %eax
880 +       CFI_ADJUST_CFA_OFFSET -4
881 +       jae nmi_stack_correct
882 +       cmpl $sysenter_entry,12(%esp)
883 +       je nmi_debug_stack_check
884 +nmi_stack_correct:
885 +       /* We have a RING0_INT_FRAME here */
886 +       pushl %eax
887 +       CFI_ADJUST_CFA_OFFSET 4
888 +       SAVE_ALL
889 +       xorl %edx,%edx          # zero error code
890 +       movl %esp,%eax          # pt_regs pointer
891 +       call do_nmi
892 +       jmp restore_nocheck_notrace
893 +       CFI_ENDPROC
894 +
895 +nmi_stack_fixup:
896 +       RING0_INT_FRAME
897 +       FIX_STACK(12,nmi_stack_correct, 1)
898 +       jmp nmi_stack_correct
899 +
900 +nmi_debug_stack_check:
901 +       /* We have a RING0_INT_FRAME here */
902 +       cmpw $__KERNEL_CS,16(%esp)
903 +       jne nmi_stack_correct
904 +       cmpl $debug,(%esp)
905 +       jb nmi_stack_correct
906 +       cmpl $debug_esp_fix_insn,(%esp)
907 +       ja nmi_stack_correct
908 +       FIX_STACK(24,nmi_stack_correct, 1)
909 +       jmp nmi_stack_correct
910 +
911 +nmi_espfix_stack:
912 +       /* We have a RING0_INT_FRAME here.
913 +        *
914 +        * create the pointer to lss back
915 +        */
916 +       pushl %ss
917 +       CFI_ADJUST_CFA_OFFSET 4
918 +       pushl %esp
919 +       CFI_ADJUST_CFA_OFFSET 4
920 +       addw $4, (%esp)
921 +       /* copy the iret frame of 12 bytes */
922 +       .rept 3
923 +       pushl 16(%esp)
924 +       CFI_ADJUST_CFA_OFFSET 4
925 +       .endr
926 +       pushl %eax
927 +       CFI_ADJUST_CFA_OFFSET 4
928 +       SAVE_ALL
929 +       FIXUP_ESPFIX_STACK              # %eax == %esp
930 +       xorl %edx,%edx                  # zero error code
931 +       call do_nmi
932 +       RESTORE_REGS
933 +       lss 12+4(%esp), %esp            # back to espfix stack
934 +       CFI_ADJUST_CFA_OFFSET -24
935 +1:     INTERRUPT_RETURN
936 +       CFI_ENDPROC
937 +.section __ex_table,"a"
938 +       .align 4
939 +       .long 1b,iret_exc
940 +.previous
941 +KPROBE_END(nmi)
942 +
943 +#ifdef CONFIG_PARAVIRT
944 +ENTRY(native_iret)
945 +1:     iret
946 +.section __ex_table,"a"
947 +       .align 4
948 +       .long 1b,iret_exc
949 +.previous
950 +END(native_iret)
951 +
952 +ENTRY(native_irq_enable_sysexit)
953 +       sti
954 +       sysexit
955 +END(native_irq_enable_sysexit)
956 +#endif
957 +
958 +KPROBE_ENTRY(int3)
959 +       RING0_INT_FRAME
960 +       pushl $-1                       # mark this as an int
961 +       CFI_ADJUST_CFA_OFFSET 4
962 +       SAVE_ALL
963 +       xorl %edx,%edx          # zero error code
964 +       movl %esp,%eax          # pt_regs pointer
965 +       call do_int3
966 +       jmp ret_from_exception
967 +       CFI_ENDPROC
968 +KPROBE_END(int3)
969 +
970 +ENTRY(overflow)
971 +       RING0_INT_FRAME
972 +       pushl $0
973 +       CFI_ADJUST_CFA_OFFSET 4
974 +       pushl $do_overflow
975 +       CFI_ADJUST_CFA_OFFSET 4
976 +       jmp error_code
977 +       CFI_ENDPROC
978 +END(overflow)
979 +
980 +ENTRY(bounds)
981 +       RING0_INT_FRAME
982 +       pushl $0
983 +       CFI_ADJUST_CFA_OFFSET 4
984 +       pushl $do_bounds
985 +       CFI_ADJUST_CFA_OFFSET 4
986 +       jmp error_code
987 +       CFI_ENDPROC
988 +END(bounds)
989 +
990 +ENTRY(invalid_op)
991 +       RING0_INT_FRAME
992 +       pushl $0
993 +       CFI_ADJUST_CFA_OFFSET 4
994 +       pushl $do_invalid_op
995 +       CFI_ADJUST_CFA_OFFSET 4
996 +       jmp error_code
997 +       CFI_ENDPROC
998 +END(invalid_op)
999 +
1000 +ENTRY(coprocessor_segment_overrun)
1001 +       RING0_INT_FRAME
1002 +       pushl $0
1003 +       CFI_ADJUST_CFA_OFFSET 4
1004 +       pushl $do_coprocessor_segment_overrun
1005 +       CFI_ADJUST_CFA_OFFSET 4
1006 +       jmp error_code
1007 +       CFI_ENDPROC
1008 +END(coprocessor_segment_overrun)
1009 +
1010 +ENTRY(invalid_TSS)
1011 +       RING0_EC_FRAME
1012 +       pushl $do_invalid_TSS
1013 +       CFI_ADJUST_CFA_OFFSET 4
1014 +       jmp error_code
1015 +       CFI_ENDPROC
1016 +END(invalid_TSS)
1017 +
1018 +ENTRY(segment_not_present)
1019 +       RING0_EC_FRAME
1020 +       pushl $do_segment_not_present
1021 +       CFI_ADJUST_CFA_OFFSET 4
1022 +       jmp error_code
1023 +       CFI_ENDPROC
1024 +END(segment_not_present)
1025 +
1026 +ENTRY(stack_segment)
1027 +       RING0_EC_FRAME
1028 +       pushl $do_stack_segment
1029 +       CFI_ADJUST_CFA_OFFSET 4
1030 +       jmp error_code
1031 +       CFI_ENDPROC
1032 +END(stack_segment)
1033 +
1034 +KPROBE_ENTRY(general_protection)
1035 +       RING0_EC_FRAME
1036 +       pushl $do_general_protection
1037 +       CFI_ADJUST_CFA_OFFSET 4
1038 +       jmp error_code
1039 +       CFI_ENDPROC
1040 +KPROBE_END(general_protection)
1041 +
1042 +ENTRY(alignment_check)
1043 +       RING0_EC_FRAME
1044 +       pushl $do_alignment_check
1045 +       CFI_ADJUST_CFA_OFFSET 4
1046 +       jmp error_code
1047 +       CFI_ENDPROC
1048 +END(alignment_check)
1049 +
1050 +ENTRY(divide_error)
1051 +       RING0_INT_FRAME
1052 +       pushl $0                        # no error code
1053 +       CFI_ADJUST_CFA_OFFSET 4
1054 +       pushl $do_divide_error
1055 +       CFI_ADJUST_CFA_OFFSET 4
1056 +       jmp error_code
1057 +       CFI_ENDPROC
1058 +END(divide_error)
1059 +
1060 +#ifdef CONFIG_X86_MCE
1061 +ENTRY(machine_check)
1062 +       RING0_INT_FRAME
1063 +       pushl $0
1064 +       CFI_ADJUST_CFA_OFFSET 4
1065 +       pushl machine_check_vector
1066 +       CFI_ADJUST_CFA_OFFSET 4
1067 +       jmp error_code
1068 +       CFI_ENDPROC
1069 +END(machine_check)
1070 +#endif
1071 +
1072 +ENTRY(spurious_interrupt_bug)
1073 +       RING0_INT_FRAME
1074 +       pushl $0
1075 +       CFI_ADJUST_CFA_OFFSET 4
1076 +       pushl $do_spurious_interrupt_bug
1077 +       CFI_ADJUST_CFA_OFFSET 4
1078 +       jmp error_code
1079 +       CFI_ENDPROC
1080 +END(spurious_interrupt_bug)
1081 +
1082 +ENTRY(kernel_thread_helper)
1083 +       pushl $0                # fake return address for unwinder
1084 +       CFI_STARTPROC
1085 +       movl %edx,%eax
1086 +       push %edx
1087 +       CFI_ADJUST_CFA_OFFSET 4
1088 +       call *%ebx
1089 +       push %eax
1090 +       CFI_ADJUST_CFA_OFFSET 4
1091 +       call do_exit
1092 +       CFI_ENDPROC
1093 +ENDPROC(kernel_thread_helper)
1094 +
1095 +.section .rodata,"a"
1096 +#include "syscall_table.S"
1097 +
1098 +syscall_table_size=(.-sys_call_table)
1099 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/kernel/entry.S.syscallprobe linux-2.6.22-590/arch/i386/kernel/entry.S.syscallprobe
1100 --- linux-2.6.22-580/arch/i386/kernel/entry.S.syscallprobe      1969-12-31 19:00:00.000000000 -0500
1101 +++ linux-2.6.22-590/arch/i386/kernel/entry.S.syscallprobe      2009-02-18 09:57:23.000000000 -0500
1102 @@ -0,0 +1,1055 @@
1103 +/*
1104 + *  linux/arch/i386/entry.S
1105 + *
1106 + *  Copyright (C) 1991, 1992  Linus Torvalds
1107 + */
1108 +
1109 +/*
1110 + * entry.S contains the system-call and fault low-level handling routines.
1111 + * This also contains the timer-interrupt handler, as well as all interrupts
1112 + * and faults that can result in a task-switch.
1113 + *
1114 + * NOTE: This code handles signal-recognition, which happens every time
1115 + * after a timer-interrupt and after each system call.
1116 + *
1117 + * I changed all the .align's to 4 (16 byte alignment), as that's faster
1118 + * on a 486.
1119 + *
1120 + * Stack layout in 'syscall_exit':
1121 + *     ptrace needs to have all regs on the stack.
1122 + *     if the order here is changed, it needs to be
1123 + *     updated in fork.c:copy_process, signal.c:do_signal,
1124 + *     ptrace.c and ptrace.h
1125 + *
1126 + *      0(%esp) - %ebx
1127 + *      4(%esp) - %ecx
1128 + *      8(%esp) - %edx
1129 + *       C(%esp) - %esi
1130 + *     10(%esp) - %edi
1131 + *     14(%esp) - %ebp
1132 + *     18(%esp) - %eax
1133 + *     1C(%esp) - %ds
1134 + *     20(%esp) - %es
1135 + *     24(%esp) - %fs
1136 + *     28(%esp) - orig_eax
1137 + *     2C(%esp) - %eip
1138 + *     30(%esp) - %cs
1139 + *     34(%esp) - %eflags
1140 + *     38(%esp) - %oldesp
1141 + *     3C(%esp) - %oldss
1142 + *
1143 + * "current" is in register %ebx during any slow entries.
1144 + */
1145 +
1146 +#include <linux/linkage.h>
1147 +#include <asm/thread_info.h>
1148 +#include <asm/irqflags.h>
1149 +#include <asm/errno.h>
1150 +#include <asm/segment.h>
1151 +#include <asm/smp.h>
1152 +#include <asm/page.h>
1153 +#include <asm/desc.h>
1154 +#include <asm/percpu.h>
1155 +#include <asm/dwarf2.h>
1156 +#include "irq_vectors.h"
1157 +
1158 +/*
1159 + * We use macros for low-level operations which need to be overridden
1160 + * for paravirtualization.  The following will never clobber any registers:
1161 + *   INTERRUPT_RETURN (aka. "iret")
1162 + *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
1163 + *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
1164 + *
1165 + * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
1166 + * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
1167 + * Allowing a register to be clobbered can shrink the paravirt replacement
1168 + * enough to patch inline, increasing performance.
1169 + */
1170 +
1171 +#define nr_syscalls ((syscall_table_size)/4)
1172 +
1173 +CF_MASK                = 0x00000001
1174 +TF_MASK                = 0x00000100
1175 +IF_MASK                = 0x00000200
1176 +DF_MASK                = 0x00000400 
1177 +NT_MASK                = 0x00004000
1178 +VM_MASK                = 0x00020000
1179 +
1180 +#ifdef CONFIG_PREEMPT
1181 +#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
1182 +#else
1183 +#define preempt_stop(clobbers)
1184 +#define resume_kernel          restore_nocheck
1185 +#endif
1186 +
1187 +.macro TRACE_IRQS_IRET
1188 +#ifdef CONFIG_TRACE_IRQFLAGS
1189 +       testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
1190 +       jz 1f
1191 +       TRACE_IRQS_ON
1192 +1:
1193 +#endif
1194 +.endm
1195 +
1196 +#ifdef CONFIG_VM86
1197 +#define resume_userspace_sig   check_userspace
1198 +#else
1199 +#define resume_userspace_sig   resume_userspace
1200 +#endif
1201 +
1202 +#define SAVE_ALL \
1203 +       cld; \
1204 +       pushl %fs; \
1205 +       CFI_ADJUST_CFA_OFFSET 4;\
1206 +       /*CFI_REL_OFFSET fs, 0;*/\
1207 +       pushl %es; \
1208 +       CFI_ADJUST_CFA_OFFSET 4;\
1209 +       /*CFI_REL_OFFSET es, 0;*/\
1210 +       pushl %ds; \
1211 +       CFI_ADJUST_CFA_OFFSET 4;\
1212 +       /*CFI_REL_OFFSET ds, 0;*/\
1213 +       pushl %eax; \
1214 +       CFI_ADJUST_CFA_OFFSET 4;\
1215 +       CFI_REL_OFFSET eax, 0;\
1216 +       pushl %ebp; \
1217 +       CFI_ADJUST_CFA_OFFSET 4;\
1218 +       CFI_REL_OFFSET ebp, 0;\
1219 +       pushl %edi; \
1220 +       CFI_ADJUST_CFA_OFFSET 4;\
1221 +       CFI_REL_OFFSET edi, 0;\
1222 +       pushl %esi; \
1223 +       CFI_ADJUST_CFA_OFFSET 4;\
1224 +       CFI_REL_OFFSET esi, 0;\
1225 +       pushl %edx; \
1226 +       CFI_ADJUST_CFA_OFFSET 4;\
1227 +       CFI_REL_OFFSET edx, 0;\
1228 +       pushl %ecx; \
1229 +       CFI_ADJUST_CFA_OFFSET 4;\
1230 +       CFI_REL_OFFSET ecx, 0;\
1231 +       pushl %ebx; \
1232 +       CFI_ADJUST_CFA_OFFSET 4;\
1233 +       CFI_REL_OFFSET ebx, 0;\
1234 +       movl $(__USER_DS), %edx; \
1235 +       movl %edx, %ds; \
1236 +       movl %edx, %es; \
1237 +       movl $(__KERNEL_PERCPU), %edx; \
1238 +       movl %edx, %fs
1239 +
1240 +#define RESTORE_INT_REGS \
1241 +       popl %ebx;      \
1242 +       CFI_ADJUST_CFA_OFFSET -4;\
1243 +       CFI_RESTORE ebx;\
1244 +       popl %ecx;      \
1245 +       CFI_ADJUST_CFA_OFFSET -4;\
1246 +       CFI_RESTORE ecx;\
1247 +       popl %edx;      \
1248 +       CFI_ADJUST_CFA_OFFSET -4;\
1249 +       CFI_RESTORE edx;\
1250 +       popl %esi;      \
1251 +       CFI_ADJUST_CFA_OFFSET -4;\
1252 +       CFI_RESTORE esi;\
1253 +       popl %edi;      \
1254 +       CFI_ADJUST_CFA_OFFSET -4;\
1255 +       CFI_RESTORE edi;\
1256 +       popl %ebp;      \
1257 +       CFI_ADJUST_CFA_OFFSET -4;\
1258 +       CFI_RESTORE ebp;\
1259 +       popl %eax;      \
1260 +       CFI_ADJUST_CFA_OFFSET -4;\
1261 +       CFI_RESTORE eax
1262 +
1263 +#define RESTORE_REGS   \
1264 +       RESTORE_INT_REGS; \
1265 +1:     popl %ds;       \
1266 +       CFI_ADJUST_CFA_OFFSET -4;\
1267 +       /*CFI_RESTORE ds;*/\
1268 +2:     popl %es;       \
1269 +       CFI_ADJUST_CFA_OFFSET -4;\
1270 +       /*CFI_RESTORE es;*/\
1271 +3:     popl %fs;       \
1272 +       CFI_ADJUST_CFA_OFFSET -4;\
1273 +       /*CFI_RESTORE fs;*/\
1274 +.pushsection .fixup,"ax";      \
1275 +4:     movl $0,(%esp); \
1276 +       jmp 1b;         \
1277 +5:     movl $0,(%esp); \
1278 +       jmp 2b;         \
1279 +6:     movl $0,(%esp); \
1280 +       jmp 3b;         \
1281 +.section __ex_table,"a";\
1282 +       .align 4;       \
1283 +       .long 1b,4b;    \
1284 +       .long 2b,5b;    \
1285 +       .long 3b,6b;    \
1286 +.popsection
1287 +
1288 +#define RING0_INT_FRAME \
1289 +       CFI_STARTPROC simple;\
1290 +       CFI_SIGNAL_FRAME;\
1291 +       CFI_DEF_CFA esp, 3*4;\
1292 +       /*CFI_OFFSET cs, -2*4;*/\
1293 +       CFI_OFFSET eip, -3*4
1294 +
1295 +#define RING0_EC_FRAME \
1296 +       CFI_STARTPROC simple;\
1297 +       CFI_SIGNAL_FRAME;\
1298 +       CFI_DEF_CFA esp, 4*4;\
1299 +       /*CFI_OFFSET cs, -2*4;*/\
1300 +       CFI_OFFSET eip, -3*4
1301 +
1302 +#define RING0_PTREGS_FRAME \
1303 +       CFI_STARTPROC simple;\
1304 +       CFI_SIGNAL_FRAME;\
1305 +       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
1306 +       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
1307 +       CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
1308 +       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
1309 +       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
1310 +       CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
1311 +       CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
1312 +       CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
1313 +       CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
1314 +       CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
1315 +       CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
1316 +       CFI_OFFSET ebx, PT_EBX-PT_OLDESP
1317 +
1318 +ENTRY(ret_from_fork)
1319 +       CFI_STARTPROC
1320 +       pushl %eax
1321 +       CFI_ADJUST_CFA_OFFSET 4
1322 +       call schedule_tail
1323 +       GET_THREAD_INFO(%ebp)
1324 +       popl %eax
1325 +       CFI_ADJUST_CFA_OFFSET -4
1326 +       pushl $0x0202                   # Reset kernel eflags
1327 +       CFI_ADJUST_CFA_OFFSET 4
1328 +       popfl
1329 +       CFI_ADJUST_CFA_OFFSET -4
1330 +       jmp syscall_exit
1331 +       CFI_ENDPROC
1332 +END(ret_from_fork)
1333 +
1334 +/*
1335 + * Return to user mode is not as complex as all this looks,
1336 + * but we want the default path for a system call return to
1337 + * go as quickly as possible which is why some of this is
1338 + * less clear than it otherwise should be.
1339 + */
1340 +
1341 +       # userspace resumption stub bypassing syscall exit tracing
1342 +       ALIGN
1343 +       RING0_PTREGS_FRAME
1344 +ret_from_exception:
1345 +       preempt_stop(CLBR_ANY)
1346 +ret_from_intr:
1347 +       GET_THREAD_INFO(%ebp)
1348 +check_userspace:
1349 +       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
1350 +       movb PT_CS(%esp), %al
1351 +       andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
1352 +       cmpl $USER_RPL, %eax
1353 +       jb resume_kernel                # not returning to v8086 or userspace
1354 +
1355 +ENTRY(resume_userspace)
1356 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
1357 +                                       # setting need_resched or sigpending
1358 +                                       # between sampling and the iret
1359 +       movl TI_flags(%ebp), %ecx
1360 +       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
1361 +                                       # int/exception return?
1362 +       jne work_pending
1363 +       jmp restore_all
1364 +END(ret_from_exception)
1365 +
1366 +#ifdef CONFIG_PREEMPT
1367 +ENTRY(resume_kernel)
1368 +       DISABLE_INTERRUPTS(CLBR_ANY)
1369 +       cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
1370 +       jnz restore_nocheck
1371 +need_resched:
1372 +       movl TI_flags(%ebp), %ecx       # need_resched set ?
1373 +       testb $_TIF_NEED_RESCHED, %cl
1374 +       jz restore_all
1375 +       testl $IF_MASK,PT_EFLAGS(%esp)  # interrupts off (exception path) ?
1376 +       jz restore_all
1377 +       call preempt_schedule_irq
1378 +       jmp need_resched
1379 +END(resume_kernel)
1380 +#endif
1381 +       CFI_ENDPROC
1382 +
1383 +/* SYSENTER_RETURN points to after the "sysenter" instruction in
1384 +   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
1385 +
1386 +       # sysenter call handler stub
1387 +ENTRY(sysenter_entry)
1388 +       CFI_STARTPROC simple
1389 +       CFI_SIGNAL_FRAME
1390 +       CFI_DEF_CFA esp, 0
1391 +       CFI_REGISTER esp, ebp
1392 +       movl TSS_sysenter_esp0(%esp),%esp
1393 +sysenter_past_esp:
1394 +       /*
1395 +        * No need to follow this irqs on/off section: the syscall
1396 +        * disabled irqs and here we enable it straight after entry:
1397 +        */
1398 +       ENABLE_INTERRUPTS(CLBR_NONE)
1399 +       pushl $(__USER_DS)
1400 +       CFI_ADJUST_CFA_OFFSET 4
1401 +       /*CFI_REL_OFFSET ss, 0*/
1402 +       pushl %ebp
1403 +       CFI_ADJUST_CFA_OFFSET 4
1404 +       CFI_REL_OFFSET esp, 0
1405 +       pushfl
1406 +       CFI_ADJUST_CFA_OFFSET 4
1407 +       pushl $(__USER_CS)
1408 +       CFI_ADJUST_CFA_OFFSET 4
1409 +       /*CFI_REL_OFFSET cs, 0*/
1410 +       /*
1411 +        * Push current_thread_info()->sysenter_return to the stack.
1412 +        * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
1413 +        * pushed above; +8 corresponds to copy_thread's esp0 setting.
1414 +        */
1415 +       pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
1416 +       CFI_ADJUST_CFA_OFFSET 4
1417 +       CFI_REL_OFFSET eip, 0
1418 +
1419 +/*
1420 + * Load the potential sixth argument from user stack.
1421 + * Careful about security.
1422 + */
1423 +       cmpl $__PAGE_OFFSET-3,%ebp
1424 +       jae syscall_fault
1425 +1:     movl (%ebp),%ebp
1426 +.section __ex_table,"a"
1427 +       .align 4
1428 +       .long 1b,syscall_fault
1429 +.previous
1430 +
1431 +       pushl %eax
1432 +       CFI_ADJUST_CFA_OFFSET 4
1433 +       SAVE_ALL
1434 +       GET_THREAD_INFO(%ebp)
1435 +
1436 +       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
1437 +       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
1438 +       jnz syscall_trace_entry
1439 +       cmpl $(nr_syscalls), %eax
1440 +       jae syscall_badsys
1441 +       call *sys_call_table(,%eax,4)
1442 +       movl %eax,PT_EAX(%esp)
1443 +       DISABLE_INTERRUPTS(CLBR_ANY)
1444 +       TRACE_IRQS_OFF
1445 +       movl TI_flags(%ebp), %ecx
1446 +       testw $_TIF_ALLWORK_MASK, %cx
1447 +       jne syscall_exit_work
1448 +/* if something modifies registers it must also disable sysexit */
1449 +       movl PT_EIP(%esp), %edx
1450 +       movl PT_OLDESP(%esp), %ecx
1451 +       xorl %ebp,%ebp
1452 +       TRACE_IRQS_ON
1453 +1:     mov  PT_FS(%esp), %fs
1454 +       ENABLE_INTERRUPTS_SYSEXIT
1455 +       CFI_ENDPROC
1456 +.pushsection .fixup,"ax"
1457 +2:     movl $0,PT_FS(%esp)
1458 +       jmp 1b
1459 +.section __ex_table,"a"
1460 +       .align 4
1461 +       .long 1b,2b
1462 +.popsection
1463 +ENDPROC(sysenter_entry)
1464 +
1465 +       # system call handler stub
1466 +ENTRY(system_call)
1467 +       RING0_INT_FRAME                 # can't unwind into user space anyway
1468 +       pushl %eax                      # save orig_eax
1469 +       CFI_ADJUST_CFA_OFFSET 4
1470 +       SAVE_ALL
1471 +       GET_THREAD_INFO(%ebp)
1472 +                                       # system call tracing in operation / emulation
1473 +       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
1474 +       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
1475 +       jnz syscall_trace_entry
1476 +       cmpl $(nr_syscalls), %eax
1477 +       jae syscall_badsys
1478 +syscall_call:
1479 +    /* Move Chopstix syscall probe here */
1480 +    /* Save and clobber: eax, ecx, ebp  */
1481 +    pushl   %ebp
1482 +    movl    %esp, %ebp
1483 +    pushl   %eax
1484 +    pushl   %ecx
1485 +    subl    $SPEC_EVENT_SIZE, %esp 
1486 +    movl    rec_event, %ecx
1487 +    testl   %ecx, %ecx
1488 +    jz  carry_on
1489 +    movl    %eax, (SPEC_number-EVENT_SIZE)(%ebp)
1490 +    leal    SPEC_EVENT_SIZE(%ebp), %eax
1491 +    movl    %eax, EVENT_event_data(%ebp)
1492 +    GET_THREAD_INFO(%eax)
1493 +    movl    %eax, EVENT_task(%ebp)
1494 +    movl    $7, EVENT_event_type(%ebp)
1495 +    movl    rec_event, %edx
1496 +    movl    $1, 4(%esp)
1497 +    leal    -EVENT_SIZE(%ebp), %eax
1498 +    movl    %eax, (%esp)
1499 +    /*call    rec_event_asm */
1500 +carry_on: 
1501 +    addl $SPEC_EVENT_SIZE, %esp
1502 +    popl %ecx
1503 +    popl %eax
1504 +    popl %ebp
1505 +     /* End chopstix */
1506 +
1507 +       call *sys_call_table(,%eax,4)
1508 +       movl %eax,PT_EAX(%esp)          # store the return value
1509 +syscall_exit:
1510 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
1511 +                                       # setting need_resched or sigpending
1512 +                                       # between sampling and the iret
1513 +       TRACE_IRQS_OFF
1514 +       testl $TF_MASK,PT_EFLAGS(%esp)  # If tracing set singlestep flag on exit
1515 +       jz no_singlestep
1516 +       orl $_TIF_SINGLESTEP,TI_flags(%ebp)
1517 +no_singlestep:
1518 +       movl TI_flags(%ebp), %ecx
1519 +       testw $_TIF_ALLWORK_MASK, %cx   # current->work
1520 +       jne syscall_exit_work
1521 +
1522 +restore_all:
1523 +       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
1524 +       # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
1525 +       # are returning to the kernel.
1526 +       # See comments in process.c:copy_thread() for details.
1527 +       movb PT_OLDSS(%esp), %ah
1528 +       movb PT_CS(%esp), %al
1529 +       andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
1530 +       cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
1531 +       CFI_REMEMBER_STATE
1532 +       je ldt_ss                       # returning to user-space with LDT SS
1533 +restore_nocheck:
1534 +       TRACE_IRQS_IRET
1535 +restore_nocheck_notrace:
1536 +       RESTORE_REGS
1537 +       addl $4, %esp                   # skip orig_eax/error_code
1538 +       CFI_ADJUST_CFA_OFFSET -4
1539 +1:     INTERRUPT_RETURN
1540 +.section .fixup,"ax"
1541 +iret_exc:
1542 +       pushl $0                        # no error code
1543 +       pushl $do_iret_error
1544 +       jmp error_code
1545 +.previous
1546 +.section __ex_table,"a"
1547 +       .align 4
1548 +       .long 1b,iret_exc
1549 +.previous
1550 +
1551 +       CFI_RESTORE_STATE
1552 +ldt_ss:
1553 +       larl PT_OLDSS(%esp), %eax
1554 +       jnz restore_nocheck
1555 +       testl $0x00400000, %eax         # returning to 32bit stack?
1556 +       jnz restore_nocheck             # all right, normal return
1557 +
1558 +#ifdef CONFIG_PARAVIRT
1559 +       /*
1560 +        * The kernel can't run on a non-flat stack if paravirt mode
1561 +        * is active.  Rather than try to fixup the high bits of
1562 +        * ESP, bypass this code entirely.  This may break DOSemu
1563 +        * and/or Wine support in a paravirt VM, although the option
1564 +        * is still available to implement the setting of the high
1565 +        * 16-bits in the INTERRUPT_RETURN paravirt-op.
1566 +        */
1567 +       cmpl $0, paravirt_ops+PARAVIRT_enabled
1568 +       jne restore_nocheck
1569 +#endif
1570 +
1571 +       /* If returning to userspace with 16bit stack,
1572 +        * try to fix the higher word of ESP, as the CPU
1573 +        * won't restore it.
1574 +        * This is an "official" bug of all the x86-compatible
1575 +        * CPUs, which we can try to work around to make
1576 +        * dosemu and wine happy. */
1577 +       movl PT_OLDESP(%esp), %eax
1578 +       movl %esp, %edx
1579 +       call patch_espfix_desc
1580 +       pushl $__ESPFIX_SS
1581 +       CFI_ADJUST_CFA_OFFSET 4
1582 +       pushl %eax
1583 +       CFI_ADJUST_CFA_OFFSET 4
1584 +       DISABLE_INTERRUPTS(CLBR_EAX)
1585 +       TRACE_IRQS_OFF
1586 +       lss (%esp), %esp
1587 +       CFI_ADJUST_CFA_OFFSET -8
1588 +       jmp restore_nocheck
1589 +       CFI_ENDPROC
1590 +ENDPROC(system_call)
1591 +
1592 +       # perform work that needs to be done immediately before resumption
1593 +       ALIGN
1594 +       RING0_PTREGS_FRAME              # can't unwind into user space anyway
1595 +work_pending:
1596 +       testb $_TIF_NEED_RESCHED, %cl
1597 +       jz work_notifysig
1598 +work_resched:
1599 +       call schedule
1600 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
1601 +                                       # setting need_resched or sigpending
1602 +                                       # between sampling and the iret
1603 +       TRACE_IRQS_OFF
1604 +       movl TI_flags(%ebp), %ecx
1605 +       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
1606 +                                       # than syscall tracing?
1607 +       jz restore_all
1608 +       testb $_TIF_NEED_RESCHED, %cl
1609 +       jnz work_resched
1610 +
1611 +work_notifysig:                                # deal with pending signals and
1612 +                                       # notify-resume requests
1613 +#ifdef CONFIG_VM86
1614 +       testl $VM_MASK, PT_EFLAGS(%esp)
1615 +       movl %esp, %eax
1616 +       jne work_notifysig_v86          # returning to kernel-space or
1617 +                                       # vm86-space
1618 +       xorl %edx, %edx
1619 +       call do_notify_resume
1620 +       jmp resume_userspace_sig
1621 +
1622 +       ALIGN
1623 +work_notifysig_v86:
1624 +       pushl %ecx                      # save ti_flags for do_notify_resume
1625 +       CFI_ADJUST_CFA_OFFSET 4
1626 +       call save_v86_state             # %eax contains pt_regs pointer
1627 +       popl %ecx
1628 +       CFI_ADJUST_CFA_OFFSET -4
1629 +       movl %eax, %esp
1630 +#else
1631 +       movl %esp, %eax
1632 +#endif
1633 +       xorl %edx, %edx
1634 +       call do_notify_resume
1635 +       jmp resume_userspace_sig
1636 +END(work_pending)
1637 +
1638 +       # perform syscall exit tracing
1639 +       ALIGN
1640 +syscall_trace_entry:
1641 +       movl $-ENOSYS,PT_EAX(%esp)
1642 +       movl %esp, %eax
1643 +       xorl %edx,%edx
1644 +       call do_syscall_trace
1645 +       cmpl $0, %eax
1646 +       jne resume_userspace            # ret != 0 -> running under PTRACE_SYSEMU,
1647 +                                       # so must skip actual syscall
1648 +       movl PT_ORIG_EAX(%esp), %eax
1649 +       cmpl $(nr_syscalls), %eax
1650 +       jnae syscall_call
1651 +       jmp syscall_exit
1652 +END(syscall_trace_entry)
1653 +
1654 +       # perform syscall exit tracing
1655 +       ALIGN
1656 +syscall_exit_work:
1657 +       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
1658 +       jz work_pending
1659 +       TRACE_IRQS_ON
1660 +       ENABLE_INTERRUPTS(CLBR_ANY)     # could let do_syscall_trace() call
1661 +                                       # schedule() instead
1662 +       movl %esp, %eax
1663 +       movl $1, %edx
1664 +       call do_syscall_trace
1665 +       jmp resume_userspace
1666 +END(syscall_exit_work)
1667 +       CFI_ENDPROC
1668 +
1669 +       RING0_INT_FRAME                 # can't unwind into user space anyway
1670 +syscall_fault:
1671 +       pushl %eax                      # save orig_eax
1672 +       CFI_ADJUST_CFA_OFFSET 4
1673 +       SAVE_ALL
1674 +       GET_THREAD_INFO(%ebp)
1675 +       movl $-EFAULT,PT_EAX(%esp)
1676 +       jmp resume_userspace
1677 +END(syscall_fault)
1678 +
1679 +syscall_badsys:
1680 +       movl $-ENOSYS,PT_EAX(%esp)
1681 +       jmp resume_userspace
1682 +END(syscall_badsys)
1683 +       CFI_ENDPROC
1684 +
1685 +#define FIXUP_ESPFIX_STACK \
1686 +       /* since we are on a wrong stack, we can't make it C code :( */ \
1687 +       PER_CPU(gdt_page, %ebx); \
1688 +       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
1689 +       addl %esp, %eax; \
1690 +       pushl $__KERNEL_DS; \
1691 +       CFI_ADJUST_CFA_OFFSET 4; \
1692 +       pushl %eax; \
1693 +       CFI_ADJUST_CFA_OFFSET 4; \
1694 +       lss (%esp), %esp; \
1695 +       CFI_ADJUST_CFA_OFFSET -8;
1696 +#define UNWIND_ESPFIX_STACK \
1697 +       movl %ss, %eax; \
1698 +       /* see if on espfix stack */ \
1699 +       cmpw $__ESPFIX_SS, %ax; \
1700 +       jne 27f; \
1701 +       movl $__KERNEL_DS, %eax; \
1702 +       movl %eax, %ds; \
1703 +       movl %eax, %es; \
1704 +       /* switch to normal stack */ \
1705 +       FIXUP_ESPFIX_STACK; \
1706 +27:;
1707 +
1708 +/*
1709 + * Build the entry stubs and pointer table with
1710 + * some assembler magic.
1711 + */
1712 +.data
1713 +ENTRY(interrupt)
1714 +.text
1715 +
1716 +ENTRY(irq_entries_start)
1717 +       RING0_INT_FRAME
1718 +vector=0
1719 +.rept NR_IRQS
1720 +       ALIGN
1721 + .if vector
1722 +       CFI_ADJUST_CFA_OFFSET -4
1723 + .endif
1724 +1:     pushl $~(vector)
1725 +       CFI_ADJUST_CFA_OFFSET 4
1726 +       jmp common_interrupt
1727 + .previous
1728 +       .long 1b
1729 + .text
1730 +vector=vector+1
1731 +.endr
1732 +END(irq_entries_start)
1733 +
1734 +.previous
1735 +END(interrupt)
1736 +.previous
1737 +
1738 +/*
1739 + * the CPU automatically disables interrupts when executing an IRQ vector,
1740 + * so IRQ-flags tracing has to follow that:
1741 + */
1742 +       ALIGN
1743 +common_interrupt:
1744 +       SAVE_ALL
1745 +       TRACE_IRQS_OFF
1746 +       movl %esp,%eax
1747 +       call do_IRQ
1748 +       jmp ret_from_intr
1749 +ENDPROC(common_interrupt)
1750 +       CFI_ENDPROC
1751 +
1752 +#define BUILD_INTERRUPT(name, nr)      \
1753 +ENTRY(name)                            \
1754 +       RING0_INT_FRAME;                \
1755 +       pushl $~(nr);                   \
1756 +       CFI_ADJUST_CFA_OFFSET 4;        \
1757 +       SAVE_ALL;                       \
1758 +       TRACE_IRQS_OFF                  \
1759 +       movl %esp,%eax;                 \
1760 +       call smp_##name;                \
1761 +       jmp ret_from_intr;              \
1762 +       CFI_ENDPROC;                    \
1763 +ENDPROC(name)
1764 +
1765 +/* The include is where all of the SMP etc. interrupts come from */
1766 +#include "entry_arch.h"
1767 +
1768 +KPROBE_ENTRY(page_fault)
1769 +       RING0_EC_FRAME
1770 +       pushl $do_page_fault
1771 +       CFI_ADJUST_CFA_OFFSET 4
1772 +       ALIGN
1773 +error_code:
1774 +       /* the function address is in %fs's slot on the stack */
1775 +       pushl %es
1776 +       CFI_ADJUST_CFA_OFFSET 4
1777 +       /*CFI_REL_OFFSET es, 0*/
1778 +       pushl %ds
1779 +       CFI_ADJUST_CFA_OFFSET 4
1780 +       /*CFI_REL_OFFSET ds, 0*/
1781 +       pushl %eax
1782 +       CFI_ADJUST_CFA_OFFSET 4
1783 +       CFI_REL_OFFSET eax, 0
1784 +       pushl %ebp
1785 +       CFI_ADJUST_CFA_OFFSET 4
1786 +       CFI_REL_OFFSET ebp, 0
1787 +       pushl %edi
1788 +       CFI_ADJUST_CFA_OFFSET 4
1789 +       CFI_REL_OFFSET edi, 0
1790 +       pushl %esi
1791 +       CFI_ADJUST_CFA_OFFSET 4
1792 +       CFI_REL_OFFSET esi, 0
1793 +       pushl %edx
1794 +       CFI_ADJUST_CFA_OFFSET 4
1795 +       CFI_REL_OFFSET edx, 0
1796 +       pushl %ecx
1797 +       CFI_ADJUST_CFA_OFFSET 4
1798 +       CFI_REL_OFFSET ecx, 0
1799 +       pushl %ebx
1800 +       CFI_ADJUST_CFA_OFFSET 4
1801 +       CFI_REL_OFFSET ebx, 0
1802 +       cld
1803 +       pushl %fs
1804 +       CFI_ADJUST_CFA_OFFSET 4
1805 +       /*CFI_REL_OFFSET fs, 0*/
1806 +       movl $(__KERNEL_PERCPU), %ecx
1807 +       movl %ecx, %fs
1808 +       UNWIND_ESPFIX_STACK
1809 +       popl %ecx
1810 +       CFI_ADJUST_CFA_OFFSET -4
1811 +       /*CFI_REGISTER es, ecx*/
1812 +       movl PT_FS(%esp), %edi          # get the function address
1813 +       movl PT_ORIG_EAX(%esp), %edx    # get the error code
1814 +       movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
1815 +       mov  %ecx, PT_FS(%esp)
1816 +       /*CFI_REL_OFFSET fs, ES*/
1817 +       movl $(__USER_DS), %ecx
1818 +       movl %ecx, %ds
1819 +       movl %ecx, %es
1820 +       movl %esp,%eax                  # pt_regs pointer
1821 +       call *%edi
1822 +       jmp ret_from_exception
1823 +       CFI_ENDPROC
1824 +KPROBE_END(page_fault)
1825 +
1826 +ENTRY(coprocessor_error)
1827 +       RING0_INT_FRAME
1828 +       pushl $0
1829 +       CFI_ADJUST_CFA_OFFSET 4
1830 +       pushl $do_coprocessor_error
1831 +       CFI_ADJUST_CFA_OFFSET 4
1832 +       jmp error_code
1833 +       CFI_ENDPROC
1834 +END(coprocessor_error)
1835 +
1836 +ENTRY(simd_coprocessor_error)
1837 +       RING0_INT_FRAME
1838 +       pushl $0
1839 +       CFI_ADJUST_CFA_OFFSET 4
1840 +       pushl $do_simd_coprocessor_error
1841 +       CFI_ADJUST_CFA_OFFSET 4
1842 +       jmp error_code
1843 +       CFI_ENDPROC
1844 +END(simd_coprocessor_error)
1845 +
1846 +ENTRY(device_not_available)
1847 +       RING0_INT_FRAME
1848 +       pushl $-1                       # mark this as an int
1849 +       CFI_ADJUST_CFA_OFFSET 4
1850 +       SAVE_ALL
1851 +       GET_CR0_INTO_EAX
1852 +       testl $0x4, %eax                # EM (math emulation bit)
1853 +       jne device_not_available_emulate
1854 +       preempt_stop(CLBR_ANY)
1855 +       call math_state_restore
1856 +       jmp ret_from_exception
1857 +device_not_available_emulate:
1858 +       pushl $0                        # temporary storage for ORIG_EIP
1859 +       CFI_ADJUST_CFA_OFFSET 4
1860 +       call math_emulate
1861 +       addl $4, %esp
1862 +       CFI_ADJUST_CFA_OFFSET -4
1863 +       jmp ret_from_exception
1864 +       CFI_ENDPROC
1865 +END(device_not_available)
1866 +
1867 +/*
1868 + * Debug traps and NMI can happen at the one SYSENTER instruction
1869 + * that sets up the real kernel stack. Check here, since we can't
1870 + * allow the wrong stack to be used.
1871 + *
1872 + * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
1873 + * already pushed 3 words if it hits on the sysenter instruction:
1874 + * eflags, cs and eip.
1875 + *
1876 + * We just load the right stack, and push the three (known) values
1877 + * by hand onto the new stack - while updating the return eip past
1878 + * the instruction that would have done it for sysenter.
1879 + */
1880 +#define FIX_STACK(offset, ok, label)           \
1881 +       cmpw $__KERNEL_CS,4(%esp);              \
1882 +       jne ok;                                 \
1883 +label:                                         \
1884 +       movl TSS_sysenter_esp0+offset(%esp),%esp;       \
1885 +       CFI_DEF_CFA esp, 0;                     \
1886 +       CFI_UNDEFINED eip;                      \
1887 +       pushfl;                                 \
1888 +       CFI_ADJUST_CFA_OFFSET 4;                \
1889 +       pushl $__KERNEL_CS;                     \
1890 +       CFI_ADJUST_CFA_OFFSET 4;                \
1891 +       pushl $sysenter_past_esp;               \
1892 +       CFI_ADJUST_CFA_OFFSET 4;                \
1893 +       CFI_REL_OFFSET eip, 0
1894 +
1895 +KPROBE_ENTRY(debug)
1896 +       RING0_INT_FRAME
1897 +       cmpl $sysenter_entry,(%esp)
1898 +       jne debug_stack_correct
1899 +       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
1900 +debug_stack_correct:
1901 +       pushl $-1                       # mark this as an int
1902 +       CFI_ADJUST_CFA_OFFSET 4
1903 +       SAVE_ALL
1904 +       xorl %edx,%edx                  # error code 0
1905 +       movl %esp,%eax                  # pt_regs pointer
1906 +       call do_debug
1907 +       jmp ret_from_exception
1908 +       CFI_ENDPROC
1909 +KPROBE_END(debug)
1910 +
1911 +/*
1912 + * NMI is doubly nasty. It can happen _while_ we're handling
1913 + * a debug fault, and the debug fault hasn't yet been able to
1914 + * clear up the stack. So we first check whether we got an
1915 + * NMI on the sysenter entry path, but after that we need to
1916 + * check whether we got an NMI on the debug path where the debug
1917 + * fault happened on the sysenter path.
1918 + */
1919 +KPROBE_ENTRY(nmi)
1920 +       RING0_INT_FRAME
1921 +       pushl %eax
1922 +       CFI_ADJUST_CFA_OFFSET 4
1923 +       movl %ss, %eax
1924 +       cmpw $__ESPFIX_SS, %ax
1925 +       popl %eax
1926 +       CFI_ADJUST_CFA_OFFSET -4
1927 +       je nmi_espfix_stack
1928 +       cmpl $sysenter_entry,(%esp)
1929 +       je nmi_stack_fixup
1930 +       pushl %eax
1931 +       CFI_ADJUST_CFA_OFFSET 4
1932 +       movl %esp,%eax
1933 +       /* Do not access memory above the end of our stack page,
1934 +        * it might not exist.
1935 +        */
1936 +       andl $(THREAD_SIZE-1),%eax
1937 +       cmpl $(THREAD_SIZE-20),%eax
1938 +       popl %eax
1939 +       CFI_ADJUST_CFA_OFFSET -4
1940 +       jae nmi_stack_correct
1941 +       cmpl $sysenter_entry,12(%esp)
1942 +       je nmi_debug_stack_check
1943 +nmi_stack_correct:
1944 +       /* We have a RING0_INT_FRAME here */
1945 +       pushl %eax
1946 +       CFI_ADJUST_CFA_OFFSET 4
1947 +       SAVE_ALL
1948 +       xorl %edx,%edx          # zero error code
1949 +       movl %esp,%eax          # pt_regs pointer
1950 +       call do_nmi
1951 +       jmp restore_nocheck_notrace
1952 +       CFI_ENDPROC
1953 +
1954 +nmi_stack_fixup:
1955 +       RING0_INT_FRAME
1956 +       FIX_STACK(12,nmi_stack_correct, 1)
1957 +       jmp nmi_stack_correct
1958 +
1959 +nmi_debug_stack_check:
1960 +       /* We have a RING0_INT_FRAME here */
1961 +       cmpw $__KERNEL_CS,16(%esp)
1962 +       jne nmi_stack_correct
1963 +       cmpl $debug,(%esp)
1964 +       jb nmi_stack_correct
1965 +       cmpl $debug_esp_fix_insn,(%esp)
1966 +       ja nmi_stack_correct
1967 +       FIX_STACK(24,nmi_stack_correct, 1)
1968 +       jmp nmi_stack_correct
1969 +
1970 +nmi_espfix_stack:
1971 +       /* We have a RING0_INT_FRAME here.
1972 +        *
1973 +        * create the pointer to lss back
1974 +        */
1975 +       pushl %ss
1976 +       CFI_ADJUST_CFA_OFFSET 4
1977 +       pushl %esp
1978 +       CFI_ADJUST_CFA_OFFSET 4
1979 +       addw $4, (%esp)
1980 +       /* copy the iret frame of 12 bytes */
1981 +       .rept 3
1982 +       pushl 16(%esp)
1983 +       CFI_ADJUST_CFA_OFFSET 4
1984 +       .endr
1985 +       pushl %eax
1986 +       CFI_ADJUST_CFA_OFFSET 4
1987 +       SAVE_ALL
1988 +       FIXUP_ESPFIX_STACK              # %eax == %esp
1989 +       xorl %edx,%edx                  # zero error code
1990 +       call do_nmi
1991 +       RESTORE_REGS
1992 +       lss 12+4(%esp), %esp            # back to espfix stack
1993 +       CFI_ADJUST_CFA_OFFSET -24
1994 +1:     INTERRUPT_RETURN
1995 +       CFI_ENDPROC
1996 +.section __ex_table,"a"
1997 +       .align 4
1998 +       .long 1b,iret_exc
1999 +.previous
2000 +KPROBE_END(nmi)
2001 +
2002 +#ifdef CONFIG_PARAVIRT
2003 +ENTRY(native_iret)
2004 +1:     iret
2005 +.section __ex_table,"a"
2006 +       .align 4
2007 +       .long 1b,iret_exc
2008 +.previous
2009 +END(native_iret)
2010 +
2011 +ENTRY(native_irq_enable_sysexit)
2012 +       sti
2013 +       sysexit
2014 +END(native_irq_enable_sysexit)
2015 +#endif
2016 +
2017 +KPROBE_ENTRY(int3)
2018 +       RING0_INT_FRAME
2019 +       pushl $-1                       # mark this as an int
2020 +       CFI_ADJUST_CFA_OFFSET 4
2021 +       SAVE_ALL
2022 +       xorl %edx,%edx          # zero error code
2023 +       movl %esp,%eax          # pt_regs pointer
2024 +       call do_int3
2025 +       jmp ret_from_exception
2026 +       CFI_ENDPROC
2027 +KPROBE_END(int3)
2028 +
2029 +ENTRY(overflow)
2030 +       RING0_INT_FRAME
2031 +       pushl $0
2032 +       CFI_ADJUST_CFA_OFFSET 4
2033 +       pushl $do_overflow
2034 +       CFI_ADJUST_CFA_OFFSET 4
2035 +       jmp error_code
2036 +       CFI_ENDPROC
2037 +END(overflow)
2038 +
2039 +ENTRY(bounds)
2040 +       RING0_INT_FRAME
2041 +       pushl $0
2042 +       CFI_ADJUST_CFA_OFFSET 4
2043 +       pushl $do_bounds
2044 +       CFI_ADJUST_CFA_OFFSET 4
2045 +       jmp error_code
2046 +       CFI_ENDPROC
2047 +END(bounds)
2048 +
2049 +ENTRY(invalid_op)
2050 +       RING0_INT_FRAME
2051 +       pushl $0
2052 +       CFI_ADJUST_CFA_OFFSET 4
2053 +       pushl $do_invalid_op
2054 +       CFI_ADJUST_CFA_OFFSET 4
2055 +       jmp error_code
2056 +       CFI_ENDPROC
2057 +END(invalid_op)
2058 +
2059 +ENTRY(coprocessor_segment_overrun)
2060 +       RING0_INT_FRAME
2061 +       pushl $0
2062 +       CFI_ADJUST_CFA_OFFSET 4
2063 +       pushl $do_coprocessor_segment_overrun
2064 +       CFI_ADJUST_CFA_OFFSET 4
2065 +       jmp error_code
2066 +       CFI_ENDPROC
2067 +END(coprocessor_segment_overrun)
2068 +
2069 +ENTRY(invalid_TSS)
2070 +       RING0_EC_FRAME
2071 +       pushl $do_invalid_TSS
2072 +       CFI_ADJUST_CFA_OFFSET 4
2073 +       jmp error_code
2074 +       CFI_ENDPROC
2075 +END(invalid_TSS)
2076 +
2077 +ENTRY(segment_not_present)
2078 +       RING0_EC_FRAME
2079 +       pushl $do_segment_not_present
2080 +       CFI_ADJUST_CFA_OFFSET 4
2081 +       jmp error_code
2082 +       CFI_ENDPROC
2083 +END(segment_not_present)
2084 +
2085 +ENTRY(stack_segment)
2086 +       RING0_EC_FRAME
2087 +       pushl $do_stack_segment
2088 +       CFI_ADJUST_CFA_OFFSET 4
2089 +       jmp error_code
2090 +       CFI_ENDPROC
2091 +END(stack_segment)
2092 +
2093 +KPROBE_ENTRY(general_protection)
2094 +       RING0_EC_FRAME
2095 +       pushl $do_general_protection
2096 +       CFI_ADJUST_CFA_OFFSET 4
2097 +       jmp error_code
2098 +       CFI_ENDPROC
2099 +KPROBE_END(general_protection)
2100 +
2101 +ENTRY(alignment_check)
2102 +       RING0_EC_FRAME
2103 +       pushl $do_alignment_check
2104 +       CFI_ADJUST_CFA_OFFSET 4
2105 +       jmp error_code
2106 +       CFI_ENDPROC
2107 +END(alignment_check)
2108 +
2109 +ENTRY(divide_error)
2110 +       RING0_INT_FRAME
2111 +       pushl $0                        # no error code
2112 +       CFI_ADJUST_CFA_OFFSET 4
2113 +       pushl $do_divide_error
2114 +       CFI_ADJUST_CFA_OFFSET 4
2115 +       jmp error_code
2116 +       CFI_ENDPROC
2117 +END(divide_error)
2118 +
2119 +#ifdef CONFIG_X86_MCE
2120 +ENTRY(machine_check)
2121 +       RING0_INT_FRAME
2122 +       pushl $0
2123 +       CFI_ADJUST_CFA_OFFSET 4
2124 +       pushl machine_check_vector
2125 +       CFI_ADJUST_CFA_OFFSET 4
2126 +       jmp error_code
2127 +       CFI_ENDPROC
2128 +END(machine_check)
2129 +#endif
2130 +
2131 +ENTRY(spurious_interrupt_bug)
2132 +       RING0_INT_FRAME
2133 +       pushl $0
2134 +       CFI_ADJUST_CFA_OFFSET 4
2135 +       pushl $do_spurious_interrupt_bug
2136 +       CFI_ADJUST_CFA_OFFSET 4
2137 +       jmp error_code
2138 +       CFI_ENDPROC
2139 +END(spurious_interrupt_bug)
2140 +
2141 +ENTRY(kernel_thread_helper)
2142 +       pushl $0                # fake return address for unwinder
2143 +       CFI_STARTPROC
2144 +       movl %edx,%eax
2145 +       push %edx
2146 +       CFI_ADJUST_CFA_OFFSET 4
2147 +       call *%ebx
2148 +       push %eax
2149 +       CFI_ADJUST_CFA_OFFSET 4
2150 +       call do_exit
2151 +       CFI_ENDPROC
2152 +ENDPROC(kernel_thread_helper)
2153 +
2154 +.section .rodata,"a"
2155 +#include "syscall_table.S"
2156 +
2157 +syscall_table_size=(.-sys_call_table)
2158 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/arch/i386/mm/fault.c linux-2.6.22-590/arch/i386/mm/fault.c
2159 --- linux-2.6.22-580/arch/i386/mm/fault.c       2009-02-18 09:56:02.000000000 -0500
2160 +++ linux-2.6.22-590/arch/i386/mm/fault.c       2009-02-18 09:57:23.000000000 -0500
2161 @@ -60,6 +60,15 @@
2162                                           DIE_PAGE_FAULT, &args);
2163  }
2164  
2165 +
2166 +extern void (*rec_event)(void *,unsigned int);
2167 +struct event_spec {
2168 +       unsigned long pc;
2169 +       unsigned long dcookie; 
2170 +       unsigned count;
2171 +       unsigned char reason;
2172 +};
2173 +
2174  /*
2175   * Return EIP plus the CS segment base.  The segment limit is also
2176   * adjusted, clamped to the kernel/user address space (whichever is
2177 @@ -296,6 +305,8 @@
2178   *     bit 3 == 1 means use of reserved bit detected
2179   *     bit 4 == 1 means fault was an instruction fetch
2180   */
2181 +
2182 +
2183  fastcall void __kprobes do_page_fault(struct pt_regs *regs,
2184                                       unsigned long error_code)
2185  {
2186 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/block/ll_rw_blk.c linux-2.6.22-590/block/ll_rw_blk.c
2187 --- linux-2.6.22-580/block/ll_rw_blk.c  2009-02-18 09:55:48.000000000 -0500
2188 +++ linux-2.6.22-590/block/ll_rw_blk.c  2009-02-18 09:57:23.000000000 -0500
2189 @@ -30,6 +30,7 @@
2190  #include <linux/cpu.h>
2191  #include <linux/blktrace_api.h>
2192  #include <linux/fault-inject.h>
2193 +#include <linux/arrays.h>
2194  
2195  /*
2196   * for max sense size
2197 @@ -3102,6 +3103,13 @@
2198  
2199  #endif /* CONFIG_FAIL_MAKE_REQUEST */
2200  
2201 +extern void (*rec_event)(void *,unsigned int);
2202 +struct event_spec {
2203 +       unsigned long pc;
2204 +       unsigned long dcookie;
2205 +       unsigned count;
2206 +       unsigned char reason;
2207 +};
2208  /**
2209   * generic_make_request: hand a buffer to its device driver for I/O
2210   * @bio:  The bio describing the location in memory and on the device.
2211 @@ -3220,7 +3228,23 @@
2212                                 goto end_io;
2213                         }
2214                 }
2215 -
2216 +#ifdef CONFIG_CHOPSTIX
2217 +               if (rec_event) {
2218 +                       struct event event;
2219 +                       struct event_spec espec;
2220 +                       unsigned long eip;
2221 +                       
2222 +                       espec.reason = 0;/*request */
2223 +
2224 +                       eip = bio->bi_end_io;
2225 +                       event.event_data=&espec;
2226 +                       espec.pc=eip;
2227 +                       event.event_type=3; 
2228 +                       /* index in the event array currently set up */
2229 +                       /* make sure the counters are loaded in the order we want them to show up */
2230 +                       (*rec_event)(&event, bio->bi_size);
2231 +               }
2232 +#endif
2233                 ret = q->make_request_fn(q, bio);
2234         } while (ret);
2235  }
2236 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/chopstix.S linux-2.6.22-590/chopstix.S
2237 --- linux-2.6.22-580/chopstix.S 1969-12-31 19:00:00.000000000 -0500
2238 +++ linux-2.6.22-590/chopstix.S 2009-02-18 09:57:23.000000000 -0500
2239 @@ -0,0 +1,1055 @@
2240 +/*
2241 + *  linux/arch/i386/entry.S
2242 + *
2243 + *  Copyright (C) 1991, 1992  Linus Torvalds
2244 + */
2245 +
2246 +/*
2247 + * entry.S contains the system-call and fault low-level handling routines.
2248 + * This also contains the timer-interrupt handler, as well as all interrupts
2249 + * and faults that can result in a task-switch.
2250 + *
2251 + * NOTE: This code handles signal-recognition, which happens every time
2252 + * after a timer-interrupt and after each system call.
2253 + *
2254 + * I changed all the .align's to 4 (16 byte alignment), as that's faster
2255 + * on a 486.
2256 + *
2257 + * Stack layout in 'syscall_exit':
2258 + *     ptrace needs to have all regs on the stack.
2259 + *     if the order here is changed, it needs to be
2260 + *     updated in fork.c:copy_process, signal.c:do_signal,
2261 + *     ptrace.c and ptrace.h
2262 + *
2263 + *      0(%esp) - %ebx
2264 + *      4(%esp) - %ecx
2265 + *      8(%esp) - %edx
2266 + *       C(%esp) - %esi
2267 + *     10(%esp) - %edi
2268 + *     14(%esp) - %ebp
2269 + *     18(%esp) - %eax
2270 + *     1C(%esp) - %ds
2271 + *     20(%esp) - %es
2272 + *     24(%esp) - %fs
2273 + *     28(%esp) - orig_eax
2274 + *     2C(%esp) - %eip
2275 + *     30(%esp) - %cs
2276 + *     34(%esp) - %eflags
2277 + *     38(%esp) - %oldesp
2278 + *     3C(%esp) - %oldss
2279 + *
2280 + * "current" is in register %ebx during any slow entries.
2281 + */
2282 +
2283 +#include <linux/linkage.h>
2284 +#include <asm/thread_info.h>
2285 +#include <asm/irqflags.h>
2286 +#include <asm/errno.h>
2287 +#include <asm/segment.h>
2288 +#include <asm/smp.h>
2289 +#include <asm/page.h>
2290 +#include <asm/desc.h>
2291 +#include <asm/percpu.h>
2292 +#include <asm/dwarf2.h>
2293 +#include "irq_vectors.h"
2294 +
2295 +/*
2296 + * We use macros for low-level operations which need to be overridden
2297 + * for paravirtualization.  The following will never clobber any registers:
2298 + *   INTERRUPT_RETURN (aka. "iret")
2299 + *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
2300 + *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
2301 + *
2302 + * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
2303 + * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
2304 + * Allowing a register to be clobbered can shrink the paravirt replacement
2305 + * enough to patch inline, increasing performance.
2306 + */
2307 +
2308 +#define nr_syscalls ((syscall_table_size)/4)
2309 +
2310 +CF_MASK                = 0x00000001
2311 +TF_MASK                = 0x00000100
2312 +IF_MASK                = 0x00000200
2313 +DF_MASK                = 0x00000400 
2314 +NT_MASK                = 0x00004000
2315 +VM_MASK                = 0x00020000
2316 +
2317 +#ifdef CONFIG_PREEMPT
2318 +#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
2319 +#else
2320 +#define preempt_stop(clobbers)
2321 +#define resume_kernel          restore_nocheck
2322 +#endif
2323 +
2324 +.macro TRACE_IRQS_IRET
2325 +#ifdef CONFIG_TRACE_IRQFLAGS
2326 +       testl $IF_MASK,PT_EFLAGS(%esp)     # interrupts off?
2327 +       jz 1f
2328 +       TRACE_IRQS_ON
2329 +1:
2330 +#endif
2331 +.endm
2332 +
2333 +#ifdef CONFIG_VM86
2334 +#define resume_userspace_sig   check_userspace
2335 +#else
2336 +#define resume_userspace_sig   resume_userspace
2337 +#endif
2338 +
2339 +#define SAVE_ALL \
2340 +       cld; \
2341 +       pushl %fs; \
2342 +       CFI_ADJUST_CFA_OFFSET 4;\
2343 +       /*CFI_REL_OFFSET fs, 0;*/\
2344 +       pushl %es; \
2345 +       CFI_ADJUST_CFA_OFFSET 4;\
2346 +       /*CFI_REL_OFFSET es, 0;*/\
2347 +       pushl %ds; \
2348 +       CFI_ADJUST_CFA_OFFSET 4;\
2349 +       /*CFI_REL_OFFSET ds, 0;*/\
2350 +       pushl %eax; \
2351 +       CFI_ADJUST_CFA_OFFSET 4;\
2352 +       CFI_REL_OFFSET eax, 0;\
2353 +       pushl %ebp; \
2354 +       CFI_ADJUST_CFA_OFFSET 4;\
2355 +       CFI_REL_OFFSET ebp, 0;\
2356 +       pushl %edi; \
2357 +       CFI_ADJUST_CFA_OFFSET 4;\
2358 +       CFI_REL_OFFSET edi, 0;\
2359 +       pushl %esi; \
2360 +       CFI_ADJUST_CFA_OFFSET 4;\
2361 +       CFI_REL_OFFSET esi, 0;\
2362 +       pushl %edx; \
2363 +       CFI_ADJUST_CFA_OFFSET 4;\
2364 +       CFI_REL_OFFSET edx, 0;\
2365 +       pushl %ecx; \
2366 +       CFI_ADJUST_CFA_OFFSET 4;\
2367 +       CFI_REL_OFFSET ecx, 0;\
2368 +       pushl %ebx; \
2369 +       CFI_ADJUST_CFA_OFFSET 4;\
2370 +       CFI_REL_OFFSET ebx, 0;\
2371 +       movl $(__USER_DS), %edx; \
2372 +       movl %edx, %ds; \
2373 +       movl %edx, %es; \
2374 +       movl $(__KERNEL_PERCPU), %edx; \
2375 +       movl %edx, %fs
2376 +
2377 +#define RESTORE_INT_REGS \
2378 +       popl %ebx;      \
2379 +       CFI_ADJUST_CFA_OFFSET -4;\
2380 +       CFI_RESTORE ebx;\
2381 +       popl %ecx;      \
2382 +       CFI_ADJUST_CFA_OFFSET -4;\
2383 +       CFI_RESTORE ecx;\
2384 +       popl %edx;      \
2385 +       CFI_ADJUST_CFA_OFFSET -4;\
2386 +       CFI_RESTORE edx;\
2387 +       popl %esi;      \
2388 +       CFI_ADJUST_CFA_OFFSET -4;\
2389 +       CFI_RESTORE esi;\
2390 +       popl %edi;      \
2391 +       CFI_ADJUST_CFA_OFFSET -4;\
2392 +       CFI_RESTORE edi;\
2393 +       popl %ebp;      \
2394 +       CFI_ADJUST_CFA_OFFSET -4;\
2395 +       CFI_RESTORE ebp;\
2396 +       popl %eax;      \
2397 +       CFI_ADJUST_CFA_OFFSET -4;\
2398 +       CFI_RESTORE eax
2399 +
2400 +#define RESTORE_REGS   \
2401 +       RESTORE_INT_REGS; \
2402 +1:     popl %ds;       \
2403 +       CFI_ADJUST_CFA_OFFSET -4;\
2404 +       /*CFI_RESTORE ds;*/\
2405 +2:     popl %es;       \
2406 +       CFI_ADJUST_CFA_OFFSET -4;\
2407 +       /*CFI_RESTORE es;*/\
2408 +3:     popl %fs;       \
2409 +       CFI_ADJUST_CFA_OFFSET -4;\
2410 +       /*CFI_RESTORE fs;*/\
2411 +.pushsection .fixup,"ax";      \
2412 +4:     movl $0,(%esp); \
2413 +       jmp 1b;         \
2414 +5:     movl $0,(%esp); \
2415 +       jmp 2b;         \
2416 +6:     movl $0,(%esp); \
2417 +       jmp 3b;         \
2418 +.section __ex_table,"a";\
2419 +       .align 4;       \
2420 +       .long 1b,4b;    \
2421 +       .long 2b,5b;    \
2422 +       .long 3b,6b;    \
2423 +.popsection
2424 +
2425 +#define RING0_INT_FRAME \
2426 +       CFI_STARTPROC simple;\
2427 +       CFI_SIGNAL_FRAME;\
2428 +       CFI_DEF_CFA esp, 3*4;\
2429 +       /*CFI_OFFSET cs, -2*4;*/\
2430 +       CFI_OFFSET eip, -3*4
2431 +
2432 +#define RING0_EC_FRAME \
2433 +       CFI_STARTPROC simple;\
2434 +       CFI_SIGNAL_FRAME;\
2435 +       CFI_DEF_CFA esp, 4*4;\
2436 +       /*CFI_OFFSET cs, -2*4;*/\
2437 +       CFI_OFFSET eip, -3*4
2438 +
2439 +#define RING0_PTREGS_FRAME \
2440 +       CFI_STARTPROC simple;\
2441 +       CFI_SIGNAL_FRAME;\
2442 +       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
2443 +       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
2444 +       CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
2445 +       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
2446 +       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
2447 +       CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
2448 +       CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
2449 +       CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
2450 +       CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
2451 +       CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
2452 +       CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
2453 +       CFI_OFFSET ebx, PT_EBX-PT_OLDESP
2454 +
2455 +ENTRY(ret_from_fork)
2456 +       CFI_STARTPROC
2457 +       pushl %eax
2458 +       CFI_ADJUST_CFA_OFFSET 4
2459 +       call schedule_tail
2460 +       GET_THREAD_INFO(%ebp)
2461 +       popl %eax
2462 +       CFI_ADJUST_CFA_OFFSET -4
2463 +       pushl $0x0202                   # Reset kernel eflags
2464 +       CFI_ADJUST_CFA_OFFSET 4
2465 +       popfl
2466 +       CFI_ADJUST_CFA_OFFSET -4
2467 +       jmp syscall_exit
2468 +       CFI_ENDPROC
2469 +END(ret_from_fork)
2470 +
2471 +/*
2472 + * Return to user mode is not as complex as all this looks,
2473 + * but we want the default path for a system call return to
2474 + * go as quickly as possible which is why some of this is
2475 + * less clear than it otherwise should be.
2476 + */
2477 +
2478 +       # userspace resumption stub bypassing syscall exit tracing
2479 +       ALIGN
2480 +       RING0_PTREGS_FRAME
2481 +ret_from_exception:
2482 +       preempt_stop(CLBR_ANY)
2483 +ret_from_intr:
2484 +       GET_THREAD_INFO(%ebp)
2485 +check_userspace:
2486 +       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
2487 +       movb PT_CS(%esp), %al
2488 +       andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
2489 +       cmpl $USER_RPL, %eax
2490 +       jb resume_kernel                # not returning to v8086 or userspace
2491 +
2492 +ENTRY(resume_userspace)
2493 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
2494 +                                       # setting need_resched or sigpending
2495 +                                       # between sampling and the iret
2496 +       movl TI_flags(%ebp), %ecx
2497 +       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
2498 +                                       # int/exception return?
2499 +       jne work_pending
2500 +       jmp restore_all
2501 +END(ret_from_exception)
2502 +
2503 +#ifdef CONFIG_PREEMPT
2504 +ENTRY(resume_kernel)
2505 +       DISABLE_INTERRUPTS(CLBR_ANY)
2506 +       cmpl $0,TI_preempt_count(%ebp)  # non-zero preempt_count ?
2507 +       jnz restore_nocheck
2508 +need_resched:
2509 +       movl TI_flags(%ebp), %ecx       # need_resched set ?
2510 +       testb $_TIF_NEED_RESCHED, %cl
2511 +       jz restore_all
2512 +       testl $IF_MASK,PT_EFLAGS(%esp)  # interrupts off (exception path) ?
2513 +       jz restore_all
2514 +       call preempt_schedule_irq
2515 +       jmp need_resched
2516 +END(resume_kernel)
2517 +#endif
2518 +       CFI_ENDPROC
2519 +
2520 +/* SYSENTER_RETURN points to after the "sysenter" instruction in
2521 +   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
2522 +
2523 +       # sysenter call handler stub
2524 +ENTRY(sysenter_entry)
2525 +       CFI_STARTPROC simple
2526 +       CFI_SIGNAL_FRAME
2527 +       CFI_DEF_CFA esp, 0
2528 +       CFI_REGISTER esp, ebp
2529 +       movl TSS_sysenter_esp0(%esp),%esp
2530 +sysenter_past_esp:
2531 +       /*
2532 +        * No need to follow this irqs on/off section: the syscall
2533 +        * disabled irqs and here we enable it straight after entry:
2534 +        */
2535 +       ENABLE_INTERRUPTS(CLBR_NONE)
2536 +       pushl $(__USER_DS)
2537 +       CFI_ADJUST_CFA_OFFSET 4
2538 +       /*CFI_REL_OFFSET ss, 0*/
2539 +       pushl %ebp
2540 +       CFI_ADJUST_CFA_OFFSET 4
2541 +       CFI_REL_OFFSET esp, 0
2542 +       pushfl
2543 +       CFI_ADJUST_CFA_OFFSET 4
2544 +       pushl $(__USER_CS)
2545 +       CFI_ADJUST_CFA_OFFSET 4
2546 +       /*CFI_REL_OFFSET cs, 0*/
2547 +       /*
2548 +        * Push current_thread_info()->sysenter_return to the stack.
2549 +        * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
2550 +        * pushed above; +8 corresponds to copy_thread's esp0 setting.
2551 +        */
2552 +       pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
2553 +       CFI_ADJUST_CFA_OFFSET 4
2554 +       CFI_REL_OFFSET eip, 0
2555 +
2556 +/*
2557 + * Load the potential sixth argument from user stack.
2558 + * Careful about security.
2559 + */
2560 +       cmpl $__PAGE_OFFSET-3,%ebp
2561 +       jae syscall_fault
2562 +1:     movl (%ebp),%ebp
2563 +.section __ex_table,"a"
2564 +       .align 4
2565 +       .long 1b,syscall_fault
2566 +.previous
2567 +
2568 +       pushl %eax
2569 +       CFI_ADJUST_CFA_OFFSET 4
2570 +       SAVE_ALL
2571 +       GET_THREAD_INFO(%ebp)
2572 +
2573 +       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
2574 +       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
2575 +       jnz syscall_trace_entry
2576 +       cmpl $(nr_syscalls), %eax
2577 +       jae syscall_badsys
2578 +       call *sys_call_table(,%eax,4)
2579 +       movl %eax,PT_EAX(%esp)
2580 +       DISABLE_INTERRUPTS(CLBR_ANY)
2581 +       TRACE_IRQS_OFF
2582 +       movl TI_flags(%ebp), %ecx
2583 +       testw $_TIF_ALLWORK_MASK, %cx
2584 +       jne syscall_exit_work
2585 +/* if something modifies registers it must also disable sysexit */
2586 +       movl PT_EIP(%esp), %edx
2587 +       movl PT_OLDESP(%esp), %ecx
2588 +       xorl %ebp,%ebp
2589 +       TRACE_IRQS_ON
2590 +1:     mov  PT_FS(%esp), %fs
2591 +       ENABLE_INTERRUPTS_SYSEXIT
2592 +       CFI_ENDPROC
2593 +.pushsection .fixup,"ax"
2594 +2:     movl $0,PT_FS(%esp)
2595 +       jmp 1b
2596 +.section __ex_table,"a"
2597 +       .align 4
2598 +       .long 1b,2b
2599 +.popsection
2600 +ENDPROC(sysenter_entry)
2601 +
2602 +       # system call handler stub
2603 +ENTRY(system_call)
2604 +       RING0_INT_FRAME                 # can't unwind into user space anyway
2605 +       pushl %eax                      # save orig_eax
2606 +       CFI_ADJUST_CFA_OFFSET 4
2607 +       SAVE_ALL
2608 +       GET_THREAD_INFO(%ebp)
2609 +                                       # system call tracing in operation / emulation
2610 +       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
2611 +       testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
2612 +       jnz syscall_trace_entry
2613 +       cmpl $(nr_syscalls), %eax
2614 +       jae syscall_badsys
2615 +syscall_call:
2616 +    /* Move Chopstix syscall probe here */
2617 +    /* Save and clobber: eax, ecx, ebp  */
2618 +    pushl   %ebp
2619 +    movl    %esp, %ebp
2620 +    pushl   %eax
2621 +    pushl   %ecx
2622 +    subl    $16, %esp /* NOTE: this opening comment disables the probe below, through 'carry_on:' -- confirm intentional
2623 +    movl    rec_event, %ecx
2624 +    testl   %ecx, %ecx
2625 +    jz  carry_on
2626 +    movl    %eax, (SPEC_number-EVENT_SIZE)(%ebp)
2627 +    leal    SPEC_EVENT_SIZE(%ebp), %eax
2628 +    movl    %eax, EVENT_event_data(%ebp)
2629 +    GET_THREAD_INFO(%eax)
2630 +    movl    %eax, EVENT_task(%ebp)
2631 +    movl    $7, EVENT_event_type(%ebp)
2632 +    movl    rec_event, %edx
2633 +    movl    $1, 4(%esp)
2634 +    leal    -EVENT_SIZE(%ebp), %eax
2635 +    movl    %eax, (%esp)
2636 +    call    rec_event_asm
2637 +carry_on: */
2638 +    addl $16, %esp
2639 +    popl %ecx
2640 +    popl %eax
2641 +    popl %ebp
2642 +     /* End chopstix */
2643 +
2644 +       call *sys_call_table(,%eax,4)
2645 +       movl %eax,PT_EAX(%esp)          # store the return value
2646 +syscall_exit:
2647 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
2648 +                                       # setting need_resched or sigpending
2649 +                                       # between sampling and the iret
2650 +       TRACE_IRQS_OFF
2651 +       testl $TF_MASK,PT_EFLAGS(%esp)  # If tracing set singlestep flag on exit
2652 +       jz no_singlestep
2653 +       orl $_TIF_SINGLESTEP,TI_flags(%ebp)
2654 +no_singlestep:
2655 +       movl TI_flags(%ebp), %ecx
2656 +       testw $_TIF_ALLWORK_MASK, %cx   # current->work
2657 +       jne syscall_exit_work
2658 +
2659 +restore_all:
2660 +       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
2661 +       # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
2662 +       # are returning to the kernel.
2663 +       # See comments in process.c:copy_thread() for details.
2664 +       movb PT_OLDSS(%esp), %ah
2665 +       movb PT_CS(%esp), %al
2666 +       andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
2667 +       cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
2668 +       CFI_REMEMBER_STATE
2669 +       je ldt_ss                       # returning to user-space with LDT SS
2670 +restore_nocheck:
2671 +       TRACE_IRQS_IRET
2672 +restore_nocheck_notrace:
2673 +       RESTORE_REGS
2674 +       addl $4, %esp                   # skip orig_eax/error_code
2675 +       CFI_ADJUST_CFA_OFFSET -4
2676 +1:     INTERRUPT_RETURN
2677 +.section .fixup,"ax"
2678 +iret_exc:
2679 +       pushl $0                        # no error code
2680 +       pushl $do_iret_error
2681 +       jmp error_code
2682 +.previous
2683 +.section __ex_table,"a"
2684 +       .align 4
2685 +       .long 1b,iret_exc
2686 +.previous
2687 +
2688 +       CFI_RESTORE_STATE
2689 +ldt_ss:
2690 +       larl PT_OLDSS(%esp), %eax
2691 +       jnz restore_nocheck
2692 +       testl $0x00400000, %eax         # returning to 32bit stack?
2693 +       jnz restore_nocheck             # allright, normal return
2694 +
2695 +#ifdef CONFIG_PARAVIRT
2696 +       /*
2697 +        * The kernel can't run on a non-flat stack if paravirt mode
2698 +        * is active.  Rather than try to fixup the high bits of
2699 +        * ESP, bypass this code entirely.  This may break DOSemu
2700 +        * and/or Wine support in a paravirt VM, although the option
2701 +        * is still available to implement the setting of the high
2702 +        * 16-bits in the INTERRUPT_RETURN paravirt-op.
2703 +        */
2704 +       cmpl $0, paravirt_ops+PARAVIRT_enabled
2705 +       jne restore_nocheck
2706 +#endif
2707 +
2708 +       /* If returning to userspace with 16bit stack,
2709 +        * try to fix the higher word of ESP, as the CPU
2710 +        * won't restore it.
2711 +        * This is an "official" bug of all the x86-compatible
2712 +        * CPUs, which we can try to work around to make
2713 +        * dosemu and wine happy. */
2714 +       movl PT_OLDESP(%esp), %eax
2715 +       movl %esp, %edx
2716 +       call patch_espfix_desc
2717 +       pushl $__ESPFIX_SS
2718 +       CFI_ADJUST_CFA_OFFSET 4
2719 +       pushl %eax
2720 +       CFI_ADJUST_CFA_OFFSET 4
2721 +       DISABLE_INTERRUPTS(CLBR_EAX)
2722 +       TRACE_IRQS_OFF
2723 +       lss (%esp), %esp
2724 +       CFI_ADJUST_CFA_OFFSET -8
2725 +       jmp restore_nocheck
2726 +       CFI_ENDPROC
2727 +ENDPROC(system_call)
2728 +
2729 +       # perform work that needs to be done immediately before resumption
2730 +       ALIGN
2731 +       RING0_PTREGS_FRAME              # can't unwind into user space anyway
2732 +work_pending:
2733 +       testb $_TIF_NEED_RESCHED, %cl
2734 +       jz work_notifysig
2735 +work_resched:
2736 +       call schedule
2737 +       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
2738 +                                       # setting need_resched or sigpending
2739 +                                       # between sampling and the iret
2740 +       TRACE_IRQS_OFF
2741 +       movl TI_flags(%ebp), %ecx
2742 +       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
2743 +                                       # than syscall tracing?
2744 +       jz restore_all
2745 +       testb $_TIF_NEED_RESCHED, %cl
2746 +       jnz work_resched
2747 +
2748 +work_notifysig:                                # deal with pending signals and
2749 +                                       # notify-resume requests
2750 +#ifdef CONFIG_VM86
2751 +       testl $VM_MASK, PT_EFLAGS(%esp)
2752 +       movl %esp, %eax
2753 +       jne work_notifysig_v86          # returning to kernel-space or
2754 +                                       # vm86-space
2755 +       xorl %edx, %edx
2756 +       call do_notify_resume
2757 +       jmp resume_userspace_sig
2758 +
2759 +       ALIGN
2760 +work_notifysig_v86:
2761 +       pushl %ecx                      # save ti_flags for do_notify_resume
2762 +       CFI_ADJUST_CFA_OFFSET 4
2763 +       call save_v86_state             # %eax contains pt_regs pointer
2764 +       popl %ecx
2765 +       CFI_ADJUST_CFA_OFFSET -4
2766 +       movl %eax, %esp
2767 +#else
2768 +       movl %esp, %eax
2769 +#endif
2770 +       xorl %edx, %edx
2771 +       call do_notify_resume
2772 +       jmp resume_userspace_sig
2773 +END(work_pending)
2774 +
2775 +       # perform syscall exit tracing
2776 +       ALIGN
2777 +syscall_trace_entry:
2778 +       movl $-ENOSYS,PT_EAX(%esp)
2779 +       movl %esp, %eax
2780 +       xorl %edx,%edx
2781 +       call do_syscall_trace
2782 +       cmpl $0, %eax
2783 +       jne resume_userspace            # ret != 0 -> running under PTRACE_SYSEMU,
2784 +                                       # so must skip actual syscall
2785 +       movl PT_ORIG_EAX(%esp), %eax
2786 +       cmpl $(nr_syscalls), %eax
2787 +       jnae syscall_call
2788 +       jmp syscall_exit
2789 +END(syscall_trace_entry)
2790 +
2791 +       # perform syscall exit tracing
2792 +       ALIGN
2793 +syscall_exit_work:
2794 +       testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
2795 +       jz work_pending
2796 +       TRACE_IRQS_ON
2797 +       ENABLE_INTERRUPTS(CLBR_ANY)     # could let do_syscall_trace() call
2798 +                                       # schedule() instead
2799 +       movl %esp, %eax
2800 +       movl $1, %edx
2801 +       call do_syscall_trace
2802 +       jmp resume_userspace
2803 +END(syscall_exit_work)
2804 +       CFI_ENDPROC
2805 +
2806 +       RING0_INT_FRAME                 # can't unwind into user space anyway
2807 +syscall_fault:
2808 +       pushl %eax                      # save orig_eax
2809 +       CFI_ADJUST_CFA_OFFSET 4
2810 +       SAVE_ALL
2811 +       GET_THREAD_INFO(%ebp)
2812 +       movl $-EFAULT,PT_EAX(%esp)
2813 +       jmp resume_userspace
2814 +END(syscall_fault)
2815 +
2816 +syscall_badsys:
2817 +       movl $-ENOSYS,PT_EAX(%esp)
2818 +       jmp resume_userspace
2819 +END(syscall_badsys)
2820 +       CFI_ENDPROC
2821 +
2822 +#define FIXUP_ESPFIX_STACK \
2823 +       /* since we are on a wrong stack, we can't make this C code :( */ \
2824 +       PER_CPU(gdt_page, %ebx); \
2825 +       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
2826 +       addl %esp, %eax; \
2827 +       pushl $__KERNEL_DS; \
2828 +       CFI_ADJUST_CFA_OFFSET 4; \
2829 +       pushl %eax; \
2830 +       CFI_ADJUST_CFA_OFFSET 4; \
2831 +       lss (%esp), %esp; \
2832 +       CFI_ADJUST_CFA_OFFSET -8;
2833 +#define UNWIND_ESPFIX_STACK \
2834 +       movl %ss, %eax; \
2835 +       /* see if on espfix stack */ \
2836 +       cmpw $__ESPFIX_SS, %ax; \
2837 +       jne 27f; \
2838 +       movl $__KERNEL_DS, %eax; \
2839 +       movl %eax, %ds; \
2840 +       movl %eax, %es; \
2841 +       /* switch to normal stack */ \
2842 +       FIXUP_ESPFIX_STACK; \
2843 +27:;
2844 +
2845 +/*
2846 + * Build the entry stubs and pointer table with
2847 + * some assembler magic.
2848 + */
2849 +.data
2850 +ENTRY(interrupt)
2851 +.text
2852 +
2853 +ENTRY(irq_entries_start)
2854 +       RING0_INT_FRAME
2855 +vector=0
2856 +.rept NR_IRQS
2857 +       ALIGN
2858 + .if vector
2859 +       CFI_ADJUST_CFA_OFFSET -4
2860 + .endif
2861 +1:     pushl $~(vector)
2862 +       CFI_ADJUST_CFA_OFFSET 4
2863 +       jmp common_interrupt
2864 + .previous
2865 +       .long 1b
2866 + .text
2867 +vector=vector+1
2868 +.endr
2869 +END(irq_entries_start)
2870 +
2871 +.previous
2872 +END(interrupt)
2873 +.previous
2874 +
2875 +/*
2876 + * the CPU automatically disables interrupts when executing an IRQ vector,
2877 + * so IRQ-flags tracing has to follow that:
2878 + */
2879 +       ALIGN
2880 +common_interrupt:
2881 +       SAVE_ALL
2882 +       TRACE_IRQS_OFF
2883 +       movl %esp,%eax
2884 +       call do_IRQ
2885 +       jmp ret_from_intr
2886 +ENDPROC(common_interrupt)
2887 +       CFI_ENDPROC
2888 +
2889 +#define BUILD_INTERRUPT(name, nr)      \
2890 +ENTRY(name)                            \
2891 +       RING0_INT_FRAME;                \
2892 +       pushl $~(nr);                   \
2893 +       CFI_ADJUST_CFA_OFFSET 4;        \
2894 +       SAVE_ALL;                       \
2895 +       TRACE_IRQS_OFF                  \
2896 +       movl %esp,%eax;                 \
2897 +       call smp_##name;                \
2898 +       jmp ret_from_intr;              \
2899 +       CFI_ENDPROC;                    \
2900 +ENDPROC(name)
2901 +
2902 +/* The include is where all of the SMP etc. interrupts come from */
2903 +#include "entry_arch.h"
2904 +
2905 +KPROBE_ENTRY(page_fault)
2906 +       RING0_EC_FRAME
2907 +       pushl $do_page_fault
2908 +       CFI_ADJUST_CFA_OFFSET 4
2909 +       ALIGN
2910 +error_code:
2911 +       /* the function address is in %fs's slot on the stack */
2912 +       pushl %es
2913 +       CFI_ADJUST_CFA_OFFSET 4
2914 +       /*CFI_REL_OFFSET es, 0*/
2915 +       pushl %ds
2916 +       CFI_ADJUST_CFA_OFFSET 4
2917 +       /*CFI_REL_OFFSET ds, 0*/
2918 +       pushl %eax
2919 +       CFI_ADJUST_CFA_OFFSET 4
2920 +       CFI_REL_OFFSET eax, 0
2921 +       pushl %ebp
2922 +       CFI_ADJUST_CFA_OFFSET 4
2923 +       CFI_REL_OFFSET ebp, 0
2924 +       pushl %edi
2925 +       CFI_ADJUST_CFA_OFFSET 4
2926 +       CFI_REL_OFFSET edi, 0
2927 +       pushl %esi
2928 +       CFI_ADJUST_CFA_OFFSET 4
2929 +       CFI_REL_OFFSET esi, 0
2930 +       pushl %edx
2931 +       CFI_ADJUST_CFA_OFFSET 4
2932 +       CFI_REL_OFFSET edx, 0
2933 +       pushl %ecx
2934 +       CFI_ADJUST_CFA_OFFSET 4
2935 +       CFI_REL_OFFSET ecx, 0
2936 +       pushl %ebx
2937 +       CFI_ADJUST_CFA_OFFSET 4
2938 +       CFI_REL_OFFSET ebx, 0
2939 +       cld
2940 +       pushl %fs
2941 +       CFI_ADJUST_CFA_OFFSET 4
2942 +       /*CFI_REL_OFFSET fs, 0*/
2943 +       movl $(__KERNEL_PERCPU), %ecx
2944 +       movl %ecx, %fs
2945 +       UNWIND_ESPFIX_STACK
2946 +       popl %ecx
2947 +       CFI_ADJUST_CFA_OFFSET -4
2948 +       /*CFI_REGISTER es, ecx*/
2949 +       movl PT_FS(%esp), %edi          # get the function address
2950 +       movl PT_ORIG_EAX(%esp), %edx    # get the error code
2951 +       movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
2952 +       mov  %ecx, PT_FS(%esp)
2953 +       /*CFI_REL_OFFSET fs, ES*/
2954 +       movl $(__USER_DS), %ecx
2955 +       movl %ecx, %ds
2956 +       movl %ecx, %es
2957 +       movl %esp,%eax                  # pt_regs pointer
2958 +       call *%edi
2959 +       jmp ret_from_exception
2960 +       CFI_ENDPROC
2961 +KPROBE_END(page_fault)
2962 +
2963 +ENTRY(coprocessor_error)
2964 +       RING0_INT_FRAME
2965 +       pushl $0
2966 +       CFI_ADJUST_CFA_OFFSET 4
2967 +       pushl $do_coprocessor_error
2968 +       CFI_ADJUST_CFA_OFFSET 4
2969 +       jmp error_code
2970 +       CFI_ENDPROC
2971 +END(coprocessor_error)
2972 +
2973 +ENTRY(simd_coprocessor_error)
2974 +       RING0_INT_FRAME
2975 +       pushl $0
2976 +       CFI_ADJUST_CFA_OFFSET 4
2977 +       pushl $do_simd_coprocessor_error
2978 +       CFI_ADJUST_CFA_OFFSET 4
2979 +       jmp error_code
2980 +       CFI_ENDPROC
2981 +END(simd_coprocessor_error)
2982 +
2983 +ENTRY(device_not_available)
2984 +       RING0_INT_FRAME
2985 +       pushl $-1                       # mark this as an int
2986 +       CFI_ADJUST_CFA_OFFSET 4
2987 +       SAVE_ALL
2988 +       GET_CR0_INTO_EAX
2989 +       testl $0x4, %eax                # EM (math emulation bit)
2990 +       jne device_not_available_emulate
2991 +       preempt_stop(CLBR_ANY)
2992 +       call math_state_restore
2993 +       jmp ret_from_exception
2994 +device_not_available_emulate:
2995 +       pushl $0                        # temporary storage for ORIG_EIP
2996 +       CFI_ADJUST_CFA_OFFSET 4
2997 +       call math_emulate
2998 +       addl $4, %esp
2999 +       CFI_ADJUST_CFA_OFFSET -4
3000 +       jmp ret_from_exception
3001 +       CFI_ENDPROC
3002 +END(device_not_available)
3003 +
3004 +/*
3005 + * Debug traps and NMI can happen at the one SYSENTER instruction
3006 + * that sets up the real kernel stack. Check here, since we can't
3007 + * allow the wrong stack to be used.
3008 + *
3009 + * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
3010 + * already pushed 3 words if it hits on the sysenter instruction:
3011 + * eflags, cs and eip.
3012 + *
3013 + * We just load the right stack, and push the three (known) values
3014 + * by hand onto the new stack - while updating the return eip past
3015 + * the instruction that would have done it for sysenter.
3016 + */
3017 +#define FIX_STACK(offset, ok, label)           \
3018 +       cmpw $__KERNEL_CS,4(%esp);              \
3019 +       jne ok;                                 \
3020 +label:                                         \
3021 +       movl TSS_sysenter_esp0+offset(%esp),%esp;       \
3022 +       CFI_DEF_CFA esp, 0;                     \
3023 +       CFI_UNDEFINED eip;                      \
3024 +       pushfl;                                 \
3025 +       CFI_ADJUST_CFA_OFFSET 4;                \
3026 +       pushl $__KERNEL_CS;                     \
3027 +       CFI_ADJUST_CFA_OFFSET 4;                \
3028 +       pushl $sysenter_past_esp;               \
3029 +       CFI_ADJUST_CFA_OFFSET 4;                \
3030 +       CFI_REL_OFFSET eip, 0
3031 +
3032 +KPROBE_ENTRY(debug)
3033 +       RING0_INT_FRAME
3034 +       cmpl $sysenter_entry,(%esp)
3035 +       jne debug_stack_correct
3036 +       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
3037 +debug_stack_correct:
3038 +       pushl $-1                       # mark this as an int
3039 +       CFI_ADJUST_CFA_OFFSET 4
3040 +       SAVE_ALL
3041 +       xorl %edx,%edx                  # error code 0
3042 +       movl %esp,%eax                  # pt_regs pointer
3043 +       call do_debug
3044 +       jmp ret_from_exception
3045 +       CFI_ENDPROC
3046 +KPROBE_END(debug)
3047 +
3048 +/*
3049 + * NMI is doubly nasty. It can happen _while_ we're handling
3050 + * a debug fault, and the debug fault hasn't yet been able to
3051 + * clear up the stack. So we first check whether we got an
3052 + * NMI on the sysenter entry path, but after that we need to
3053 + * check whether we got an NMI on the debug path where the debug
3054 + * fault happened on the sysenter path.
3055 + */
3056 +KPROBE_ENTRY(nmi)
3057 +       RING0_INT_FRAME
3058 +       pushl %eax
3059 +       CFI_ADJUST_CFA_OFFSET 4
3060 +       movl %ss, %eax
3061 +       cmpw $__ESPFIX_SS, %ax
3062 +       popl %eax
3063 +       CFI_ADJUST_CFA_OFFSET -4
3064 +       je nmi_espfix_stack
3065 +       cmpl $sysenter_entry,(%esp)
3066 +       je nmi_stack_fixup
3067 +       pushl %eax
3068 +       CFI_ADJUST_CFA_OFFSET 4
3069 +       movl %esp,%eax
3070 +       /* Do not access memory above the end of our stack page,
3071 +        * it might not exist.
3072 +        */
3073 +       andl $(THREAD_SIZE-1),%eax
3074 +       cmpl $(THREAD_SIZE-20),%eax
3075 +       popl %eax
3076 +       CFI_ADJUST_CFA_OFFSET -4
3077 +       jae nmi_stack_correct
3078 +       cmpl $sysenter_entry,12(%esp)
3079 +       je nmi_debug_stack_check
3080 +nmi_stack_correct:
3081 +       /* We have a RING0_INT_FRAME here */
3082 +       pushl %eax
3083 +       CFI_ADJUST_CFA_OFFSET 4
3084 +       SAVE_ALL
3085 +       xorl %edx,%edx          # zero error code
3086 +       movl %esp,%eax          # pt_regs pointer
3087 +       call do_nmi
3088 +       jmp restore_nocheck_notrace
3089 +       CFI_ENDPROC
3090 +
3091 +nmi_stack_fixup:
3092 +       RING0_INT_FRAME
3093 +       FIX_STACK(12,nmi_stack_correct, 1)
3094 +       jmp nmi_stack_correct
3095 +
3096 +nmi_debug_stack_check:
3097 +       /* We have a RING0_INT_FRAME here */
3098 +       cmpw $__KERNEL_CS,16(%esp)
3099 +       jne nmi_stack_correct
3100 +       cmpl $debug,(%esp)
3101 +       jb nmi_stack_correct
3102 +       cmpl $debug_esp_fix_insn,(%esp)
3103 +       ja nmi_stack_correct
3104 +       FIX_STACK(24,nmi_stack_correct, 1)
3105 +       jmp nmi_stack_correct
3106 +
3107 +nmi_espfix_stack:
3108 +       /* We have a RING0_INT_FRAME here.
3109 +        *
3110 +        * create the pointer to lss back
3111 +        */
3112 +       pushl %ss
3113 +       CFI_ADJUST_CFA_OFFSET 4
3114 +       pushl %esp
3115 +       CFI_ADJUST_CFA_OFFSET 4
3116 +       addw $4, (%esp)
3117 +       /* copy the iret frame of 12 bytes */
3118 +       .rept 3
3119 +       pushl 16(%esp)
3120 +       CFI_ADJUST_CFA_OFFSET 4
3121 +       .endr
3122 +       pushl %eax
3123 +       CFI_ADJUST_CFA_OFFSET 4
3124 +       SAVE_ALL
3125 +       FIXUP_ESPFIX_STACK              # %eax == %esp
3126 +       xorl %edx,%edx                  # zero error code
3127 +       call do_nmi
3128 +       RESTORE_REGS
3129 +       lss 12+4(%esp), %esp            # back to espfix stack
3130 +       CFI_ADJUST_CFA_OFFSET -24
3131 +1:     INTERRUPT_RETURN
3132 +       CFI_ENDPROC
3133 +.section __ex_table,"a"
3134 +       .align 4
3135 +       .long 1b,iret_exc
3136 +.previous
3137 +KPROBE_END(nmi)
3138 +
3139 +#ifdef CONFIG_PARAVIRT
3140 +ENTRY(native_iret)
3141 +1:     iret
3142 +.section __ex_table,"a"
3143 +       .align 4
3144 +       .long 1b,iret_exc
3145 +.previous
3146 +END(native_iret)
3147 +
3148 +ENTRY(native_irq_enable_sysexit)
3149 +       sti
3150 +       sysexit
3151 +END(native_irq_enable_sysexit)
3152 +#endif
3153 +
3154 +KPROBE_ENTRY(int3)
3155 +       RING0_INT_FRAME
3156 +       pushl $-1                       # mark this as an int
3157 +       CFI_ADJUST_CFA_OFFSET 4
3158 +       SAVE_ALL
3159 +       xorl %edx,%edx          # zero error code
3160 +       movl %esp,%eax          # pt_regs pointer
3161 +       call do_int3
3162 +       jmp ret_from_exception
3163 +       CFI_ENDPROC
3164 +KPROBE_END(int3)
3165 +
3166 +ENTRY(overflow)
3167 +       RING0_INT_FRAME
3168 +       pushl $0
3169 +       CFI_ADJUST_CFA_OFFSET 4
3170 +       pushl $do_overflow
3171 +       CFI_ADJUST_CFA_OFFSET 4
3172 +       jmp error_code
3173 +       CFI_ENDPROC
3174 +END(overflow)
3175 +
3176 +ENTRY(bounds)
3177 +       RING0_INT_FRAME
3178 +       pushl $0
3179 +       CFI_ADJUST_CFA_OFFSET 4
3180 +       pushl $do_bounds
3181 +       CFI_ADJUST_CFA_OFFSET 4
3182 +       jmp error_code
3183 +       CFI_ENDPROC
3184 +END(bounds)
3185 +
3186 +ENTRY(invalid_op)
3187 +       RING0_INT_FRAME
3188 +       pushl $0
3189 +       CFI_ADJUST_CFA_OFFSET 4
3190 +       pushl $do_invalid_op
3191 +       CFI_ADJUST_CFA_OFFSET 4
3192 +       jmp error_code
3193 +       CFI_ENDPROC
3194 +END(invalid_op)
3195 +
3196 +ENTRY(coprocessor_segment_overrun)
3197 +       RING0_INT_FRAME
3198 +       pushl $0
3199 +       CFI_ADJUST_CFA_OFFSET 4
3200 +       pushl $do_coprocessor_segment_overrun
3201 +       CFI_ADJUST_CFA_OFFSET 4
3202 +       jmp error_code
3203 +       CFI_ENDPROC
3204 +END(coprocessor_segment_overrun)
3205 +
3206 +ENTRY(invalid_TSS)
3207 +       RING0_EC_FRAME
3208 +       pushl $do_invalid_TSS
3209 +       CFI_ADJUST_CFA_OFFSET 4
3210 +       jmp error_code
3211 +       CFI_ENDPROC
3212 +END(invalid_TSS)
3213 +
3214 +ENTRY(segment_not_present)
3215 +       RING0_EC_FRAME
3216 +       pushl $do_segment_not_present
3217 +       CFI_ADJUST_CFA_OFFSET 4
3218 +       jmp error_code
3219 +       CFI_ENDPROC
3220 +END(segment_not_present)
3221 +
3222 +ENTRY(stack_segment)
3223 +       RING0_EC_FRAME
3224 +       pushl $do_stack_segment
3225 +       CFI_ADJUST_CFA_OFFSET 4
3226 +       jmp error_code
3227 +       CFI_ENDPROC
3228 +END(stack_segment)
3229 +
3230 +KPROBE_ENTRY(general_protection)
3231 +       RING0_EC_FRAME
3232 +       pushl $do_general_protection
3233 +       CFI_ADJUST_CFA_OFFSET 4
3234 +       jmp error_code
3235 +       CFI_ENDPROC
3236 +KPROBE_END(general_protection)
3237 +
3238 +ENTRY(alignment_check)
3239 +       RING0_EC_FRAME
3240 +       pushl $do_alignment_check
3241 +       CFI_ADJUST_CFA_OFFSET 4
3242 +       jmp error_code
3243 +       CFI_ENDPROC
3244 +END(alignment_check)
3245 +
3246 +ENTRY(divide_error)
3247 +       RING0_INT_FRAME
3248 +       pushl $0                        # no error code
3249 +       CFI_ADJUST_CFA_OFFSET 4
3250 +       pushl $do_divide_error
3251 +       CFI_ADJUST_CFA_OFFSET 4
3252 +       jmp error_code
3253 +       CFI_ENDPROC
3254 +END(divide_error)
3255 +
3256 +#ifdef CONFIG_X86_MCE
3257 +ENTRY(machine_check)
3258 +       RING0_INT_FRAME
3259 +       pushl $0
3260 +       CFI_ADJUST_CFA_OFFSET 4
3261 +       pushl machine_check_vector
3262 +       CFI_ADJUST_CFA_OFFSET 4
3263 +       jmp error_code
3264 +       CFI_ENDPROC
3265 +END(machine_check)
3266 +#endif
3267 +
3268 +ENTRY(spurious_interrupt_bug)
3269 +       RING0_INT_FRAME
3270 +       pushl $0
3271 +       CFI_ADJUST_CFA_OFFSET 4
3272 +       pushl $do_spurious_interrupt_bug
3273 +       CFI_ADJUST_CFA_OFFSET 4
3274 +       jmp error_code
3275 +       CFI_ENDPROC
3276 +END(spurious_interrupt_bug)
3277 +
3278 +ENTRY(kernel_thread_helper)
3279 +       pushl $0                # fake return address for unwinder
3280 +       CFI_STARTPROC
3281 +       movl %edx,%eax
3282 +       push %edx
3283 +       CFI_ADJUST_CFA_OFFSET 4
3284 +       call *%ebx
3285 +       push %eax
3286 +       CFI_ADJUST_CFA_OFFSET 4
3287 +       call do_exit
3288 +       CFI_ENDPROC
3289 +ENDPROC(kernel_thread_helper)
3290 +
3291 +.section .rodata,"a"
3292 +#include "syscall_table.S"
3293 +
3294 +syscall_table_size=(.-sys_call_table)
3295 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/drivers/oprofile/cpu_buffer.c linux-2.6.22-590/drivers/oprofile/cpu_buffer.c
3296 --- linux-2.6.22-580/drivers/oprofile/cpu_buffer.c      2007-07-08 19:32:17.000000000 -0400
3297 +++ linux-2.6.22-590/drivers/oprofile/cpu_buffer.c      2009-02-18 09:57:23.000000000 -0500
3298 @@ -21,6 +21,7 @@
3299  #include <linux/oprofile.h>
3300  #include <linux/vmalloc.h>
3301  #include <linux/errno.h>
3302 +#include <linux/arrays.h>
3303   
3304  #include "event_buffer.h"
3305  #include "cpu_buffer.h"
3306 @@ -143,6 +144,17 @@
3307                 b->head_pos = 0;
3308  }
3309  
3310 +#ifdef CONFIG_CHOPSTIX
3311 +
3312 +struct event_spec {
3313 +       unsigned int pc;
3314 +       unsigned long dcookie;
3315 +       unsigned count;
3316 +};
3317 +
3318 +extern void (*rec_event)(void *,unsigned int);
3319 +#endif
3320 +
3321  static inline void
3322  add_sample(struct oprofile_cpu_buffer * cpu_buf,
3323             unsigned long pc, unsigned long event)
3324 @@ -151,6 +163,7 @@
3325         entry->eip = pc;
3326         entry->event = event;
3327         increment_head(cpu_buf);
3328 +
3329  }
3330  
3331  static inline void
3332 @@ -241,8 +254,28 @@
3333  {
3334         int is_kernel = !user_mode(regs);
3335         unsigned long pc = profile_pc(regs);
3336 +       int res=0;
3337  
3338 +#ifdef CONFIG_CHOPSTIX
3339 +       if (rec_event) {
3340 +               struct event esig;
3341 +               struct event_spec espec;
3342 +               esig.task = current;
3343 +               espec.pc=pc;
3344 +               espec.count=1;
3345 +               esig.event_data=&espec;
3346 +               esig.event_type=event; /* index in the event array currently set up */
3347 +                                       /* make sure the counters are loaded in the order we want them to show up */ 
3348 +               (*rec_event)(&esig, 1);
3349 +       }
3350 +       else {
3351         oprofile_add_ext_sample(pc, regs, event, is_kernel);
3352 +       }
3353 +#else
3354 +       oprofile_add_ext_sample(pc, regs, event, is_kernel);
3355 +#endif
3356 +
3357 +
3358  }
3359  
3360  void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
3361 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/evsend.S linux-2.6.22-590/evsend.S
3362 --- linux-2.6.22-580/evsend.S   1969-12-31 19:00:00.000000000 -0500
3363 +++ linux-2.6.22-590/evsend.S   2009-02-18 09:57:23.000000000 -0500
3364 @@ -0,0 +1,51 @@
3365 +       .file   "evsend.c"
3366 +.globl num
3367 +       .data
3368 +       .align 4
3369 +       .type   num, @object
3370 +       .size   num, 4
3371 +num:
3372 +       .long   5
3373 +       .text
3374 +.globl main
3375 +       .type   main, @function
3376 +main:
3377 +       leal    4(%esp), %ecx
3378 +       andl    $-16, %esp
3379 +       pushl   -4(%ecx)
3380 +       pushl   %ebp
3381 +       movl    %esp, %ebp
3382 +       pushl   %ecx
3383 +       subl    $68, %esp
3384 +       movl    rec_event, %eax
3385 +       testl   %eax, %eax
3386 +       je      .L5
3387 +       movl    num, %eax
3388 +       movzwl  %ax, %eax
3389 +       movw    %ax, -36(%ebp)
3390 +       movl    current, %eax
3391 +       movl    (%eax), %eax
3392 +       andl    $4096, %eax
3393 +       movl    %eax, -8(%ebp)
3394 +       leal    -48(%ebp), %eax
3395 +       movl    %eax, -24(%ebp)
3396 +       movl    current, %eax
3397 +       movl    %eax, -12(%ebp)
3398 +       movl    -8(%ebp), %eax
3399 +       movl    %eax, -48(%ebp)
3400 +       movl    $7, -16(%ebp)
3401 +       movl    rec_event, %edx
3402 +       movl    $1, 4(%esp)
3403 +       leal    -32(%ebp), %eax
3404 +       movl    %eax, (%esp)
3405 +       call    *%edx
3406 +.L5:
3407 +       addl    $68, %esp
3408 +       popl    %ecx
3409 +       popl    %ebp
3410 +       leal    -4(%ecx), %esp
3411 +       ret
3412 +       .size   main, .-main
3413 +       .comm   current,4,4
3414 +       .ident  "GCC: (GNU) 4.1.1 (Gentoo 4.1.1-r3)"
3415 +       .section        .note.GNU-stack,"",@progbits
3416 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/evsend.c linux-2.6.22-590/evsend.c
3417 --- linux-2.6.22-580/evsend.c   1969-12-31 19:00:00.000000000 -0500
3418 +++ linux-2.6.22-590/evsend.c   2009-02-18 09:57:23.000000000 -0500
3419 @@ -0,0 +1,43 @@
3420 +#include <linux/list.h>
3421 +
3422 +extern void (*rec_event)(void *,unsigned int);
3423 +struct event_spec {
3424 +       unsigned long pc;
3425 +       unsigned long dcookie;
3426 +       unsigned count;
3427 +       unsigned short number;
3428 +};
3429 +
3430 +struct event {
3431 +       struct list_head link;
3432 +       void *event_data;
3433 +       unsigned int count;
3434 +       unsigned int event_type;
3435 +       struct task_struct *task;
3436 +};
3437 +
3438 +int num=5;
3439 +
3440 +struct task_struct {
3441 +    struct thread_type {
3442 +        unsigned esp;
3443 +    } thread;
3444 +} *current;
3445 +
3446 +int main() {
3447 +    if (rec_event) {
3448 +        struct event event;
3449 +        struct event_spec espec;
3450 +        unsigned long eip;
3451 +
3452 +        espec.number = num;
3453 +        eip = current->thread.esp & 4096;
3454 +        event.event_data=&espec;
3455 +        event.task=current;
3456 +        espec.pc=eip;
3457 +        event.event_type=7; 
3458 +        /* index in the event array currently set up */
3459 +        /* make sure the counters are loaded in the order we want them to show up*/ 
3460 +        (*rec_event)(&event, 1);
3461 +    }
3462 +}
3463 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/fs/bio.c linux-2.6.22-590/fs/bio.c
3464 --- linux-2.6.22-580/fs/bio.c   2007-07-08 19:32:17.000000000 -0400
3465 +++ linux-2.6.22-590/fs/bio.c   2009-02-18 09:57:23.000000000 -0500
3466 @@ -27,6 +27,7 @@
3467  #include <linux/workqueue.h>
3468  #include <linux/blktrace_api.h>
3469  #include <scsi/sg.h>           /* for struct sg_iovec */
3470 +#include <linux/arrays.h>
3471  
3472  #define BIO_POOL_SIZE 2
3473  
3474 @@ -47,6 +48,7 @@
3475         struct kmem_cache *slab;
3476  };
3477  
3478 +
3479  /*
3480   * if you change this list, also change bvec_alloc or things will
3481   * break badly! cannot be bigger than what you can fit into an
3482 @@ -999,6 +1001,14 @@
3483         }
3484  }
3485  
3486 +struct event_spec {
3487 +       unsigned long pc;
3488 +       unsigned long dcookie;
3489 +       unsigned count;
3490 +       unsigned char reason;
3491 +};
3492 +
3493 +extern void (*rec_event)(void *,unsigned int);
3494  /**
3495   * bio_endio - end I/O on a bio
3496   * @bio:       bio
3497 @@ -1028,6 +1038,24 @@
3498         bio->bi_size -= bytes_done;
3499         bio->bi_sector += (bytes_done >> 9);
3500  
3501 +#ifdef CONFIG_CHOPSTIX
3502 +               if (rec_event) {
3503 +                       struct event event;
3504 +                       struct event_spec espec;
3505 +                       unsigned long eip;
3506 +                       
3507 +                       espec.reason = 1;/*response */
3508 +
3509 +                       eip = bio->bi_end_io;
3510 +                       event.event_data=&espec;
3511 +                       espec.pc=eip;
3512 +                       event.event_type=3; 
3513 +                       /* index in the event array currently set up */
3514 +                       /* make sure the counters are loaded in the order we want them to show up*/ 
3515 +                       (*rec_event)(&event, bytes_done);
3516 +               }
3517 +#endif
3518 +
3519         if (bio->bi_end_io)
3520                 bio->bi_end_io(bio, bytes_done, error);
3521  }
3522 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/fs/exec.c linux-2.6.22-590/fs/exec.c
3523 --- linux-2.6.22-580/fs/exec.c  2009-02-18 09:56:02.000000000 -0500
3524 +++ linux-2.6.22-590/fs/exec.c  2009-02-18 09:57:23.000000000 -0500
3525 @@ -27,6 +27,7 @@
3526  #include <linux/mman.h>
3527  #include <linux/a.out.h>
3528  #include <linux/stat.h>
3529 +#include <linux/dcookies.h>
3530  #include <linux/fcntl.h>
3531  #include <linux/smp_lock.h>
3532  #include <linux/init.h>
3533 @@ -38,7 +39,7 @@
3534  #include <linux/binfmts.h>
3535  #include <linux/swap.h>
3536  #include <linux/utsname.h>
3537 -#include <linux/pid_namespace.h>
3538 +/*#include <linux/pid_namespace.h>*/
3539  #include <linux/module.h>
3540  #include <linux/namei.h>
3541  #include <linux/proc_fs.h>
3542 @@ -488,6 +489,12 @@
3543  
3544         if (!err) {
3545                 struct inode *inode = nd.dentry->d_inode;
3546 +#ifdef CONFIG_CHOPSTIX
3547 +               unsigned long cookie;
3548 +               if (!nd.dentry->d_cookie)
3549 +                       get_dcookie(nd.dentry, nd.mnt, &cookie);
3550 +#endif
3551 +
3552                 file = ERR_PTR(-EACCES);
3553                 if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
3554                     S_ISREG(inode->i_mode)) {
3555 @@ -627,8 +634,10 @@
3556          * Reparenting needs write_lock on tasklist_lock,
3557          * so it is safe to do it under read_lock.
3558          */
3559 +       /*
3560         if (unlikely(tsk->group_leader == child_reaper(tsk)))
3561                 tsk->nsproxy->pid_ns->child_reaper = tsk;
3562 +               */
3563  
3564         zap_other_threads(tsk);
3565         read_unlock(&tasklist_lock);
3566 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/fs/exec.c.orig linux-2.6.22-590/fs/exec.c.orig
3567 --- linux-2.6.22-580/fs/exec.c.orig     1969-12-31 19:00:00.000000000 -0500
3568 +++ linux-2.6.22-590/fs/exec.c.orig     2009-02-18 09:56:02.000000000 -0500
3569 @@ -0,0 +1,1590 @@
3570 +/*
3571 + *  linux/fs/exec.c
3572 + *
3573 + *  Copyright (C) 1991, 1992  Linus Torvalds
3574 + */
3575 +
3576 +/*
3577 + * #!-checking implemented by tytso.
3578 + */
3579 +/*
3580 + * Demand-loading implemented 01.12.91 - no need to read anything but
3581 + * the header into memory. The inode of the executable is put into
3582 + * "current->executable", and page faults do the actual loading. Clean.
3583 + *
3584 + * Once more I can proudly say that linux stood up to being changed: it
3585 + * was less than 2 hours work to get demand-loading completely implemented.
3586 + *
3587 + * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
3588 + * current->executable is only used by the procfs.  This allows a dispatch
3589 + * table to check for several different types  of binary formats.  We keep
3590 + * trying until we recognize the file or we run out of supported binary
3591 + * formats. 
3592 + */
3593 +
3594 +#include <linux/slab.h>
3595 +#include <linux/file.h>
3596 +#include <linux/mman.h>
3597 +#include <linux/a.out.h>
3598 +#include <linux/stat.h>
3599 +#include <linux/fcntl.h>
3600 +#include <linux/smp_lock.h>
3601 +#include <linux/init.h>
3602 +#include <linux/pagemap.h>
3603 +#include <linux/highmem.h>
3604 +#include <linux/spinlock.h>
3605 +#include <linux/key.h>
3606 +#include <linux/personality.h>
3607 +#include <linux/binfmts.h>
3608 +#include <linux/swap.h>
3609 +#include <linux/utsname.h>
3610 +#include <linux/pid_namespace.h>
3611 +#include <linux/module.h>
3612 +#include <linux/namei.h>
3613 +#include <linux/proc_fs.h>
3614 +#include <linux/ptrace.h>
3615 +#include <linux/mount.h>
3616 +#include <linux/security.h>
3617 +#include <linux/syscalls.h>
3618 +#include <linux/rmap.h>
3619 +#include <linux/tsacct_kern.h>
3620 +#include <linux/cn_proc.h>
3621 +#include <linux/audit.h>
3622 +#include <linux/signalfd.h>
3623 +#include <linux/vs_memory.h>
3624 +
3625 +#include <asm/uaccess.h>
3626 +#include <asm/mmu_context.h>
3627 +
3628 +#ifdef CONFIG_KMOD
3629 +#include <linux/kmod.h>
3630 +#endif
3631 +
3632 +int core_uses_pid;
3633 +char core_pattern[CORENAME_MAX_SIZE] = "core";
3634 +int suid_dumpable = 0;
3635 +
3636 +EXPORT_SYMBOL(suid_dumpable);
3637 +/* The maximal length of core_pattern is also specified in sysctl.c */
3638 +
3639 +static struct linux_binfmt *formats;
3640 +static DEFINE_RWLOCK(binfmt_lock);
3641 +
3642 +int register_binfmt(struct linux_binfmt * fmt)
3643 +{
3644 +       struct linux_binfmt ** tmp = &formats;
3645 +
3646 +       if (!fmt)
3647 +               return -EINVAL;
3648 +       if (fmt->next)
3649 +               return -EBUSY;
3650 +       write_lock(&binfmt_lock);
3651 +       while (*tmp) {
3652 +               if (fmt == *tmp) {
3653 +                       write_unlock(&binfmt_lock);
3654 +                       return -EBUSY;
3655 +               }
3656 +               tmp = &(*tmp)->next;
3657 +       }
3658 +       fmt->next = formats;
3659 +       formats = fmt;
3660 +       write_unlock(&binfmt_lock);
3661 +       return 0;       
3662 +}
3663 +
3664 +EXPORT_SYMBOL(register_binfmt);
3665 +
3666 +int unregister_binfmt(struct linux_binfmt * fmt)
3667 +{
3668 +       struct linux_binfmt ** tmp = &formats;
3669 +
3670 +       write_lock(&binfmt_lock);
3671 +       while (*tmp) {
3672 +               if (fmt == *tmp) {
3673 +                       *tmp = fmt->next;
3674 +                       fmt->next = NULL;
3675 +                       write_unlock(&binfmt_lock);
3676 +                       return 0;
3677 +               }
3678 +               tmp = &(*tmp)->next;
3679 +       }
3680 +       write_unlock(&binfmt_lock);
3681 +       return -EINVAL;
3682 +}
3683 +
3684 +EXPORT_SYMBOL(unregister_binfmt);
3685 +
3686 +static inline void put_binfmt(struct linux_binfmt * fmt)
3687 +{
3688 +       module_put(fmt->module);
3689 +}
3690 +
3691 +/*
3692 + * Note that a shared library must be both readable and executable due to
3693 + * security reasons.
3694 + *
3695 + * Also note that we take the address to load from from the file itself.
3696 + */
3697 +asmlinkage long sys_uselib(const char __user * library)
3698 +{
3699 +       struct file * file;
3700 +       struct nameidata nd;
3701 +       int error;
3702 +
3703 +       error = __user_path_lookup_open(library, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
3704 +       if (error)
3705 +               goto out;
3706 +
3707 +       error = -EACCES;
3708 +       if (nd.mnt->mnt_flags & MNT_NOEXEC)
3709 +               goto exit;
3710 +       error = -EINVAL;
3711 +       if (!S_ISREG(nd.dentry->d_inode->i_mode))
3712 +               goto exit;
3713 +
3714 +       error = vfs_permission(&nd, MAY_READ | MAY_EXEC);
3715 +       if (error)
3716 +               goto exit;
3717 +
3718 +       file = nameidata_to_filp(&nd, O_RDONLY);
3719 +       error = PTR_ERR(file);
3720 +       if (IS_ERR(file))
3721 +               goto out;
3722 +
3723 +       error = -ENOEXEC;
3724 +       if(file->f_op) {
3725 +               struct linux_binfmt * fmt;
3726 +
3727 +               read_lock(&binfmt_lock);
3728 +               for (fmt = formats ; fmt ; fmt = fmt->next) {
3729 +                       if (!fmt->load_shlib)
3730 +                               continue;
3731 +                       if (!try_module_get(fmt->module))
3732 +                               continue;
3733 +                       read_unlock(&binfmt_lock);
3734 +                       error = fmt->load_shlib(file);
3735 +                       read_lock(&binfmt_lock);
3736 +                       put_binfmt(fmt);
3737 +                       if (error != -ENOEXEC)
3738 +                               break;
3739 +               }
3740 +               read_unlock(&binfmt_lock);
3741 +       }
3742 +       fput(file);
3743 +out:
3744 +       return error;
3745 +exit:
3746 +       release_open_intent(&nd);
3747 +       path_release(&nd);
3748 +       goto out;
3749 +}
3750 +
3751 +/*
3752 + * count() counts the number of strings in array ARGV.
3753 + */
3754 +static int count(char __user * __user * argv, int max)
3755 +{
3756 +       int i = 0;
3757 +
3758 +       if (argv != NULL) {
3759 +               for (;;) {
3760 +                       char __user * p;
3761 +
3762 +                       if (get_user(p, argv))
3763 +                               return -EFAULT;
3764 +                       if (!p)
3765 +                               break;
3766 +                       argv++;
3767 +                       if(++i > max)
3768 +                               return -E2BIG;
3769 +                       cond_resched();
3770 +               }
3771 +       }
3772 +       return i;
3773 +}
3774 +
3775 +/*
3776 + * 'copy_strings()' copies argument/environment strings from user
3777 + * memory to free pages in kernel mem. These are in a format ready
3778 + * to be put directly into the top of new user memory.
3779 + */
3780 +static int copy_strings(int argc, char __user * __user * argv,
3781 +                       struct linux_binprm *bprm)
3782 +{
3783 +       struct page *kmapped_page = NULL;
3784 +       char *kaddr = NULL;
3785 +       int ret;
3786 +
3787 +       while (argc-- > 0) {
3788 +               char __user *str;
3789 +               int len;
3790 +               unsigned long pos;
3791 +
3792 +               if (get_user(str, argv+argc) ||
3793 +                               !(len = strnlen_user(str, bprm->p))) {
3794 +                       ret = -EFAULT;
3795 +                       goto out;
3796 +               }
3797 +
3798 +               if (bprm->p < len)  {
3799 +                       ret = -E2BIG;
3800 +                       goto out;
3801 +               }
3802 +
3803 +               bprm->p -= len;
3804 +               /* XXX: add architecture specific overflow check here. */
3805 +               pos = bprm->p;
3806 +
3807 +               while (len > 0) {
3808 +                       int i, new, err;
3809 +                       int offset, bytes_to_copy;
3810 +                       struct page *page;
3811 +
3812 +                       offset = pos % PAGE_SIZE;
3813 +                       i = pos/PAGE_SIZE;
3814 +                       page = bprm->page[i];
3815 +                       new = 0;
3816 +                       if (!page) {
3817 +                               page = alloc_page(GFP_HIGHUSER);
3818 +                               bprm->page[i] = page;
3819 +                               if (!page) {
3820 +                                       ret = -ENOMEM;
3821 +                                       goto out;
3822 +                               }
3823 +                               new = 1;
3824 +                       }
3825 +
3826 +                       if (page != kmapped_page) {
3827 +                               if (kmapped_page)
3828 +                                       kunmap(kmapped_page);
3829 +                               kmapped_page = page;
3830 +                               kaddr = kmap(kmapped_page);
3831 +                       }
3832 +                       if (new && offset)
3833 +                               memset(kaddr, 0, offset);
3834 +                       bytes_to_copy = PAGE_SIZE - offset;
3835 +                       if (bytes_to_copy > len) {
3836 +                               bytes_to_copy = len;
3837 +                               if (new)
3838 +                                       memset(kaddr+offset+len, 0,
3839 +                                               PAGE_SIZE-offset-len);
3840 +                       }
3841 +                       err = copy_from_user(kaddr+offset, str, bytes_to_copy);
3842 +                       if (err) {
3843 +                               ret = -EFAULT;
3844 +                               goto out;
3845 +                       }
3846 +
3847 +                       pos += bytes_to_copy;
3848 +                       str += bytes_to_copy;
3849 +                       len -= bytes_to_copy;
3850 +               }
3851 +       }
3852 +       ret = 0;
3853 +out:
3854 +       if (kmapped_page)
3855 +               kunmap(kmapped_page);
3856 +       return ret;
3857 +}
3858 +
3859 +/*
3860 + * Like copy_strings, but get argv and its values from kernel memory.
3861 + */
3862 +int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
3863 +{
3864 +       int r;
3865 +       mm_segment_t oldfs = get_fs();
3866 +       set_fs(KERNEL_DS);
3867 +       r = copy_strings(argc, (char __user * __user *)argv, bprm);
3868 +       set_fs(oldfs);
3869 +       return r;
3870 +}
3871 +
3872 +EXPORT_SYMBOL(copy_strings_kernel);
3873 +
3874 +#ifdef CONFIG_MMU
3875 +/*
3876 + * This routine is used to map in a page into an address space: needed by
3877 + * execve() for the initial stack and environment pages.
3878 + *
3879 + * vma->vm_mm->mmap_sem is held for writing.
3880 + */
3881 +void install_arg_page(struct vm_area_struct *vma,
3882 +                       struct page *page, unsigned long address)
3883 +{
3884 +       struct mm_struct *mm = vma->vm_mm;
3885 +       pte_t * pte;
3886 +       spinlock_t *ptl;
3887 +
3888 +       if (unlikely(anon_vma_prepare(vma)))
3889 +               goto out;
3890 +
3891 +       flush_dcache_page(page);
3892 +       pte = get_locked_pte(mm, address, &ptl);
3893 +       if (!pte)
3894 +               goto out;
3895 +       if (!pte_none(*pte)) {
3896 +               pte_unmap_unlock(pte, ptl);
3897 +               goto out;
3898 +       }
3899 +       inc_mm_counter(mm, anon_rss);
3900 +       lru_cache_add_active(page);
3901 +       set_pte_at(mm, address, pte, pte_mkdirty(pte_mkwrite(mk_pte(
3902 +                                       page, vma->vm_page_prot))));
3903 +       page_add_new_anon_rmap(page, vma, address);
3904 +       pte_unmap_unlock(pte, ptl);
3905 +
3906 +       /* no need for flush_tlb */
3907 +       return;
3908 +out:
3909 +       __free_page(page);
3910 +       force_sig(SIGKILL, current);
3911 +}
3912 +
3913 +#define EXTRA_STACK_VM_PAGES   20      /* random */
3914 +
3915 +int setup_arg_pages(struct linux_binprm *bprm,
3916 +                   unsigned long stack_top,
3917 +                   int executable_stack)
3918 +{
3919 +       unsigned long stack_base;
3920 +       struct vm_area_struct *mpnt;
3921 +       struct mm_struct *mm = current->mm;
3922 +       int i, ret;
3923 +       long arg_size;
3924 +
3925 +#ifdef CONFIG_STACK_GROWSUP
3926 +       /* Move the argument and environment strings to the bottom of the
3927 +        * stack space.
3928 +        */
3929 +       int offset, j;
3930 +       char *to, *from;
3931 +
3932 +       /* Start by shifting all the pages down */
3933 +       i = 0;
3934 +       for (j = 0; j < MAX_ARG_PAGES; j++) {
3935 +               struct page *page = bprm->page[j];
3936 +               if (!page)
3937 +                       continue;
3938 +               bprm->page[i++] = page;
3939 +       }
3940 +
3941 +       /* Now move them within their pages */
3942 +       offset = bprm->p % PAGE_SIZE;
3943 +       to = kmap(bprm->page[0]);
3944 +       for (j = 1; j < i; j++) {
3945 +               memmove(to, to + offset, PAGE_SIZE - offset);
3946 +               from = kmap(bprm->page[j]);
3947 +               memcpy(to + PAGE_SIZE - offset, from, offset);
3948 +               kunmap(bprm->page[j - 1]);
3949 +               to = from;
3950 +       }
3951 +       memmove(to, to + offset, PAGE_SIZE - offset);
3952 +       kunmap(bprm->page[j - 1]);
3953 +
3954 +       /* Limit stack size to 1GB */
3955 +       stack_base = current->signal->rlim[RLIMIT_STACK].rlim_max;
3956 +       if (stack_base > (1 << 30))
3957 +               stack_base = 1 << 30;
3958 +       stack_base = PAGE_ALIGN(stack_top - stack_base);
3959 +
3960 +       /* Adjust bprm->p to point to the end of the strings. */
3961 +       bprm->p = stack_base + PAGE_SIZE * i - offset;
3962 +
3963 +       mm->arg_start = stack_base;
3964 +       arg_size = i << PAGE_SHIFT;
3965 +
3966 +       /* zero pages that were copied above */
3967 +       while (i < MAX_ARG_PAGES)
3968 +               bprm->page[i++] = NULL;
3969 +#else
3970 +       stack_base = arch_align_stack(stack_top - MAX_ARG_PAGES*PAGE_SIZE);
3971 +       stack_base = PAGE_ALIGN(stack_base);
3972 +       bprm->p += stack_base;
3973 +       mm->arg_start = bprm->p;
3974 +       arg_size = stack_top - (PAGE_MASK & (unsigned long) mm->arg_start);
3975 +#endif
3976 +
3977 +       arg_size += EXTRA_STACK_VM_PAGES * PAGE_SIZE;
3978 +
3979 +       if (bprm->loader)
3980 +               bprm->loader += stack_base;
3981 +       bprm->exec += stack_base;
3982 +
3983 +       mpnt = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
3984 +       if (!mpnt)
3985 +               return -ENOMEM;
3986 +
3987 +       down_write(&mm->mmap_sem);
3988 +       {
3989 +               mpnt->vm_mm = mm;
3990 +#ifdef CONFIG_STACK_GROWSUP
3991 +               mpnt->vm_start = stack_base;
3992 +               mpnt->vm_end = stack_base + arg_size;
3993 +#else
3994 +               mpnt->vm_end = stack_top;
3995 +               mpnt->vm_start = mpnt->vm_end - arg_size;
3996 +#endif
3997 +               /* Adjust stack execute permissions; explicitly enable
3998 +                * for EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X
3999 +                * and leave alone (arch default) otherwise. */
4000 +               if (unlikely(executable_stack == EXSTACK_ENABLE_X))
4001 +                       mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
4002 +               else if (executable_stack == EXSTACK_DISABLE_X)
4003 +                       mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
4004 +               else
4005 +                       mpnt->vm_flags = VM_STACK_FLAGS;
4006 +               mpnt->vm_flags |= mm->def_flags;
4007 +               mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
4008 +               if ((ret = insert_vm_struct(mm, mpnt))) {
4009 +                       up_write(&mm->mmap_sem);
4010 +                       kmem_cache_free(vm_area_cachep, mpnt);
4011 +                       return ret;
4012 +               }
4013 +               vx_vmpages_sub(mm, mm->total_vm - vma_pages(mpnt));
4014 +               mm->stack_vm = mm->total_vm;
4015 +       }
4016 +
4017 +       for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
4018 +               struct page *page = bprm->page[i];
4019 +               if (page) {
4020 +                       bprm->page[i] = NULL;
4021 +                       install_arg_page(mpnt, page, stack_base);
4022 +               }
4023 +               stack_base += PAGE_SIZE;
4024 +       }
4025 +       up_write(&mm->mmap_sem);
4026 +       
4027 +       return 0;
4028 +}
4029 +
4030 +EXPORT_SYMBOL(setup_arg_pages);
4031 +
4032 +#define free_arg_pages(bprm) do { } while (0)
4033 +
4034 +#else
4035 +
4036 +static inline void free_arg_pages(struct linux_binprm *bprm)
4037 +{
4038 +       int i;
4039 +
4040 +       for (i = 0; i < MAX_ARG_PAGES; i++) {
4041 +               if (bprm->page[i])
4042 +                       __free_page(bprm->page[i]);
4043 +               bprm->page[i] = NULL;
4044 +       }
4045 +}
4046 +
4047 +#endif /* CONFIG_MMU */
4048 +
4049 +struct file *open_exec(const char *name)
4050 +{
4051 +       struct nameidata nd;
4052 +       int err;
4053 +       struct file *file;
4054 +
4055 +       err = path_lookup_open(AT_FDCWD, name, LOOKUP_FOLLOW, &nd, FMODE_READ|FMODE_EXEC);
4056 +       file = ERR_PTR(err);
4057 +
4058 +       if (!err) {
4059 +               struct inode *inode = nd.dentry->d_inode;
4060 +               file = ERR_PTR(-EACCES);
4061 +               if (!(nd.mnt->mnt_flags & MNT_NOEXEC) &&
4062 +                   S_ISREG(inode->i_mode)) {
4063 +                       int err = vfs_permission(&nd, MAY_EXEC);
4064 +                       file = ERR_PTR(err);
4065 +                       if (!err) {
4066 +                               file = nameidata_to_filp(&nd, O_RDONLY);
4067 +                               if (!IS_ERR(file)) {
4068 +                                       err = deny_write_access(file);
4069 +                                       if (err) {
4070 +                                               fput(file);
4071 +                                               file = ERR_PTR(err);
4072 +                                       }
4073 +                               }
4074 +out:
4075 +                               return file;
4076 +                       }
4077 +               }
4078 +               release_open_intent(&nd);
4079 +               path_release(&nd);
4080 +       }
4081 +       goto out;
4082 +}
4083 +
4084 +EXPORT_SYMBOL(open_exec);
4085 +
4086 +int kernel_read(struct file *file, unsigned long offset,
4087 +       char *addr, unsigned long count)
4088 +{
4089 +       mm_segment_t old_fs;
4090 +       loff_t pos = offset;
4091 +       int result;
4092 +
4093 +       old_fs = get_fs();
4094 +       set_fs(get_ds());
4095 +       /* The cast to a user pointer is valid due to the set_fs() */
4096 +       result = vfs_read(file, (void __user *)addr, count, &pos);
4097 +       set_fs(old_fs);
4098 +       return result;
4099 +}
4100 +
4101 +EXPORT_SYMBOL(kernel_read);
4102 +
4103 +static int exec_mmap(struct mm_struct *mm)
4104 +{
4105 +       struct task_struct *tsk;
4106 +       struct mm_struct * old_mm, *active_mm;
4107 +
4108 +       /* Notify parent that we're no longer interested in the old VM */
4109 +       tsk = current;
4110 +       old_mm = current->mm;
4111 +       mm_release(tsk, old_mm);
4112 +
4113 +       if (old_mm) {
4114 +               /*
4115 +                * Make sure that if there is a core dump in progress
4116 +                * for the old mm, we get out and die instead of going
4117 +                * through with the exec.  We must hold mmap_sem around
4118 +                * checking core_waiters and changing tsk->mm.  The
4119 +                * core-inducing thread will increment core_waiters for
4120 +                * each thread whose ->mm == old_mm.
4121 +                */
4122 +               down_read(&old_mm->mmap_sem);
4123 +               if (unlikely(old_mm->core_waiters)) {
4124 +                       up_read(&old_mm->mmap_sem);
4125 +                       return -EINTR;
4126 +               }
4127 +       }
4128 +       task_lock(tsk);
4129 +       active_mm = tsk->active_mm;
4130 +       tsk->mm = mm;
4131 +       tsk->active_mm = mm;
4132 +       activate_mm(active_mm, mm);
4133 +       task_unlock(tsk);
4134 +       arch_pick_mmap_layout(mm);
4135 +       if (old_mm) {
4136 +               up_read(&old_mm->mmap_sem);
4137 +               BUG_ON(active_mm != old_mm);
4138 +               mmput(old_mm);
4139 +               return 0;
4140 +       }
4141 +       mmdrop(active_mm);
4142 +       return 0;
4143 +}
4144 +
4145 +/*
4146 + * This function makes sure the current process has its own signal table,
4147 + * so that flush_signal_handlers can later reset the handlers without
4148 + * disturbing other processes.  (Other processes might share the signal
4149 + * table via the CLONE_SIGHAND option to clone().)
4150 + */
4151 +static int de_thread(struct task_struct *tsk)
4152 +{
4153 +       struct signal_struct *sig = tsk->signal;
4154 +       struct sighand_struct *newsighand, *oldsighand = tsk->sighand;
4155 +       spinlock_t *lock = &oldsighand->siglock;
4156 +       struct task_struct *leader = NULL;
4157 +       int count;
4158 +
4159 +       /*
4160 +        * If we don't share sighandlers, then we aren't sharing anything
4161 +        * and we can just re-use it all.
4162 +        */
4163 +       if (atomic_read(&oldsighand->count) <= 1) {
4164 +               BUG_ON(atomic_read(&sig->count) != 1);
4165 +               signalfd_detach(tsk);
4166 +               exit_itimers(sig);
4167 +               return 0;
4168 +       }
4169 +
4170 +       newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
4171 +       if (!newsighand)
4172 +               return -ENOMEM;
4173 +
4174 +       if (thread_group_empty(tsk))
4175 +               goto no_thread_group;
4176 +
4177 +       /*
4178 +        * Kill all other threads in the thread group.
4179 +        * We must hold tasklist_lock to call zap_other_threads.
4180 +        */
4181 +       read_lock(&tasklist_lock);
4182 +       spin_lock_irq(lock);
4183 +       if (sig->flags & SIGNAL_GROUP_EXIT) {
4184 +               /*
4185 +                * Another group action in progress, just
4186 +                * return so that the signal is processed.
4187 +                */
4188 +               spin_unlock_irq(lock);
4189 +               read_unlock(&tasklist_lock);
4190 +               kmem_cache_free(sighand_cachep, newsighand);
4191 +               return -EAGAIN;
4192 +       }
4193 +
4194 +       /*
4195 +        * child_reaper ignores SIGKILL, change it now.
4196 +        * Reparenting needs write_lock on tasklist_lock,
4197 +        * so it is safe to do it under read_lock.
4198 +        */
4199 +       if (unlikely(tsk->group_leader == child_reaper(tsk)))
4200 +               tsk->nsproxy->pid_ns->child_reaper = tsk;
4201 +
4202 +       zap_other_threads(tsk);
4203 +       read_unlock(&tasklist_lock);
4204 +
4205 +       /*
4206 +        * Account for the thread group leader hanging around:
4207 +        */
4208 +       count = 1;
4209 +       if (!thread_group_leader(tsk)) {
4210 +               count = 2;
4211 +               /*
4212 +                * The SIGALRM timer survives the exec, but needs to point
4213 +                * at us as the new group leader now.  We have a race with
4214 +                * a timer firing now getting the old leader, so we need to
4215 +                * synchronize with any firing (by calling del_timer_sync)
4216 +                * before we can safely let the old group leader die.
4217 +                */
4218 +               sig->tsk = tsk;
4219 +               spin_unlock_irq(lock);
4220 +               if (hrtimer_cancel(&sig->real_timer))
4221 +                       hrtimer_restart(&sig->real_timer);
4222 +               spin_lock_irq(lock);
4223 +       }
4224 +       while (atomic_read(&sig->count) > count) {
4225 +               sig->group_exit_task = tsk;
4226 +               sig->notify_count = count;
4227 +               __set_current_state(TASK_UNINTERRUPTIBLE);
4228 +               spin_unlock_irq(lock);
4229 +               schedule();
4230 +               spin_lock_irq(lock);
4231 +       }
4232 +       sig->group_exit_task = NULL;
4233 +       sig->notify_count = 0;
4234 +       spin_unlock_irq(lock);
4235 +
4236 +       /*
4237 +        * At this point all other threads have exited, all we have to
4238 +        * do is to wait for the thread group leader to become inactive,
4239 +        * and to assume its PID:
4240 +        */
4241 +       if (!thread_group_leader(tsk)) {
4242 +               /*
4243 +                * Wait for the thread group leader to be a zombie.
4244 +                * It should already be zombie at this point, most
4245 +                * of the time.
4246 +                */
4247 +               leader = tsk->group_leader;
4248 +               while (leader->exit_state != EXIT_ZOMBIE)
4249 +                       yield();
4250 +
4251 +               /*
4252 +                * The only record we have of the real-time age of a
4253 +                * process, regardless of execs it's done, is start_time.
4254 +                * All the past CPU time is accumulated in signal_struct
4255 +                * from sister threads now dead.  But in this non-leader
4256 +                * exec, nothing survives from the original leader thread,
4257 +                * whose birth marks the true age of this process now.
4258 +                * When we take on its identity by switching to its PID, we
4259 +                * also take its birthdate (always earlier than our own).
4260 +                */
4261 +               tsk->start_time = leader->start_time;
4262 +
4263 +               write_lock_irq(&tasklist_lock);
4264 +
4265 +               BUG_ON(leader->tgid != tsk->tgid);
4266 +               BUG_ON(tsk->pid == tsk->tgid);
4267 +               /*
4268 +                * An exec() starts a new thread group with the
4269 +                * TGID of the previous thread group. Rehash the
4270 +                * two threads with a switched PID, and release
4271 +                * the former thread group leader:
4272 +                */
4273 +
4274 +               /* Become a process group leader with the old leader's pid.
4275 +                * The old leader becomes a thread of the this thread group.
4276 +                * Note: The old leader also uses this pid until release_task
4277 +                *       is called.  Odd but simple and correct.
4278 +                */
4279 +               detach_pid(tsk, PIDTYPE_PID);
4280 +               tsk->pid = leader->pid;
4281 +               attach_pid(tsk, PIDTYPE_PID,  find_pid(tsk->pid));
4282 +               transfer_pid(leader, tsk, PIDTYPE_PGID);
4283 +               transfer_pid(leader, tsk, PIDTYPE_SID);
4284 +               list_replace_rcu(&leader->tasks, &tsk->tasks);
4285 +
4286 +               tsk->group_leader = tsk;
4287 +               leader->group_leader = tsk;
4288 +
4289 +               tsk->exit_signal = SIGCHLD;
4290 +
4291 +               BUG_ON(leader->exit_state != EXIT_ZOMBIE);
4292 +               leader->exit_state = EXIT_DEAD;
4293 +
4294 +               write_unlock_irq(&tasklist_lock);
4295 +        }
4296 +
4297 +       /*
4298 +        * There may be one thread left which is just exiting,
4299 +        * but it's safe to stop telling the group to kill themselves.
4300 +        */
4301 +       sig->flags = 0;
4302 +
4303 +no_thread_group:
4304 +       signalfd_detach(tsk);
4305 +       exit_itimers(sig);
4306 +       if (leader)
4307 +               release_task(leader);
4308 +
4309 +       BUG_ON(atomic_read(&sig->count) != 1);
4310 +
4311 +       if (atomic_read(&oldsighand->count) == 1) {
4312 +               /*
4313 +                * Now that we nuked the rest of the thread group,
4314 +                * it turns out we are not sharing sighand any more either.
4315 +                * So we can just keep it.
4316 +                */
4317 +               kmem_cache_free(sighand_cachep, newsighand);
4318 +       } else {
4319 +               /*
4320 +                * Move our state over to newsighand and switch it in.
4321 +                */
4322 +               atomic_set(&newsighand->count, 1);
4323 +               memcpy(newsighand->action, oldsighand->action,
4324 +                      sizeof(newsighand->action));
4325 +
4326 +               write_lock_irq(&tasklist_lock);
4327 +               spin_lock(&oldsighand->siglock);
4328 +               spin_lock_nested(&newsighand->siglock, SINGLE_DEPTH_NESTING);
4329 +
4330 +               rcu_assign_pointer(tsk->sighand, newsighand);
4331 +               recalc_sigpending();
4332 +
4333 +               spin_unlock(&newsighand->siglock);
4334 +               spin_unlock(&oldsighand->siglock);
4335 +               write_unlock_irq(&tasklist_lock);
4336 +
4337 +               __cleanup_sighand(oldsighand);
4338 +       }
4339 +
4340 +       BUG_ON(!thread_group_leader(tsk));
4341 +       return 0;
4342 +}
4343 +       
4344 +/*
4345 + * These functions flushes out all traces of the currently running executable
4346 + * so that a new one can be started
4347 + */
4348 +
4349 +static void flush_old_files(struct files_struct * files)
4350 +{
4351 +       long j = -1;
4352 +       struct fdtable *fdt;
4353 +
4354 +       spin_lock(&files->file_lock);
4355 +       for (;;) {
4356 +               unsigned long set, i;
4357 +
4358 +               j++;
4359 +               i = j * __NFDBITS;
4360 +               fdt = files_fdtable(files);
4361 +               if (i >= fdt->max_fds)
4362 +                       break;
4363 +               set = fdt->close_on_exec->fds_bits[j];
4364 +               if (!set)
4365 +                       continue;
4366 +               fdt->close_on_exec->fds_bits[j] = 0;
4367 +               spin_unlock(&files->file_lock);
4368 +               for ( ; set ; i++,set >>= 1) {
4369 +                       if (set & 1) {
4370 +                               sys_close(i);
4371 +                       }
4372 +               }
4373 +               spin_lock(&files->file_lock);
4374 +
4375 +       }
4376 +       spin_unlock(&files->file_lock);
4377 +}
4378 +
4379 +void get_task_comm(char *buf, struct task_struct *tsk)
4380 +{
4381 +       /* buf must be at least sizeof(tsk->comm) in size */
4382 +       task_lock(tsk);
4383 +       strncpy(buf, tsk->comm, sizeof(tsk->comm));
4384 +       task_unlock(tsk);
4385 +}
4386 +
4387 +void set_task_comm(struct task_struct *tsk, char *buf)
4388 +{
4389 +       task_lock(tsk);
4390 +       strlcpy(tsk->comm, buf, sizeof(tsk->comm));
4391 +       task_unlock(tsk);
4392 +}
4393 +
4394 +int flush_old_exec(struct linux_binprm * bprm)
4395 +{
4396 +       char * name;
4397 +       int i, ch, retval;
4398 +       struct files_struct *files;
4399 +       char tcomm[sizeof(current->comm)];
4400 +
4401 +       /*
4402 +        * Make sure we have a private signal table and that
4403 +        * we are unassociated from the previous thread group.
4404 +        */
4405 +       retval = de_thread(current);
4406 +       if (retval)
4407 +               goto out;
4408 +
4409 +       /*
4410 +        * Make sure we have private file handles. Ask the
4411 +        * fork helper to do the work for us and the exit
4412 +        * helper to do the cleanup of the old one.
4413 +        */
4414 +       files = current->files;         /* refcounted so safe to hold */
4415 +       retval = unshare_files();
4416 +       if (retval)
4417 +               goto out;
4418 +       /*
4419 +        * Release all of the old mmap stuff
4420 +        */
4421 +       retval = exec_mmap(bprm->mm);
4422 +       if (retval)
4423 +               goto mmap_failed;
4424 +
4425 +       bprm->mm = NULL;                /* We're using it now */
4426 +
4427 +       /* This is the point of no return */
4428 +       put_files_struct(files);
4429 +
4430 +       current->sas_ss_sp = current->sas_ss_size = 0;
4431 +
4432 +       if (current->euid == current->uid && current->egid == current->gid)
4433 +               current->mm->dumpable = 1;
4434 +       else
4435 +               current->mm->dumpable = suid_dumpable;
4436 +
4437 +       name = bprm->filename;
4438 +
4439 +       /* Copies the binary name from after last slash */
4440 +       for (i=0; (ch = *(name++)) != '\0';) {
4441 +               if (ch == '/')
4442 +                       i = 0; /* overwrite what we wrote */
4443 +               else
4444 +                       if (i < (sizeof(tcomm) - 1))
4445 +                               tcomm[i++] = ch;
4446 +       }
4447 +       tcomm[i] = '\0';
4448 +       set_task_comm(current, tcomm);
4449 +
4450 +       current->flags &= ~PF_RANDOMIZE;
4451 +       flush_thread();
4452 +
4453 +       /* Set the new mm task size. We have to do that late because it may
4454 +        * depend on TIF_32BIT which is only updated in flush_thread() on
4455 +        * some architectures like powerpc
4456 +        */
4457 +       current->mm->task_size = TASK_SIZE;
4458 +
4459 +       if (bprm->e_uid != current->euid || bprm->e_gid != current->egid) {
4460 +               suid_keys(current);
4461 +               current->mm->dumpable = suid_dumpable;
4462 +               current->pdeath_signal = 0;
4463 +       } else if (file_permission(bprm->file, MAY_READ) ||
4464 +                       (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP)) {
4465 +               suid_keys(current);
4466 +               current->mm->dumpable = suid_dumpable;
4467 +       }
4468 +
4469 +       /* An exec changes our domain. We are no longer part of the thread
4470 +          group */
4471 +
4472 +       current->self_exec_id++;
4473 +                       
4474 +       flush_signal_handlers(current, 0);
4475 +       flush_old_files(current->files);
4476 +
4477 +       return 0;
4478 +
4479 +mmap_failed:
4480 +       reset_files_struct(current, files);
4481 +out:
4482 +       return retval;
4483 +}
4484 +
4485 +EXPORT_SYMBOL(flush_old_exec);
4486 +
4487 +/* 
4488 + * Fill the binprm structure from the inode. 
4489 + * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
4490 + */
4491 +int prepare_binprm(struct linux_binprm *bprm)
4492 +{
4493 +       int mode;
4494 +       struct inode * inode = bprm->file->f_path.dentry->d_inode;
4495 +       int retval;
4496 +
4497 +       mode = inode->i_mode;
4498 +       if (bprm->file->f_op == NULL)
4499 +               return -EACCES;
4500 +
4501 +       bprm->e_uid = current->euid;
4502 +       bprm->e_gid = current->egid;
4503 +
4504 +       if(!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)) {
4505 +               /* Set-uid? */
4506 +               if (mode & S_ISUID) {
4507 +                       current->personality &= ~PER_CLEAR_ON_SETID;
4508 +                       bprm->e_uid = inode->i_uid;
4509 +               }
4510 +
4511 +               /* Set-gid? */
4512 +               /*
4513 +                * If setgid is set but no group execute bit then this
4514 +                * is a candidate for mandatory locking, not a setgid
4515 +                * executable.
4516 +                */
4517 +               if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
4518 +                       current->personality &= ~PER_CLEAR_ON_SETID;
4519 +                       bprm->e_gid = inode->i_gid;
4520 +               }
4521 +       }
4522 +
4523 +       /* fill in binprm security blob */
4524 +       retval = security_bprm_set(bprm);
4525 +       if (retval)
4526 +               return retval;
4527 +
4528 +       memset(bprm->buf,0,BINPRM_BUF_SIZE);
4529 +       return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
4530 +}
4531 +
4532 +EXPORT_SYMBOL(prepare_binprm);
4533 +
4534 +static int unsafe_exec(struct task_struct *p)
4535 +{
4536 +       int unsafe = 0;
4537 +       if (p->ptrace & PT_PTRACED) {
4538 +               if (p->ptrace & PT_PTRACE_CAP)
4539 +                       unsafe |= LSM_UNSAFE_PTRACE_CAP;
4540 +               else
4541 +                       unsafe |= LSM_UNSAFE_PTRACE;
4542 +       }
4543 +       if (atomic_read(&p->fs->count) > 1 ||
4544 +           atomic_read(&p->files->count) > 1 ||
4545 +           atomic_read(&p->sighand->count) > 1)
4546 +               unsafe |= LSM_UNSAFE_SHARE;
4547 +
4548 +       return unsafe;
4549 +}
4550 +
4551 +void compute_creds(struct linux_binprm *bprm)
4552 +{
4553 +       int unsafe;
4554 +
4555 +       if (bprm->e_uid != current->uid) {
4556 +               suid_keys(current);
4557 +               current->pdeath_signal = 0;
4558 +       }
4559 +       exec_keys(current);
4560 +
4561 +       task_lock(current);
4562 +       unsafe = unsafe_exec(current);
4563 +       security_bprm_apply_creds(bprm, unsafe);
4564 +       task_unlock(current);
4565 +       security_bprm_post_apply_creds(bprm);
4566 +}
4567 +EXPORT_SYMBOL(compute_creds);
4568 +
4569 +/*
4570 + * Arguments are '\0' separated strings found at the location bprm->p
4571 + * points to; chop off the first by relocating bprm->p to right after
4572 + * the first '\0' encountered.
4573 + */
4574 +void remove_arg_zero(struct linux_binprm *bprm)
4575 +{
4576 +       if (bprm->argc) {
4577 +               char ch;
4578 +
4579 +               do {
4580 +                       unsigned long offset;
4581 +                       unsigned long index;
4582 +                       char *kaddr;
4583 +                       struct page *page;
4584 +
4585 +                       offset = bprm->p & ~PAGE_MASK;
4586 +                       index = bprm->p >> PAGE_SHIFT;
4587 +
4588 +                       page = bprm->page[index];
4589 +                       kaddr = kmap_atomic(page, KM_USER0);
4590 +
4591 +                       /* run through page until we reach end or find NUL */
4592 +                       do {
4593 +                               ch = *(kaddr + offset);
4594 +
4595 +                               /* discard that character... */
4596 +                               bprm->p++;
4597 +                               offset++;
4598 +                       } while (offset < PAGE_SIZE && ch != '\0');
4599 +
4600 +                       kunmap_atomic(kaddr, KM_USER0);
4601 +
4602 +                       /* free the old page */
4603 +                       if (offset == PAGE_SIZE) {
4604 +                               __free_page(page);
4605 +                               bprm->page[index] = NULL;
4606 +                       }
4607 +               } while (ch != '\0');
4608 +
4609 +               bprm->argc--;
4610 +       }
4611 +}
4612 +EXPORT_SYMBOL(remove_arg_zero);
4613 +
4614 +/*
4615 + * cycle the list of binary formats handler, until one recognizes the image
4616 + */
4617 +int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
4618 +{
4619 +       int try,retval;
4620 +       struct linux_binfmt *fmt;
4621 +#ifdef __alpha__
4622 +       /* handle /sbin/loader.. */
4623 +       {
4624 +           struct exec * eh = (struct exec *) bprm->buf;
4625 +
4626 +           if (!bprm->loader && eh->fh.f_magic == 0x183 &&
4627 +               (eh->fh.f_flags & 0x3000) == 0x3000)
4628 +           {
4629 +               struct file * file;
4630 +               unsigned long loader;
4631 +
4632 +               allow_write_access(bprm->file);
4633 +               fput(bprm->file);
4634 +               bprm->file = NULL;
4635 +
4636 +               loader = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
4637 +
4638 +               file = open_exec("/sbin/loader");
4639 +               retval = PTR_ERR(file);
4640 +               if (IS_ERR(file))
4641 +                       return retval;
4642 +
4643 +               /* Remember if the application is TASO.  */
4644 +               bprm->sh_bang = eh->ah.entry < 0x100000000UL;
4645 +
4646 +               bprm->file = file;
4647 +               bprm->loader = loader;
4648 +               retval = prepare_binprm(bprm);
4649 +               if (retval<0)
4650 +                       return retval;
4651 +               /* should call search_binary_handler recursively here,
4652 +                  but it does not matter */
4653 +           }
4654 +       }
4655 +#endif
4656 +       retval = security_bprm_check(bprm);
4657 +       if (retval)
4658 +               return retval;
4659 +
4660 +       /* kernel module loader fixup */
4661 +       /* so we don't try to run modprobe in kernel space. */
4662 +       set_fs(USER_DS);
4663 +
4664 +       retval = audit_bprm(bprm);
4665 +       if (retval)
4666 +               return retval;
4667 +
4668 +       retval = -ENOENT;
4669 +       for (try=0; try<2; try++) {
4670 +               read_lock(&binfmt_lock);
4671 +               for (fmt = formats ; fmt ; fmt = fmt->next) {
4672 +                       int (*fn)(struct linux_binprm *, struct pt_regs *) = fmt->load_binary;
4673 +                       if (!fn)
4674 +                               continue;
4675 +                       if (!try_module_get(fmt->module))
4676 +                               continue;
4677 +                       read_unlock(&binfmt_lock);
4678 +                       retval = fn(bprm, regs);
4679 +                       if (retval >= 0) {
4680 +                               put_binfmt(fmt);
4681 +                               allow_write_access(bprm->file);
4682 +                               if (bprm->file)
4683 +                                       fput(bprm->file);
4684 +                               bprm->file = NULL;
4685 +                               current->did_exec = 1;
4686 +                               proc_exec_connector(current);
4687 +                               return retval;
4688 +                       }
4689 +                       read_lock(&binfmt_lock);
4690 +                       put_binfmt(fmt);
4691 +                       if (retval != -ENOEXEC || bprm->mm == NULL)
4692 +                               break;
4693 +                       if (!bprm->file) {
4694 +                               read_unlock(&binfmt_lock);
4695 +                               return retval;
4696 +                       }
4697 +               }
4698 +               read_unlock(&binfmt_lock);
4699 +               if (retval != -ENOEXEC || bprm->mm == NULL) {
4700 +                       break;
4701 +#ifdef CONFIG_KMOD
4702 +               }else{
4703 +#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
4704 +                       if (printable(bprm->buf[0]) &&
4705 +                           printable(bprm->buf[1]) &&
4706 +                           printable(bprm->buf[2]) &&
4707 +                           printable(bprm->buf[3]))
4708 +                               break; /* -ENOEXEC */
4709 +                       request_module("binfmt-%04x", *(unsigned short *)(&bprm->buf[2]));
4710 +#endif
4711 +               }
4712 +       }
4713 +       return retval;
4714 +}
4715 +
4716 +EXPORT_SYMBOL(search_binary_handler);
4717 +
4718 +/*
4719 + * sys_execve() executes a new program.
4720 + */
4721 +int do_execve(char * filename,
4722 +       char __user *__user *argv,
4723 +       char __user *__user *envp,
4724 +       struct pt_regs * regs)
4725 +{
4726 +       struct linux_binprm *bprm;
4727 +       struct file *file;
4728 +       int retval;
4729 +       int i;
4730 +
4731 +       retval = -ENOMEM;
4732 +       bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
4733 +       if (!bprm)
4734 +               goto out_ret;
4735 +
4736 +       file = open_exec(filename);
4737 +       retval = PTR_ERR(file);
4738 +       if (IS_ERR(file))
4739 +               goto out_kfree;
4740 +
4741 +       sched_exec();
4742 +
4743 +       bprm->p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
4744 +
4745 +       bprm->file = file;
4746 +       bprm->filename = filename;
4747 +       bprm->interp = filename;
4748 +       bprm->mm = mm_alloc();
4749 +       retval = -ENOMEM;
4750 +       if (!bprm->mm)
4751 +               goto out_file;
4752 +
4753 +       retval = init_new_context(current, bprm->mm);
4754 +       if (retval < 0)
4755 +               goto out_mm;
4756 +
4757 +       bprm->argc = count(argv, bprm->p / sizeof(void *));
4758 +       if ((retval = bprm->argc) < 0)
4759 +               goto out_mm;
4760 +
4761 +       bprm->envc = count(envp, bprm->p / sizeof(void *));
4762 +       if ((retval = bprm->envc) < 0)
4763 +               goto out_mm;
4764 +
4765 +       retval = security_bprm_alloc(bprm);
4766 +       if (retval)
4767 +               goto out;
4768 +
4769 +       retval = prepare_binprm(bprm);
4770 +       if (retval < 0)
4771 +               goto out;
4772 +
4773 +       retval = copy_strings_kernel(1, &bprm->filename, bprm);
4774 +       if (retval < 0)
4775 +               goto out;
4776 +
4777 +       bprm->exec = bprm->p;
4778 +       retval = copy_strings(bprm->envc, envp, bprm);
4779 +       if (retval < 0)
4780 +               goto out;
4781 +
4782 +       retval = copy_strings(bprm->argc, argv, bprm);
4783 +       if (retval < 0)
4784 +               goto out;
4785 +
4786 +       retval = search_binary_handler(bprm,regs);
4787 +       if (retval >= 0) {
4788 +               free_arg_pages(bprm);
4789 +
4790 +               /* execve success */
4791 +               security_bprm_free(bprm);
4792 +               acct_update_integrals(current);
4793 +               kfree(bprm);
4794 +               return retval;
4795 +       }
4796 +
4797 +out:
4798 +       /* Something went wrong, return the inode and free the argument pages */
4799 +       for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
4800 +               struct page * page = bprm->page[i];
4801 +               if (page)
4802 +                       __free_page(page);
4803 +       }
4804 +
4805 +       if (bprm->security)
4806 +               security_bprm_free(bprm);
4807 +
4808 +out_mm:
4809 +       if (bprm->mm)
4810 +               mmdrop(bprm->mm);
4811 +
4812 +out_file:
4813 +       if (bprm->file) {
4814 +               allow_write_access(bprm->file);
4815 +               fput(bprm->file);
4816 +       }
4817 +
4818 +out_kfree:
4819 +       kfree(bprm);
4820 +
4821 +out_ret:
4822 +       return retval;
4823 +}
4824 +
4825 +int set_binfmt(struct linux_binfmt *new)
4826 +{
4827 +       struct linux_binfmt *old = current->binfmt;
4828 +
4829 +       if (new) {
4830 +               if (!try_module_get(new->module))
4831 +                       return -1;
4832 +       }
4833 +       current->binfmt = new;
4834 +       if (old)
4835 +               module_put(old->module);
4836 +       return 0;
4837 +}
4838 +
4839 +EXPORT_SYMBOL(set_binfmt);
4840 +
4841 +/* format_corename will inspect the pattern parameter, and output a
4842 + * name into corename, which must have space for at least
4843 + * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
4844 + */
4845 +static int format_corename(char *corename, const char *pattern, long signr)
4846 +{
4847 +       const char *pat_ptr = pattern;
4848 +       char *out_ptr = corename;
4849 +       char *const out_end = corename + CORENAME_MAX_SIZE;
4850 +       int rc;
4851 +       int pid_in_pattern = 0;
4852 +       int ispipe = 0;
4853 +
4854 +       if (*pattern == '|')
4855 +               ispipe = 1;
4856 +
4857 +       /* Repeat as long as we have more pattern to process and more output
4858 +          space */
4859 +       while (*pat_ptr) {
4860 +               if (*pat_ptr != '%') {
4861 +                       if (out_ptr == out_end)
4862 +                               goto out;
4863 +                       *out_ptr++ = *pat_ptr++;
4864 +               } else {
4865 +                       switch (*++pat_ptr) {
4866 +                       case 0:
4867 +                               goto out;
4868 +                       /* Double percent, output one percent */
4869 +                       case '%':
4870 +                               if (out_ptr == out_end)
4871 +                                       goto out;
4872 +                               *out_ptr++ = '%';
4873 +                               break;
4874 +                       /* pid */
4875 +                       case 'p':
4876 +                               pid_in_pattern = 1;
4877 +                               rc = snprintf(out_ptr, out_end - out_ptr,
4878 +                                             "%d", current->tgid);
4879 +                               if (rc > out_end - out_ptr)
4880 +                                       goto out;
4881 +                               out_ptr += rc;
4882 +                               break;
4883 +                       /* uid */
4884 +                       case 'u':
4885 +                               rc = snprintf(out_ptr, out_end - out_ptr,
4886 +                                             "%d", current->uid);
4887 +                               if (rc > out_end - out_ptr)
4888 +                                       goto out;
4889 +                               out_ptr += rc;
4890 +                               break;
4891 +                       /* gid */
4892 +                       case 'g':
4893 +                               rc = snprintf(out_ptr, out_end - out_ptr,
4894 +                                             "%d", current->gid);
4895 +                               if (rc > out_end - out_ptr)
4896 +                                       goto out;
4897 +                               out_ptr += rc;
4898 +                               break;
4899 +                       /* signal that caused the coredump */
4900 +                       case 's':
4901 +                               rc = snprintf(out_ptr, out_end - out_ptr,
4902 +                                             "%ld", signr);
4903 +                               if (rc > out_end - out_ptr)
4904 +                                       goto out;
4905 +                               out_ptr += rc;
4906 +                               break;
4907 +                       /* UNIX time of coredump */
4908 +                       case 't': {
4909 +                               struct timeval tv;
4910 +                               vx_gettimeofday(&tv);
4911 +                               rc = snprintf(out_ptr, out_end - out_ptr,
4912 +                                             "%lu", tv.tv_sec);
4913 +                               if (rc > out_end - out_ptr)
4914 +                                       goto out;
4915 +                               out_ptr += rc;
4916 +                               break;
4917 +                       }
4918 +                       /* hostname */
4919 +                       case 'h':
4920 +                               down_read(&uts_sem);
4921 +                               rc = snprintf(out_ptr, out_end - out_ptr,
4922 +                                             "%s", utsname()->nodename);
4923 +                               up_read(&uts_sem);
4924 +                               if (rc > out_end - out_ptr)
4925 +                                       goto out;
4926 +                               out_ptr += rc;
4927 +                               break;
4928 +                       /* executable */
4929 +                       case 'e':
4930 +                               rc = snprintf(out_ptr, out_end - out_ptr,
4931 +                                             "%s", current->comm);
4932 +                               if (rc > out_end - out_ptr)
4933 +                                       goto out;
4934 +                               out_ptr += rc;
4935 +                               break;
4936 +                       default:
4937 +                               break;
4938 +                       }
4939 +                       ++pat_ptr;
4940 +               }
4941 +       }
4942 +       /* Backward compatibility with core_uses_pid:
4943 +        *
4944 +        * If core_pattern does not include a %p (as is the default)
4945 +        * and core_uses_pid is set, then .%pid will be appended to
4946 +        * the filename. Do not do this for piped commands. */
4947 +       if (!ispipe && !pid_in_pattern
4948 +            && (core_uses_pid || atomic_read(&current->mm->mm_users) != 1)) {
4949 +               rc = snprintf(out_ptr, out_end - out_ptr,
4950 +                             ".%d", current->tgid);
4951 +               if (rc > out_end - out_ptr)
4952 +                       goto out;
4953 +               out_ptr += rc;
4954 +       }
4955 +out:
4956 +       *out_ptr = 0;
4957 +       return ispipe;
4958 +}
4959 +
4960 +static void zap_process(struct task_struct *start)
4961 +{
4962 +       struct task_struct *t;
4963 +
4964 +       start->signal->flags = SIGNAL_GROUP_EXIT;
4965 +       start->signal->group_stop_count = 0;
4966 +
4967 +       t = start;
4968 +       do {
4969 +               if (t != current && t->mm) {
4970 +                       t->mm->core_waiters++;
4971 +                       sigaddset(&t->pending.signal, SIGKILL);
4972 +                       signal_wake_up(t, 1);
4973 +               }
4974 +       } while ((t = next_thread(t)) != start);
4975 +}
4976 +
4977 +static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
4978 +                               int exit_code)
4979 +{
4980 +       struct task_struct *g, *p;
4981 +       unsigned long flags;
4982 +       int err = -EAGAIN;
4983 +
4984 +       spin_lock_irq(&tsk->sighand->siglock);
4985 +       if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
4986 +               tsk->signal->group_exit_code = exit_code;
4987 +               zap_process(tsk);
4988 +               err = 0;
4989 +       }
4990 +       spin_unlock_irq(&tsk->sighand->siglock);
4991 +       if (err)
4992 +               return err;
4993 +
4994 +       if (atomic_read(&mm->mm_users) == mm->core_waiters + 1)
4995 +               goto done;
4996 +
4997 +       rcu_read_lock();
4998 +       for_each_process(g) {
4999 +               if (g == tsk->group_leader)
5000 +                       continue;
5001 +
5002 +               p = g;
5003 +               do {
5004 +                       if (p->mm) {
5005 +                               if (p->mm == mm) {
5006 +                                       /*
5007 +                                        * p->sighand can't disappear, but
5008 +                                        * may be changed by de_thread()
5009 +                                        */
5010 +                                       lock_task_sighand(p, &flags);
5011 +                                       zap_process(p);
5012 +                                       unlock_task_sighand(p, &flags);
5013 +                               }
5014 +                               break;
5015 +                       }
5016 +               } while ((p = next_thread(p)) != g);
5017 +       }
5018 +       rcu_read_unlock();
5019 +done:
5020 +       return mm->core_waiters;
5021 +}
5022 +
5023 +static int coredump_wait(int exit_code)
5024 +{
5025 +       struct task_struct *tsk = current;
5026 +       struct mm_struct *mm = tsk->mm;
5027 +       struct completion startup_done;
5028 +       struct completion *vfork_done;
5029 +       int core_waiters;
5030 +
5031 +       init_completion(&mm->core_done);
5032 +       init_completion(&startup_done);
5033 +       mm->core_startup_done = &startup_done;
5034 +
5035 +       core_waiters = zap_threads(tsk, mm, exit_code);
5036 +       up_write(&mm->mmap_sem);
5037 +
5038 +       if (unlikely(core_waiters < 0))
5039 +               goto fail;
5040 +
5041 +       /*
5042 +        * Make sure nobody is waiting for us to release the VM,
5043 +        * otherwise we can deadlock when we wait on each other
5044 +        */
5045 +       vfork_done = tsk->vfork_done;
5046 +       if (vfork_done) {
5047 +               tsk->vfork_done = NULL;
5048 +               complete(vfork_done);
5049 +       }
5050 +
5051 +       if (core_waiters)
5052 +               wait_for_completion(&startup_done);
5053 +fail:
5054 +       BUG_ON(mm->core_waiters);
5055 +       return core_waiters;
5056 +}
5057 +
5058 +int do_coredump(long signr, int exit_code, struct pt_regs * regs)
5059 +{
5060 +       char corename[CORENAME_MAX_SIZE + 1];
5061 +       struct mm_struct *mm = current->mm;
5062 +       struct linux_binfmt * binfmt;
5063 +       struct inode * inode;
5064 +       struct file * file;
5065 +       int retval = 0;
5066 +       int fsuid = current->fsuid;
5067 +       int flag = 0;
5068 +       int ispipe = 0;
5069 +
5070 +       audit_core_dumps(signr);
5071 +
5072 +       binfmt = current->binfmt;
5073 +       if (!binfmt || !binfmt->core_dump)
5074 +               goto fail;
5075 +       down_write(&mm->mmap_sem);
5076 +       if (!mm->dumpable) {
5077 +               up_write(&mm->mmap_sem);
5078 +               goto fail;
5079 +       }
5080 +
5081 +       /*
5082 +        *      We cannot trust fsuid as being the "true" uid of the
5083 +        *      process nor do we know its entire history. We only know it
5084 +        *      was tainted so we dump it as root in mode 2.
5085 +        */
5086 +       if (mm->dumpable == 2) {        /* Setuid core dump mode */
5087 +               flag = O_EXCL;          /* Stop rewrite attacks */
5088 +               current->fsuid = 0;     /* Dump root private */
5089 +       }
5090 +       mm->dumpable = 0;
5091 +
5092 +       retval = coredump_wait(exit_code);
5093 +       if (retval < 0)
5094 +               goto fail;
5095 +
5096 +       /*
5097 +        * Clear any false indication of pending signals that might
5098 +        * be seen by the filesystem code called to write the core file.
5099 +        */
5100 +       clear_thread_flag(TIF_SIGPENDING);
5101 +
5102 +       if (current->signal->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
5103 +               goto fail_unlock;
5104 +
5105 +       /*
5106 +        * lock_kernel() because format_corename() is controlled by sysctl, which
5107 +        * uses lock_kernel()
5108 +        */
5109 +       lock_kernel();
5110 +       ispipe = format_corename(corename, core_pattern, signr);
5111 +       unlock_kernel();
5112 +       if (ispipe) {
5113 +               /* SIGPIPE can happen, but it's just never processed */
5114 +               if(call_usermodehelper_pipe(corename+1, NULL, NULL, &file)) {
5115 +                       printk(KERN_INFO "Core dump to %s pipe failed\n",
5116 +                              corename);
5117 +                       goto fail_unlock;
5118 +               }
5119 +       } else
5120 +               file = filp_open(corename,
5121 +                                O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
5122 +                                0600);
5123 +       if (IS_ERR(file))
5124 +               goto fail_unlock;
5125 +       inode = file->f_path.dentry->d_inode;
5126 +       if (inode->i_nlink > 1)
5127 +               goto close_fail;        /* multiple links - don't dump */
5128 +       if (!ispipe && d_unhashed(file->f_path.dentry))
5129 +               goto close_fail;
5130 +
5131 +       /* AK: actually i see no reason to not allow this for named pipes etc.,
5132 +          but keep the previous behaviour for now. */
5133 +       if (!ispipe && !S_ISREG(inode->i_mode))
5134 +               goto close_fail;
5135 +       /*
5136 +        * Don't allow local users to get cute and trick others to coredump
5137 +        * into their pre-created files:
5138 +        */
5139 +       if (inode->i_uid != current->fsuid)
5140 +               goto close_fail;
5141 +       if (!file->f_op)
5142 +               goto close_fail;
5143 +       if (!file->f_op->write)
5144 +               goto close_fail;
5145 +       if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
5146 +               goto close_fail;
5147 +
5148 +       retval = binfmt->core_dump(signr, regs, file);
5149 +
5150 +       if (retval)
5151 +               current->signal->group_exit_code |= 0x80;
5152 +close_fail:
5153 +       filp_close(file, NULL);
5154 +fail_unlock:
5155 +       current->fsuid = fsuid;
5156 +       complete_all(&mm->core_done);
5157 +fail:
5158 +       return retval;
5159 +}
5160 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/arrays.h linux-2.6.22-590/include/linux/arrays.h
5161 --- linux-2.6.22-580/include/linux/arrays.h     1969-12-31 19:00:00.000000000 -0500
5162 +++ linux-2.6.22-590/include/linux/arrays.h     2009-02-18 09:57:23.000000000 -0500
5163 @@ -0,0 +1,36 @@
5164 +#ifndef __ARRAYS_H__
5165 +#define __ARRAYS_H__
5166 +#include <linux/list.h>
5167 +
5168 +#define SAMPLING_METHOD_DEFAULT 0
5169 +#define SAMPLING_METHOD_LOG 1
5170 +
5171 +/* Every probe has an array handler */
5172 +
5173 +/* XXX - Optimize this structure */
5174 +
5175 +extern void (*rec_event)(void *,unsigned int);
5176 +struct array_handler {
5177 +       struct list_head link;
5178 +       unsigned int (*hash_func)(void *);
5179 +       unsigned int (*sampling_func)(void *,int,void *);
5180 +       unsigned short size;
5181 +       unsigned int threshold;
5182 +       unsigned char **expcount;
5183 +       unsigned int sampling_method;
5184 +       unsigned int **arrays;
5185 +       unsigned int arraysize;
5186 +       unsigned int num_samples[2];
5187 +       void **epoch_samples; /* size-sized lists of samples */
5188 +       unsigned int (*serialize)(void *, void *);
5189 +       unsigned char code[5];
5190 +};
5191 +
5192 +struct event {
5193 +       struct list_head link;
5194 +       void *event_data;
5195 +       unsigned int count;
5196 +       unsigned int event_type;
5197 +       struct task_struct *task;
5198 +};
5199 +#endif
5200 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/mutex.h linux-2.6.22-590/include/linux/mutex.h
5201 --- linux-2.6.22-580/include/linux/mutex.h      2007-07-08 19:32:17.000000000 -0400
5202 +++ linux-2.6.22-590/include/linux/mutex.h      2009-02-18 09:57:23.000000000 -0500
5203 @@ -53,6 +53,10 @@
5204         struct thread_info      *owner;
5205         const char              *name;
5206         void                    *magic;
5207 +#else
5208 +#ifdef CONFIG_CHOPSTIX
5209 +       struct thread_info      *owner;
5210 +#endif
5211  #endif
5212  #ifdef CONFIG_DEBUG_LOCK_ALLOC
5213         struct lockdep_map      dep_map;
5214 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/sched.h linux-2.6.22-590/include/linux/sched.h
5215 --- linux-2.6.22-580/include/linux/sched.h      2009-02-18 09:56:02.000000000 -0500
5216 +++ linux-2.6.22-590/include/linux/sched.h      2009-02-18 09:57:23.000000000 -0500
5217 @@ -850,6 +850,10 @@
5218  #endif
5219         unsigned long sleep_avg;
5220         unsigned long long timestamp, last_ran;
5221 +#ifdef CONFIG_CHOPSTIX
5222 +       unsigned long last_interrupted, last_ran_j;
5223 +#endif
5224 +
5225         unsigned long long sched_time; /* sched_clock time spent running */
5226         enum sleep_type sleep_type;
5227  
5228 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/include/linux/sched.h.orig linux-2.6.22-590/include/linux/sched.h.orig
5229 --- linux-2.6.22-580/include/linux/sched.h.orig 1969-12-31 19:00:00.000000000 -0500
5230 +++ linux-2.6.22-590/include/linux/sched.h.orig 2009-02-18 09:56:02.000000000 -0500
5231 @@ -0,0 +1,1737 @@
5232 +#ifndef _LINUX_SCHED_H
5233 +#define _LINUX_SCHED_H
5234 +
5235 +#include <linux/auxvec.h>      /* For AT_VECTOR_SIZE */
5236 +
5237 +/*
5238 + * cloning flags:
5239 + */
5240 +#define CSIGNAL                0x000000ff      /* signal mask to be sent at exit */
5241 +#define CLONE_VM       0x00000100      /* set if VM shared between processes */
5242 +#define CLONE_FS       0x00000200      /* set if fs info shared between processes */
5243 +#define CLONE_FILES    0x00000400      /* set if open files shared between processes */
5244 +#define CLONE_SIGHAND  0x00000800      /* set if signal handlers and blocked signals shared */
5245 +#define CLONE_PTRACE   0x00002000      /* set if we want to let tracing continue on the child too */
5246 +#define CLONE_VFORK    0x00004000      /* set if the parent wants the child to wake it up on mm_release */
5247 +#define CLONE_PARENT   0x00008000      /* set if we want to have the same parent as the cloner */
5248 +#define CLONE_THREAD   0x00010000      /* Same thread group? */
5249 +#define CLONE_NEWNS    0x00020000      /* New namespace group? */
5250 +#define CLONE_SYSVSEM  0x00040000      /* share system V SEM_UNDO semantics */
5251 +#define CLONE_SETTLS   0x00080000      /* create a new TLS for the child */
5252 +#define CLONE_PARENT_SETTID    0x00100000      /* set the TID in the parent */
5253 +#define CLONE_CHILD_CLEARTID   0x00200000      /* clear the TID in the child */
5254 +#define CLONE_DETACHED         0x00400000      /* Unused, ignored */
5255 +#define CLONE_UNTRACED         0x00800000      /* set if the tracing process can't force CLONE_PTRACE on this clone */
5256 +#define CLONE_CHILD_SETTID     0x01000000      /* set the TID in the child */
5257 +#define CLONE_STOPPED          0x02000000      /* Start in stopped state */
5258 +#define CLONE_NEWUTS           0x04000000      /* New utsname group? */
5259 +#define CLONE_NEWIPC           0x08000000      /* New ipcs */
5260 +#define CLONE_KTHREAD          0x10000000      /* clone a kernel thread */
5261 +
5262 +/*
5263 + * Scheduling policies
5264 + */
5265 +#define SCHED_NORMAL           0
5266 +#define SCHED_FIFO             1
5267 +#define SCHED_RR               2
5268 +#define SCHED_BATCH            3
5269 +
5270 +#ifdef __KERNEL__
5271 +
5272 +struct sched_param {
5273 +       int sched_priority;
5274 +};
5275 +
5276 +#include <asm/param.h> /* for HZ */
5277 +
5278 +#include <linux/capability.h>
5279 +#include <linux/threads.h>
5280 +#include <linux/kernel.h>
5281 +#include <linux/types.h>
5282 +#include <linux/timex.h>
5283 +#include <linux/jiffies.h>
5284 +#include <linux/rbtree.h>
5285 +#include <linux/thread_info.h>
5286 +#include <linux/cpumask.h>
5287 +#include <linux/errno.h>
5288 +#include <linux/nodemask.h>
5289 +
5290 +#include <asm/system.h>
5291 +#include <asm/semaphore.h>
5292 +#include <asm/page.h>
5293 +#include <asm/ptrace.h>
5294 +#include <asm/mmu.h>
5295 +#include <asm/cputime.h>
5296 +
5297 +#include <linux/smp.h>
5298 +#include <linux/sem.h>
5299 +#include <linux/signal.h>
5300 +#include <linux/securebits.h>
5301 +#include <linux/fs_struct.h>
5302 +#include <linux/compiler.h>
5303 +#include <linux/completion.h>
5304 +#include <linux/pid.h>
5305 +#include <linux/percpu.h>
5306 +#include <linux/topology.h>
5307 +#include <linux/seccomp.h>
5308 +#include <linux/rcupdate.h>
5309 +#include <linux/futex.h>
5310 +#include <linux/rtmutex.h>
5311 +
5312 +#include <linux/time.h>
5313 +#include <linux/param.h>
5314 +#include <linux/resource.h>
5315 +#include <linux/timer.h>
5316 +#include <linux/hrtimer.h>
5317 +#include <linux/task_io_accounting.h>
5318 +
5319 +#include <asm/processor.h>
5320 +
5321 +struct exec_domain;
5322 +struct futex_pi_state;
5323 +struct bio;
5324 +
5325 +/*
5326 + * List of flags we want to share for kernel threads,
5327 + * if only because they are not used by them anyway.
5328 + */
5329 +#define CLONE_KERNEL   (CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_KTHREAD)
5330 +
5331 +/*
5332 + * These are the constant used to fake the fixed-point load-average
5333 + * counting. Some notes:
5334 + *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
5335 + *    a load-average precision of 10 bits integer + 11 bits fractional
5336 + *  - if you want to count load-averages more often, you need more
5337 + *    precision, or rounding will get you. With 2-second counting freq,
5338 + *    the EXP_n values would be 1981, 2034 and 2043 if still using only
5339 + *    11 bit fractions.
5340 + */
5341 +extern unsigned long avenrun[];                /* Load averages */
5342 +
5343 +#define FSHIFT         11              /* nr of bits of precision */
5344 +#define FIXED_1                (1<<FSHIFT)     /* 1.0 as fixed-point */
5345 +#define LOAD_FREQ      (5*HZ)          /* 5 sec intervals */
5346 +#define EXP_1          1884            /* 1/exp(5sec/1min) as fixed-point */
5347 +#define EXP_5          2014            /* 1/exp(5sec/5min) */
5348 +#define EXP_15         2037            /* 1/exp(5sec/15min) */
5349 +
5350 +#define CALC_LOAD(load,exp,n) \
5351 +       load *= exp; \
5352 +       load += n*(FIXED_1-exp); \
5353 +       load >>= FSHIFT;
5354 +
5355 +extern unsigned long total_forks;
5356 +extern int nr_threads;
5357 +DECLARE_PER_CPU(unsigned long, process_counts);
5358 +extern int nr_processes(void);
5359 +extern unsigned long nr_running(void);
5360 +extern unsigned long nr_uninterruptible(void);
5361 +extern unsigned long nr_active(void);
5362 +extern unsigned long nr_iowait(void);
5363 +extern unsigned long weighted_cpuload(const int cpu);
5364 +
5365 +
5366 +/*
5367 + * Task state bitmask. NOTE! These bits are also
5368 + * encoded in fs/proc/array.c: get_task_state().
5369 + *
5370 + * We have two separate sets of flags: task->state
5371 + * is about runnability, while task->exit_state are
5372 + * about the task exiting. Confusing, but this way
5373 + * modifying one set can't modify the other one by
5374 + * mistake.
5375 + */
5376 +#define TASK_RUNNING           0
5377 +#define TASK_INTERRUPTIBLE     1
5378 +#define TASK_UNINTERRUPTIBLE   2
5379 +#define TASK_STOPPED           4
5380 +#define TASK_TRACED            8
5381 +#define TASK_ONHOLD            16
5382 +/* in tsk->exit_state */
5383 +#define EXIT_ZOMBIE            32
5384 +#define EXIT_DEAD              64
5385 +/* in tsk->state again */
5386 +#define TASK_NONINTERACTIVE    128
5387 +#define TASK_DEAD              256
5388 +
5389 +#define __set_task_state(tsk, state_value)             \
5390 +       do { (tsk)->state = (state_value); } while (0)
5391 +#define set_task_state(tsk, state_value)               \
5392 +       set_mb((tsk)->state, (state_value))
5393 +
5394 +/*
5395 + * set_current_state() includes a barrier so that the write of current->state
5396 + * is correctly serialised wrt the caller's subsequent test of whether to
5397 + * actually sleep:
5398 + *
5399 + *     set_current_state(TASK_UNINTERRUPTIBLE);
5400 + *     if (do_i_need_to_sleep())
5401 + *             schedule();
5402 + *
5403 + * If the caller does not need such serialisation then use __set_current_state()
5404 + */
5405 +#define __set_current_state(state_value)                       \
5406 +       do { current->state = (state_value); } while (0)
5407 +#define set_current_state(state_value)         \
5408 +       set_mb(current->state, (state_value))
5409 +
5410 +/* Task command name length */
5411 +#define TASK_COMM_LEN 16
5412 +
5413 +#include <linux/spinlock.h>
5414 +
5415 +/*
5416 + * This serializes "schedule()" and also protects
5417 + * the run-queue from deletions/modifications (but
5418 + * _adding_ to the beginning of the run-queue has
5419 + * a separate lock).
5420 + */
5421 +extern rwlock_t tasklist_lock;
5422 +extern spinlock_t mmlist_lock;
5423 +
5424 +struct task_struct;
5425 +
5426 +extern void sched_init(void);
5427 +extern void sched_init_smp(void);
5428 +extern void init_idle(struct task_struct *idle, int cpu);
5429 +
5430 +extern cpumask_t nohz_cpu_mask;
5431 +#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
5432 +extern int select_nohz_load_balancer(int cpu);
5433 +#else
5434 +static inline int select_nohz_load_balancer(int cpu)
5435 +{
5436 +       return 0;
5437 +}
5438 +#endif
5439 +
5440 +/*
5441 + * Only dump TASK_* tasks. (0 for all tasks)
5442 + */
5443 +extern void show_state_filter(unsigned long state_filter);
5444 +
5445 +static inline void show_state(void)
5446 +{
5447 +       show_state_filter(0);
5448 +}
5449 +
5450 +extern void show_regs(struct pt_regs *);
5451 +
5452 +/*
5453 + * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
5454 + * task), SP is the stack pointer of the first frame that should be shown in the back
5455 + * trace (or NULL if the entire call-chain of the task should be shown).
5456 + */
5457 +extern void show_stack(struct task_struct *task, unsigned long *sp);
5458 +
5459 +void io_schedule(void);
5460 +long io_schedule_timeout(long timeout);
5461 +
5462 +extern void cpu_init (void);
5463 +extern void trap_init(void);
5464 +extern void update_process_times(int user);
5465 +extern void scheduler_tick(void);
5466 +
5467 +#ifdef CONFIG_DETECT_SOFTLOCKUP
5468 +extern void softlockup_tick(void);
5469 +extern void spawn_softlockup_task(void);
5470 +extern void touch_softlockup_watchdog(void);
5471 +extern void touch_all_softlockup_watchdogs(void);
5472 +#else
5473 +static inline void softlockup_tick(void)
5474 +{
5475 +}
5476 +static inline void spawn_softlockup_task(void)
5477 +{
5478 +}
5479 +static inline void touch_softlockup_watchdog(void)
5480 +{
5481 +}
5482 +static inline void touch_all_softlockup_watchdogs(void)
5483 +{
5484 +}
5485 +#endif
5486 +
5487 +
5488 +/* Attach to any functions which should be ignored in wchan output. */
5489 +#define __sched                __attribute__((__section__(".sched.text")))
5490 +/* Is this address in the __sched functions? */
5491 +extern int in_sched_functions(unsigned long addr);
5492 +
5493 +#define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
5494 +extern signed long FASTCALL(schedule_timeout(signed long timeout));
5495 +extern signed long schedule_timeout_interruptible(signed long timeout);
5496 +extern signed long schedule_timeout_uninterruptible(signed long timeout);
5497 +asmlinkage void schedule(void);
5498 +
5499 +struct nsproxy;
5500 +
5501 +/* Maximum number of active map areas.. This is a random (large) number */
5502 +#define DEFAULT_MAX_MAP_COUNT  65536
5503 +
5504 +extern int sysctl_max_map_count;
5505 +
5506 +#include <linux/aio.h>
5507 +
5508 +extern unsigned long
5509 +arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
5510 +                      unsigned long, unsigned long);
5511 +extern unsigned long
5512 +arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
5513 +                         unsigned long len, unsigned long pgoff,
5514 +                         unsigned long flags);
5515 +extern void arch_unmap_area(struct mm_struct *, unsigned long);
5516 +extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
5517 +
5518 +#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
5519 +/*
5520 + * The mm counters are not protected by its page_table_lock,
5521 + * so must be incremented atomically.
5522 + */
5523 +typedef atomic_long_t mm_counter_t;
5524 +#define __set_mm_counter(mm, member, value) \
5525 +       atomic_long_set(&(mm)->_##member, value)
5526 +#define get_mm_counter(mm, member) \
5527 +       ((unsigned long)atomic_long_read(&(mm)->_##member))
5528 +
5529 +#else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
5530 +/*
5531 + * The mm counters are protected by its page_table_lock,
5532 + * so can be incremented directly.
5533 + */
5534 +typedef unsigned long mm_counter_t;
5535 +#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
5536 +#define get_mm_counter(mm, member) ((mm)->_##member)
5537 +
5538 +#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
5539 +
5540 +#define set_mm_counter(mm, member, value) \
5541 +       vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
5542 +#define add_mm_counter(mm, member, value) \
5543 +       vx_ ## member ## pages_add((mm), (value))
5544 +#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
5545 +#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
5546 +
5547 +#define get_mm_rss(mm)                                 \
5548 +       (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
5549 +#define update_hiwater_rss(mm) do {                    \
5550 +       unsigned long _rss = get_mm_rss(mm);            \
5551 +       if ((mm)->hiwater_rss < _rss)                   \
5552 +               (mm)->hiwater_rss = _rss;               \
5553 +} while (0)
5554 +#define update_hiwater_vm(mm)  do {                    \
5555 +       if ((mm)->hiwater_vm < (mm)->total_vm)          \
5556 +               (mm)->hiwater_vm = (mm)->total_vm;      \
5557 +} while (0)
5558 +
5559 +struct mm_struct {
5560 +       struct vm_area_struct * mmap;           /* list of VMAs */
5561 +       struct rb_root mm_rb;
5562 +       struct vm_area_struct * mmap_cache;     /* last find_vma result */
5563 +       unsigned long (*get_unmapped_area) (struct file *filp,
5564 +                               unsigned long addr, unsigned long len,
5565 +                               unsigned long pgoff, unsigned long flags);
5566 +       void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
5567 +       unsigned long mmap_base;                /* base of mmap area */
5568 +       unsigned long task_size;                /* size of task vm space */
5569 +       unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */
5570 +       unsigned long free_area_cache;          /* first hole of size cached_hole_size or larger */
5571 +       pgd_t * pgd;
5572 +       atomic_t mm_users;                      /* How many users with user space? */
5573 +       atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
5574 +       int map_count;                          /* number of VMAs */
5575 +       struct rw_semaphore mmap_sem;
5576 +       spinlock_t page_table_lock;             /* Protects page tables and some counters */
5577 +
5578 +       struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
5579 +                                                * together off init_mm.mmlist, and are protected
5580 +                                                * by mmlist_lock
5581 +                                                */
5582 +
5583 +       /* Special counters, in some configurations protected by the
5584 +        * page_table_lock, in other configurations by being atomic.
5585 +        */
5586 +       mm_counter_t _file_rss;
5587 +       mm_counter_t _anon_rss;
5588 +
5589 +       unsigned long hiwater_rss;      /* High-watermark of RSS usage */
5590 +       unsigned long hiwater_vm;       /* High-water virtual memory usage */
5591 +
5592 +       unsigned long total_vm, locked_vm, shared_vm, exec_vm;
5593 +       unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
5594 +       unsigned long start_code, end_code, start_data, end_data;
5595 +       unsigned long start_brk, brk, start_stack;
5596 +       unsigned long arg_start, arg_end, env_start, env_end;
5597 +
5598 +       unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
5599 +
5600 +       cpumask_t cpu_vm_mask;
5601 +
5602 +       /* Architecture-specific MM context */
5603 +       mm_context_t context;
5604 +       struct vx_info *mm_vx_info;
5605 +
5606 +       /* Swap token stuff */
5607 +       /*
5608 +        * Last value of global fault stamp as seen by this process.
5609 +        * In other words, this value gives an indication of how long
5610 +        * it has been since this task got the token.
5611 +        * Look at mm/thrash.c
5612 +        */
5613 +       unsigned int faultstamp;
5614 +       unsigned int token_priority;
5615 +       unsigned int last_interval;
5616 +
5617 +       unsigned char dumpable:2;
5618 +
5619 +       /* coredumping support */
5620 +       int core_waiters;
5621 +       struct completion *core_startup_done, core_done;
5622 +
5623 +       /* aio bits */
5624 +       rwlock_t                ioctx_list_lock;
5625 +       struct kioctx           *ioctx_list;
5626 +};
5627 +
5628 +struct sighand_struct {
5629 +       atomic_t                count;
5630 +       struct k_sigaction      action[_NSIG];
5631 +       spinlock_t              siglock;
5632 +       struct list_head        signalfd_list;
5633 +};
5634 +
5635 +struct pacct_struct {
5636 +       int                     ac_flag;
5637 +       long                    ac_exitcode;
5638 +       unsigned long           ac_mem;
5639 +       cputime_t               ac_utime, ac_stime;
5640 +       unsigned long           ac_minflt, ac_majflt;
5641 +};
5642 +
5643 +/*
5644 + * NOTE! "signal_struct" does not have it's own
5645 + * locking, because a shared signal_struct always
5646 + * implies a shared sighand_struct, so locking
5647 + * sighand_struct is always a proper superset of
5648 + * the locking of signal_struct.
5649 + */
5650 +struct signal_struct {
5651 +       atomic_t                count;
5652 +       atomic_t                live;
5653 +
5654 +       wait_queue_head_t       wait_chldexit;  /* for wait4() */
5655 +
5656 +       /* current thread group signal load-balancing target: */
5657 +       struct task_struct      *curr_target;
5658 +
5659 +       /* shared signal handling: */
5660 +       struct sigpending       shared_pending;
5661 +
5662 +       /* thread group exit support */
5663 +       int                     group_exit_code;
5664 +       /* overloaded:
5665 +        * - notify group_exit_task when ->count is equal to notify_count
5666 +        * - everyone except group_exit_task is stopped during signal delivery
5667 +        *   of fatal signals, group_exit_task processes the signal.
5668 +        */
5669 +       struct task_struct      *group_exit_task;
5670 +       int                     notify_count;
5671 +
5672 +       /* thread group stop support, overloads group_exit_code too */
5673 +       int                     group_stop_count;
5674 +       unsigned int            flags; /* see SIGNAL_* flags below */
5675 +
5676 +       /* POSIX.1b Interval Timers */
5677 +       struct list_head posix_timers;
5678 +
5679 +       /* ITIMER_REAL timer for the process */
5680 +       struct hrtimer real_timer;
5681 +       struct task_struct *tsk;
5682 +       ktime_t it_real_incr;
5683 +
5684 +       /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
5685 +       cputime_t it_prof_expires, it_virt_expires;
5686 +       cputime_t it_prof_incr, it_virt_incr;
5687 +
5688 +       /* job control IDs */
5689 +       pid_t pgrp;
5690 +       struct pid *tty_old_pgrp;
5691 +
5692 +       union {
5693 +               pid_t session __deprecated;
5694 +               pid_t __session;
5695 +       };
5696 +
5697 +       /* boolean value for session group leader */
5698 +       int leader;
5699 +
5700 +       struct tty_struct *tty; /* NULL if no tty */
5701 +
5702 +       /*
5703 +        * Cumulative resource counters for dead threads in the group,
5704 +        * and for reaped dead child processes forked by this group.
5705 +        * Live threads maintain their own counters and add to these
5706 +        * in __exit_signal, except for the group leader.
5707 +        */
5708 +       cputime_t utime, stime, cutime, cstime;
5709 +       unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
5710 +       unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
5711 +       unsigned long inblock, oublock, cinblock, coublock;
5712 +
5713 +       /*
5714 +        * Cumulative ns of scheduled CPU time for dead threads in the
5715 +        * group, not including a zombie group leader.  (This only differs
5716 +        * from jiffies_to_ns(utime + stime) if sched_clock uses something
5717 +        * other than jiffies.)
5718 +        */
5719 +       unsigned long long sched_time;
5720 +
5721 +       /*
5722 +        * We don't bother to synchronize most readers of this at all,
5723 +        * because there is no reader checking a limit that actually needs
5724 +        * to get both rlim_cur and rlim_max atomically, and either one
5725 +        * alone is a single word that can safely be read normally.
5726 +        * getrlimit/setrlimit use task_lock(current->group_leader) to
5727 +        * protect this instead of the siglock, because they really
5728 +        * have no need to disable irqs.
5729 +        */
5730 +       struct rlimit rlim[RLIM_NLIMITS];
5731 +
5732 +       struct list_head cpu_timers[3];
5733 +
5734 +       /* keep the process-shared keyrings here so that they do the right
5735 +        * thing in threads created with CLONE_THREAD */
5736 +#ifdef CONFIG_KEYS
5737 +       struct key *session_keyring;    /* keyring inherited over fork */
5738 +       struct key *process_keyring;    /* keyring private to this process */
5739 +#endif
5740 +#ifdef CONFIG_BSD_PROCESS_ACCT
5741 +       struct pacct_struct pacct;      /* per-process accounting information */
5742 +#endif
5743 +#ifdef CONFIG_TASKSTATS
5744 +       struct taskstats *stats;
5745 +#endif
5746 +};
5747 +
5748 +/* Context switch must be unlocked if interrupts are to be enabled */
5749 +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
5750 +# define __ARCH_WANT_UNLOCKED_CTXSW
5751 +#endif
5752 +
5753 +/*
5754 + * Bits in flags field of signal_struct.
5755 + */
5756 +#define SIGNAL_STOP_STOPPED    0x00000001 /* job control stop in effect */
5757 +#define SIGNAL_STOP_DEQUEUED   0x00000002 /* stop signal dequeued */
5758 +#define SIGNAL_STOP_CONTINUED  0x00000004 /* SIGCONT since WCONTINUED reap */
5759 +#define SIGNAL_GROUP_EXIT      0x00000008 /* group exit in progress */
5760 +
5761 +
5762 +/*
5763 + * Priority of a process goes from 0..MAX_PRIO-1, valid RT
5764 + * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
5765 + * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
5766 + * values are inverted: lower p->prio value means higher priority.
5767 + *
5768 + * The MAX_USER_RT_PRIO value allows the actual maximum
5769 + * RT priority to be separate from the value exported to
5770 + * user-space.  This allows kernel threads to set their
5771 + * priority to a value higher than any user task. Note:
5772 + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
5773 + */
5774 +
5775 +#define MAX_USER_RT_PRIO       100
5776 +#define MAX_RT_PRIO            MAX_USER_RT_PRIO
5777 +
5778 +#define MAX_PRIO               (MAX_RT_PRIO + 40)
5779 +
5780 +#define rt_prio(prio)          unlikely((prio) < MAX_RT_PRIO)
5781 +#define rt_task(p)             rt_prio((p)->prio)
5782 +#define batch_task(p)          (unlikely((p)->policy == SCHED_BATCH))
5783 +#define is_rt_policy(p)                ((p) != SCHED_NORMAL && (p) != SCHED_BATCH)
5784 +#define has_rt_policy(p)       unlikely(is_rt_policy((p)->policy))
5785 +
5786 +/*
5787 + * Some day this will be a full-fledged user tracking system..
5788 + */
5789 +struct user_struct {
5790 +       atomic_t __count;       /* reference count */
5791 +       atomic_t processes;     /* How many processes does this user have? */
5792 +       atomic_t files;         /* How many open files does this user have? */
5793 +       atomic_t sigpending;    /* How many pending signals does this user have? */
5794 +#ifdef CONFIG_INOTIFY_USER
5795 +       atomic_t inotify_watches; /* How many inotify watches does this user have? */
5796 +       atomic_t inotify_devs;  /* How many inotify devs does this user have opened? */
5797 +#endif
5798 +       /* protected by mq_lock */
5799 +       unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
5800 +       unsigned long locked_shm; /* How many pages of mlocked shm ? */
5801 +
5802 +#ifdef CONFIG_KEYS
5803 +       struct key *uid_keyring;        /* UID specific keyring */
5804 +       struct key *session_keyring;    /* UID's default session keyring */
5805 +#endif
5806 +
5807 +       /* Hash table maintenance information */
5808 +       struct list_head uidhash_list;
5809 +       uid_t uid;
5810 +       xid_t xid;
5811 +};
5812 +
5813 +extern struct user_struct *find_user(xid_t, uid_t);
5814 +
5815 +extern struct user_struct root_user;
5816 +#define INIT_USER (&root_user)
5817 +
5818 +struct backing_dev_info;
5819 +struct reclaim_state;
5820 +
5821 +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
5822 +struct sched_info {
5823 +       /* cumulative counters */
5824 +       unsigned long   cpu_time,       /* time spent on the cpu */
5825 +                       run_delay,      /* time spent waiting on a runqueue */
5826 +                       pcnt;           /* # of timeslices run on this cpu */
5827 +
5828 +       /* timestamps */
5829 +       unsigned long   last_arrival,   /* when we last ran on a cpu */
5830 +                       last_queued;    /* when we were last queued to run */
5831 +};
5832 +#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
5833 +
5834 +#ifdef CONFIG_SCHEDSTATS
5835 +extern const struct file_operations proc_schedstat_operations;
5836 +#endif /* CONFIG_SCHEDSTATS */
5837 +
5838 +#ifdef CONFIG_TASK_DELAY_ACCT
5839 +struct task_delay_info {
5840 +       spinlock_t      lock;
5841 +       unsigned int    flags;  /* Private per-task flags */
5842 +
5843 +       /* For each stat XXX, add following, aligned appropriately
5844 +        *
5845 +        * struct timespec XXX_start, XXX_end;
5846 +        * u64 XXX_delay;
5847 +        * u32 XXX_count;
5848 +        *
5849 +        * Atomicity of updates to XXX_delay, XXX_count protected by
5850 +        * single lock above (split into XXX_lock if contention is an issue).
5851 +        */
5852 +
5853 +       /*
5854 +        * XXX_count is incremented on every XXX operation, the delay
5855 +        * associated with the operation is added to XXX_delay.
5856 +        * XXX_delay contains the accumulated delay time in nanoseconds.
5857 +        */
5858 +       struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
5859 +       u64 blkio_delay;        /* wait for sync block io completion */
5860 +       u64 swapin_delay;       /* wait for swapin block io completion */
5861 +       u32 blkio_count;        /* total count of the number of sync block */
5862 +                               /* io operations performed */
5863 +       u32 swapin_count;       /* total count of the number of swapin block */
5864 +                               /* io operations performed */
5865 +};
5866 +#endif /* CONFIG_TASK_DELAY_ACCT */
5867 +
5868 +static inline int sched_info_on(void)
5869 +{
5870 +#ifdef CONFIG_SCHEDSTATS
5871 +       return 1;
5872 +#elif defined(CONFIG_TASK_DELAY_ACCT)
5873 +       extern int delayacct_on;
5874 +       return delayacct_on;
5875 +#else
5876 +       return 0;
5877 +#endif
5878 +}
5879 +
5880 +enum idle_type
5881 +{
5882 +       SCHED_IDLE,
5883 +       NOT_IDLE,
5884 +       NEWLY_IDLE,
5885 +       MAX_IDLE_TYPES
5886 +};
5887 +
5888 +/*
5889 + * sched-domains (multiprocessor balancing) declarations:
5890 + */
5891 +#define SCHED_LOAD_SCALE       128UL   /* increase resolution of load */
5892 +
5893 +#ifdef CONFIG_SMP
5894 +#define SD_LOAD_BALANCE                1       /* Do load balancing on this domain. */
5895 +#define SD_BALANCE_NEWIDLE     2       /* Balance when about to become idle */
5896 +#define SD_BALANCE_EXEC                4       /* Balance on exec */
5897 +#define SD_BALANCE_FORK                8       /* Balance on fork, clone */
5898 +#define SD_WAKE_IDLE           16      /* Wake to idle CPU on task wakeup */
5899 +#define SD_WAKE_AFFINE         32      /* Wake task to waking CPU */
5900 +#define SD_WAKE_BALANCE                64      /* Perform balancing at task wakeup */
5901 +#define SD_SHARE_CPUPOWER      128     /* Domain members share cpu power */
5902 +#define SD_POWERSAVINGS_BALANCE        256     /* Balance for power savings */
5903 +#define SD_SHARE_PKG_RESOURCES 512     /* Domain members share cpu pkg resources */
5904 +#define SD_SERIALIZE           1024    /* Only a single load balancing instance */
5905 +
5906 +#define BALANCE_FOR_MC_POWER   \
5907 +       (sched_smt_power_savings ? SD_POWERSAVINGS_BALANCE : 0)
5908 +
5909 +#define BALANCE_FOR_PKG_POWER  \
5910 +       ((sched_mc_power_savings || sched_smt_power_savings) ?  \
5911 +        SD_POWERSAVINGS_BALANCE : 0)
5912 +
5913 +#define test_sd_parent(sd, flag)       ((sd->parent &&         \
5914 +                                        (sd->parent->flags & flag)) ? 1 : 0)
5915 +
5916 +
5917 +struct sched_group {
5918 +       struct sched_group *next;       /* Must be a circular list */
5919 +       cpumask_t cpumask;
5920 +
5921 +       /*
5922 +        * CPU power of this group, SCHED_LOAD_SCALE being max power for a
5923 +        * single CPU. This is read only (except for setup, hotplug CPU).
5924 +        * Note : Never change cpu_power without recompute its reciprocal
5925 +        */
5926 +       unsigned int __cpu_power;
5927 +       /*
5928 +        * reciprocal value of cpu_power to avoid expensive divides
5929 +        * (see include/linux/reciprocal_div.h)
5930 +        */
5931 +       u32 reciprocal_cpu_power;
5932 +};
5933 +
5934 +struct sched_domain {
5935 +       /* These fields must be setup */
5936 +       struct sched_domain *parent;    /* top domain must be null terminated */
5937 +       struct sched_domain *child;     /* bottom domain must be null terminated */
5938 +       struct sched_group *groups;     /* the balancing groups of the domain */
5939 +       cpumask_t span;                 /* span of all CPUs in this domain */
5940 +       unsigned long min_interval;     /* Minimum balance interval ms */
5941 +       unsigned long max_interval;     /* Maximum balance interval ms */
5942 +       unsigned int busy_factor;       /* less balancing by factor if busy */
5943 +       unsigned int imbalance_pct;     /* No balance until over watermark */
5944 +       unsigned long long cache_hot_time; /* Task considered cache hot (ns) */
5945 +       unsigned int cache_nice_tries;  /* Leave cache hot tasks for # tries */
5946 +       unsigned int busy_idx;
5947 +       unsigned int idle_idx;
5948 +       unsigned int newidle_idx;
5949 +       unsigned int wake_idx;
5950 +       unsigned int forkexec_idx;
5951 +       int flags;                      /* See SD_* */
5952 +
5953 +       /* Runtime fields. */
5954 +       unsigned long last_balance;     /* init to jiffies. units in jiffies */
5955 +       unsigned int balance_interval;  /* initialise to 1. units in ms. */
5956 +       unsigned int nr_balance_failed; /* initialise to 0 */
5957 +
5958 +#ifdef CONFIG_SCHEDSTATS
5959 +       /* load_balance() stats */
5960 +       unsigned long lb_cnt[MAX_IDLE_TYPES];
5961 +       unsigned long lb_failed[MAX_IDLE_TYPES];
5962 +       unsigned long lb_balanced[MAX_IDLE_TYPES];
5963 +       unsigned long lb_imbalance[MAX_IDLE_TYPES];
5964 +       unsigned long lb_gained[MAX_IDLE_TYPES];
5965 +       unsigned long lb_hot_gained[MAX_IDLE_TYPES];
5966 +       unsigned long lb_nobusyg[MAX_IDLE_TYPES];
5967 +       unsigned long lb_nobusyq[MAX_IDLE_TYPES];
5968 +
5969 +       /* Active load balancing */
5970 +       unsigned long alb_cnt;
5971 +       unsigned long alb_failed;
5972 +       unsigned long alb_pushed;
5973 +
5974 +       /* SD_BALANCE_EXEC stats */
5975 +       unsigned long sbe_cnt;
5976 +       unsigned long sbe_balanced;
5977 +       unsigned long sbe_pushed;
5978 +
5979 +       /* SD_BALANCE_FORK stats */
5980 +       unsigned long sbf_cnt;
5981 +       unsigned long sbf_balanced;
5982 +       unsigned long sbf_pushed;
5983 +
5984 +       /* try_to_wake_up() stats */
5985 +       unsigned long ttwu_wake_remote;
5986 +       unsigned long ttwu_move_affine;
5987 +       unsigned long ttwu_move_balance;
5988 +#endif
5989 +};
5990 +
5991 +extern int partition_sched_domains(cpumask_t *partition1,
5992 +                                   cpumask_t *partition2);
5993 +
5994 +/*
5995 + * Maximum cache size the migration-costs auto-tuning code will
5996 + * search from:
5997 + */
5998 +extern unsigned int max_cache_size;
5999 +
6000 +#endif /* CONFIG_SMP */
6001 +
6002 +
6003 +struct io_context;                     /* See blkdev.h */
6004 +struct cpuset;
6005 +
6006 +#define NGROUPS_SMALL          32
6007 +#define NGROUPS_PER_BLOCK      ((int)(PAGE_SIZE / sizeof(gid_t)))
6008 +struct group_info {
6009 +       int ngroups;
6010 +       atomic_t usage;
6011 +       gid_t small_block[NGROUPS_SMALL];
6012 +       int nblocks;
6013 +       gid_t *blocks[0];
6014 +};
6015 +
6016 +/*
6017 + * get_group_info() must be called with the owning task locked (via task_lock())
6018 + * when task != current.  The reason being that the vast majority of callers are
6019 + * looking at current->group_info, which can not be changed except by the
6020 + * current task.  Changing current->group_info requires the task lock, too.
6021 + */
6022 +#define get_group_info(group_info) do { \
6023 +       atomic_inc(&(group_info)->usage); \
6024 +} while (0)
6025 +
6026 +#define put_group_info(group_info) do { \
6027 +       if (atomic_dec_and_test(&(group_info)->usage)) \
6028 +               groups_free(group_info); \
6029 +} while (0)
6030 +
6031 +extern struct group_info *groups_alloc(int gidsetsize);
6032 +extern void groups_free(struct group_info *group_info);
6033 +extern int set_current_groups(struct group_info *group_info);
6034 +extern int groups_search(struct group_info *group_info, gid_t grp);
6035 +/* access the groups "array" with this macro */
6036 +#define GROUP_AT(gi, i) \
6037 +    ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
6038 +
6039 +#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
6040 +extern void prefetch_stack(struct task_struct *t);
6041 +#else
6042 +static inline void prefetch_stack(struct task_struct *t) { }
6043 +#endif
6044 +
6045 +struct audit_context;          /* See audit.c */
6046 +struct mempolicy;
6047 +struct pipe_inode_info;
6048 +struct uts_namespace;
6049 +
6050 +enum sleep_type {
6051 +       SLEEP_NORMAL,
6052 +       SLEEP_NONINTERACTIVE,
6053 +       SLEEP_INTERACTIVE,
6054 +       SLEEP_INTERRUPTED,
6055 +};
6056 +
6057 +struct prio_array;
6058 +
6059 +struct task_struct {
6060 +       volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
6061 +       void *stack;
6062 +       atomic_t usage;
6063 +       unsigned int flags;     /* per process flags, defined below */
6064 +       unsigned int ptrace;
6065 +
6066 +       int lock_depth;         /* BKL lock depth */
6067 +
6068 +#ifdef CONFIG_SMP
6069 +#ifdef __ARCH_WANT_UNLOCKED_CTXSW
6070 +       int oncpu;
6071 +#endif
6072 +#endif
6073 +       int load_weight;        /* for niceness load balancing purposes */
6074 +       int prio, static_prio, normal_prio;
6075 +       struct list_head run_list;
6076 +       struct prio_array *array;
6077 +
6078 +       unsigned short ioprio;
6079 +#ifdef CONFIG_BLK_DEV_IO_TRACE
6080 +       unsigned int btrace_seq;
6081 +#endif
6082 +       unsigned long sleep_avg;
6083 +       unsigned long long timestamp, last_ran;
6084 +       unsigned long long sched_time; /* sched_clock time spent running */
6085 +       enum sleep_type sleep_type;
6086 +
6087 +       unsigned int policy;
6088 +       cpumask_t cpus_allowed;
6089 +       unsigned int time_slice, first_time_slice;
6090 +
6091 +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
6092 +       struct sched_info sched_info;
6093 +#endif
6094 +
6095 +       struct list_head tasks;
6096 +       /*
6097 +        * ptrace_list/ptrace_children forms the list of my children
6098 +        * that were stolen by a ptracer.
6099 +        */
6100 +       struct list_head ptrace_children;
6101 +       struct list_head ptrace_list;
6102 +
6103 +       struct mm_struct *mm, *active_mm;
6104 +
6105 +/* task state */
6106 +       struct linux_binfmt *binfmt;
6107 +       int exit_state;
6108 +       int exit_code, exit_signal;
6109 +       int pdeath_signal;  /*  The signal sent when the parent dies  */
6110 +       /* ??? */
6111 +       unsigned int personality;
6112 +       unsigned did_exec:1;
6113 +       pid_t pid;
6114 +       pid_t tgid;
6115 +
6116 +#ifdef CONFIG_CC_STACKPROTECTOR
6117 +       /* Canary value for the -fstack-protector gcc feature */
6118 +       unsigned long stack_canary;
6119 +#endif
6120 +       /* 
6121 +        * pointers to (original) parent process, youngest child, younger sibling,
6122 +        * older sibling, respectively.  (p->father can be replaced with 
6123 +        * p->parent->pid)
6124 +        */
6125 +       struct task_struct *real_parent; /* real parent process (when being debugged) */
6126 +       struct task_struct *parent;     /* parent process */
6127 +       /*
6128 +        * children/sibling forms the list of my children plus the
6129 +        * tasks I'm ptracing.
6130 +        */
6131 +       struct list_head children;      /* list of my children */
6132 +       struct list_head sibling;       /* linkage in my parent's children list */
6133 +       struct task_struct *group_leader;       /* threadgroup leader */
6134 +
6135 +       /* PID/PID hash table linkage. */
6136 +       struct pid_link pids[PIDTYPE_MAX];
6137 +       struct list_head thread_group;
6138 +
6139 +       struct completion *vfork_done;          /* for vfork() */
6140 +       int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
6141 +       int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
6142 +
6143 +       unsigned int rt_priority;
6144 +       cputime_t utime, stime;
6145 +       unsigned long nvcsw, nivcsw; /* context switch counts */
6146 +       struct timespec start_time;
6147 +/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
6148 +       unsigned long min_flt, maj_flt;
6149 +
6150 +       cputime_t it_prof_expires, it_virt_expires;
6151 +       unsigned long long it_sched_expires;
6152 +       struct list_head cpu_timers[3];
6153 +
6154 +/* process credentials */
6155 +       uid_t uid,euid,suid,fsuid;
6156 +       gid_t gid,egid,sgid,fsgid;
6157 +       struct group_info *group_info;
6158 +       kernel_cap_t   cap_effective, cap_inheritable, cap_permitted;
6159 +       unsigned keep_capabilities:1;
6160 +       struct user_struct *user;
6161 +#ifdef CONFIG_KEYS
6162 +       struct key *request_key_auth;   /* assumed request_key authority */
6163 +       struct key *thread_keyring;     /* keyring private to this thread */
6164 +       unsigned char jit_keyring;      /* default keyring to attach requested keys to */
6165 +#endif
6166 +       /*
6167 +        * fpu_counter contains the number of consecutive context switches
6168 +        * that the FPU is used. If this is over a threshold, the lazy fpu
6169 +        * saving becomes unlazy to save the trap. This is an unsigned char
6170 +        * so that after 256 times the counter wraps and the behavior turns
6171 +        * lazy again; this to deal with bursty apps that only use FPU for
6172 +        * a short time
6173 +        */
6174 +       unsigned char fpu_counter;
6175 +       int oomkilladj; /* OOM kill score adjustment (bit shift). */
6176 +       char comm[TASK_COMM_LEN]; /* executable name excluding path
6177 +                                    - access with [gs]et_task_comm (which lock
6178 +                                      it with task_lock())
6179 +                                    - initialized normally by flush_old_exec */
6180 +/* file system info */
6181 +       int link_count, total_link_count;
6182 +#ifdef CONFIG_SYSVIPC
6183 +/* ipc stuff */
6184 +       struct sysv_sem sysvsem;
6185 +#endif
6186 +/* CPU-specific state of this task */
6187 +       struct thread_struct thread;
6188 +/* filesystem information */
6189 +       struct fs_struct *fs;
6190 +/* open file information */
6191 +       struct files_struct *files;
6192 +/* namespaces */
6193 +       struct nsproxy *nsproxy;
6194 +/* signal handlers */
6195 +       struct signal_struct *signal;
6196 +       struct sighand_struct *sighand;
6197 +
6198 +       sigset_t blocked, real_blocked;
6199 +       sigset_t saved_sigmask;         /* To be restored with TIF_RESTORE_SIGMASK */
6200 +       struct sigpending pending;
6201 +
6202 +       unsigned long sas_ss_sp;
6203 +       size_t sas_ss_size;
6204 +       int (*notifier)(void *priv);
6205 +       void *notifier_data;
6206 +       sigset_t *notifier_mask;
6207 +       
6208 +       void *security;
6209 +       struct audit_context *audit_context;
6210 +
6211 +/* vserver context data */
6212 +       struct vx_info *vx_info;
6213 +       struct nx_info *nx_info;
6214 +
6215 +       xid_t xid;
6216 +       nid_t nid;
6217 +       tag_t tag;
6218 +
6219 +       seccomp_t seccomp;
6220 +
6221 +/* Thread group tracking */
6222 +       u32 parent_exec_id;
6223 +       u32 self_exec_id;
6224 +/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
6225 +       spinlock_t alloc_lock;
6226 +
6227 +       /* Protection of the PI data structures: */
6228 +       spinlock_t pi_lock;
6229 +
6230 +#ifdef CONFIG_RT_MUTEXES
6231 +       /* PI waiters blocked on a rt_mutex held by this task */
6232 +       struct plist_head pi_waiters;
6233 +       /* Deadlock detection and priority inheritance handling */
6234 +       struct rt_mutex_waiter *pi_blocked_on;
6235 +#endif
6236 +
6237 +#ifdef CONFIG_DEBUG_MUTEXES
6238 +       /* mutex deadlock detection */
6239 +       struct mutex_waiter *blocked_on;
6240 +#endif
6241 +#ifdef CONFIG_TRACE_IRQFLAGS
6242 +       unsigned int irq_events;
6243 +       int hardirqs_enabled;
6244 +       unsigned long hardirq_enable_ip;
6245 +       unsigned int hardirq_enable_event;
6246 +       unsigned long hardirq_disable_ip;
6247 +       unsigned int hardirq_disable_event;
6248 +       int softirqs_enabled;
6249 +       unsigned long softirq_disable_ip;
6250 +       unsigned int softirq_disable_event;
6251 +       unsigned long softirq_enable_ip;
6252 +       unsigned int softirq_enable_event;
6253 +       int hardirq_context;
6254 +       int softirq_context;
6255 +#endif
6256 +#ifdef CONFIG_LOCKDEP
6257 +# define MAX_LOCK_DEPTH 30UL
6258 +       u64 curr_chain_key;
6259 +       int lockdep_depth;
6260 +       struct held_lock held_locks[MAX_LOCK_DEPTH];
6261 +       unsigned int lockdep_recursion;
6262 +#endif
6263 +
6264 +/* journalling filesystem info */
6265 +       void *journal_info;
6266 +
6267 +/* stacked block device info */
6268 +       struct bio *bio_list, **bio_tail;
6269 +
6270 +/* VM state */
6271 +       struct reclaim_state *reclaim_state;
6272 +
6273 +       struct backing_dev_info *backing_dev_info;
6274 +
6275 +       struct io_context *io_context;
6276 +
6277 +       unsigned long ptrace_message;
6278 +       siginfo_t *last_siginfo; /* For ptrace use.  */
6279 +/*
6280 + * current io wait handle: wait queue entry to use for io waits
6281 + * If this thread is processing aio, this points at the waitqueue
6282 + * inside the currently handled kiocb. It may be NULL (i.e. default
6283 + * to a stack based synchronous wait) if its doing sync IO.
6284 + */
6285 +       wait_queue_t *io_wait;
6286 +#ifdef CONFIG_TASK_XACCT
6287 +/* i/o counters (bytes read/written, #syscalls) */
6288 +       u64 rchar, wchar, syscr, syscw;
6289 +#endif
6290 +       struct task_io_accounting ioac;
6291 +#if defined(CONFIG_TASK_XACCT)
6292 +       u64 acct_rss_mem1;      /* accumulated rss usage */
6293 +       u64 acct_vm_mem1;       /* accumulated virtual memory usage */
6294 +       cputime_t acct_stimexpd;/* stime since last update */
6295 +#endif
6296 +#ifdef CONFIG_NUMA
6297 +       struct mempolicy *mempolicy;
6298 +       short il_next;
6299 +#endif
6300 +#ifdef CONFIG_CPUSETS
6301 +       struct cpuset *cpuset;
6302 +       nodemask_t mems_allowed;
6303 +       int cpuset_mems_generation;
6304 +       int cpuset_mem_spread_rotor;
6305 +#endif
6306 +       struct robust_list_head __user *robust_list;
6307 +#ifdef CONFIG_COMPAT
6308 +       struct compat_robust_list_head __user *compat_robust_list;
6309 +#endif
6310 +       struct list_head pi_state_list;
6311 +       struct futex_pi_state *pi_state_cache;
6312 +
6313 +       atomic_t fs_excl;       /* holding fs exclusive resources */
6314 +       struct rcu_head rcu;
6315 +
6316 +       /*
6317 +        * cache last used pipe for splice
6318 +        */
6319 +       struct pipe_inode_info *splice_pipe;
6320 +#ifdef CONFIG_TASK_DELAY_ACCT
6321 +       struct task_delay_info *delays;
6322 +#endif
6323 +#ifdef CONFIG_FAULT_INJECTION
6324 +       int make_it_fail;
6325 +#endif
6326 +};
6327 +
6328 +static inline pid_t process_group(struct task_struct *tsk)
6329 +{
6330 +       return tsk->signal->pgrp;
6331 +}
6332 +
6333 +static inline pid_t signal_session(struct signal_struct *sig)
6334 +{
6335 +       return sig->__session;
6336 +}
6337 +
6338 +static inline pid_t process_session(struct task_struct *tsk)
6339 +{
6340 +       return signal_session(tsk->signal);
6341 +}
6342 +
6343 +static inline void set_signal_session(struct signal_struct *sig, pid_t session)
6344 +{
6345 +       sig->__session = session;
6346 +}
6347 +
6348 +static inline struct pid *task_pid(struct task_struct *task)
6349 +{
6350 +       return task->pids[PIDTYPE_PID].pid;
6351 +}
6352 +
6353 +static inline struct pid *task_tgid(struct task_struct *task)
6354 +{
6355 +       return task->group_leader->pids[PIDTYPE_PID].pid;
6356 +}
6357 +
6358 +static inline struct pid *task_pgrp(struct task_struct *task)
6359 +{
6360 +       return task->group_leader->pids[PIDTYPE_PGID].pid;
6361 +}
6362 +
6363 +static inline struct pid *task_session(struct task_struct *task)
6364 +{
6365 +       return task->group_leader->pids[PIDTYPE_SID].pid;
6366 +}
6367 +
6368 +/**
6369 + * pid_alive - check that a task structure is not stale
6370 + * @p: Task structure to be checked.
6371 + *
6372 + * Test if a process is not yet dead (at most zombie state)
6373 + * If pid_alive fails, then pointers within the task structure
6374 + * can be stale and must not be dereferenced.
6375 + */
6376 +static inline int pid_alive(struct task_struct *p)
6377 +{
6378 +       return p->pids[PIDTYPE_PID].pid != NULL;
6379 +}
6380 +
6381 +/**
6382 + * is_init - check if a task structure is init
6383 + * @tsk: Task structure to be checked.
6384 + *
6385 + * Check if a task structure is the first user space task the kernel created.
6386 + */
6387 +static inline int is_init(struct task_struct *tsk)
6388 +{
6389 +       return tsk->pid == 1;
6390 +}
6391 +
6392 +extern struct pid *cad_pid;
6393 +
6394 +extern void free_task(struct task_struct *tsk);
6395 +#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
6396 +
6397 +extern void __put_task_struct(struct task_struct *t);
6398 +
6399 +static inline void put_task_struct(struct task_struct *t)
6400 +{
6401 +       if (atomic_dec_and_test(&t->usage))
6402 +               __put_task_struct(t);
6403 +}
6404 +
6405 +/*
6406 + * Per process flags
6407 + */
6408 +#define PF_ALIGNWARN   0x00000001      /* Print alignment warning msgs */
6409 +                                       /* Not implemented yet, only for 486*/
6410 +#define PF_STARTING    0x00000002      /* being created */
6411 +#define PF_EXITING     0x00000004      /* getting shut down */
6412 +#define PF_EXITPIDONE  0x00000008      /* pi exit done on shut down */
6413 +#define PF_FORKNOEXEC  0x00000040      /* forked but didn't exec */
6414 +#define PF_SUPERPRIV   0x00000100      /* used super-user privileges */
6415 +#define PF_DUMPCORE    0x00000200      /* dumped core */
6416 +#define PF_SIGNALED    0x00000400      /* killed by a signal */
6417 +#define PF_MEMALLOC    0x00000800      /* Allocating memory */
6418 +#define PF_FLUSHER     0x00001000      /* responsible for disk writeback */
6419 +#define PF_USED_MATH   0x00002000      /* if unset the fpu must be initialized before use */
6420 +#define PF_NOFREEZE    0x00008000      /* this thread should not be frozen */
6421 +#define PF_FROZEN      0x00010000      /* frozen for system suspend */
6422 +#define PF_FSTRANS     0x00020000      /* inside a filesystem transaction */
6423 +#define PF_KSWAPD      0x00040000      /* I am kswapd */
6424 +#define PF_SWAPOFF     0x00080000      /* I am in swapoff */
6425 +#define PF_LESS_THROTTLE 0x00100000    /* Throttle me less: I clean memory */
6426 +#define PF_BORROWED_MM 0x00200000      /* I am a kthread doing use_mm */
6427 +#define PF_RANDOMIZE   0x00400000      /* randomize virtual address space */
6428 +#define PF_SWAPWRITE   0x00800000      /* Allowed to write to swap */
6429 +#define PF_SPREAD_PAGE 0x01000000      /* Spread page cache over cpuset */
6430 +#define PF_SPREAD_SLAB 0x02000000      /* Spread some slab caches over cpuset */
6431 +#define PF_MEMPOLICY   0x10000000      /* Non-default NUMA mempolicy */
6432 +#define PF_MUTEX_TESTER        0x20000000      /* Thread belongs to the rt mutex tester */
6433 +#define PF_FREEZER_SKIP        0x40000000      /* Freezer should not count it as freezeable */
6434 +
6435 +/*
6436 + * Only the _current_ task can read/write to tsk->flags, but other
6437 + * tasks can access tsk->flags in readonly mode for example
6438 + * with tsk_used_math (like during threaded core dumping).
6439 + * There is however an exception to this rule during ptrace
6440 + * or during fork: the ptracer task is allowed to write to the
6441 + * child->flags of its traced child (same goes for fork, the parent
6442 + * can write to the child->flags), because we're guaranteed the
6443 + * child is not running and in turn not changing child->flags
6444 + * at the same time the parent does it.
6445 + */
6446 +#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
6447 +#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
6448 +#define clear_used_math() clear_stopped_child_used_math(current)
6449 +#define set_used_math() set_stopped_child_used_math(current)
6450 +#define conditional_stopped_child_used_math(condition, child) \
6451 +       do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
6452 +#define conditional_used_math(condition) \
6453 +       conditional_stopped_child_used_math(condition, current)
6454 +#define copy_to_stopped_child_used_math(child) \
6455 +       do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
6456 +/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
6457 +#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
6458 +#define used_math() tsk_used_math(current)
6459 +
6460 +#ifdef CONFIG_SMP
6461 +extern int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);
6462 +#else
6463 +static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
6464 +{
6465 +       if (!cpu_isset(0, new_mask))
6466 +               return -EINVAL;
6467 +       return 0;
6468 +}
6469 +#endif
6470 +
6471 +extern unsigned long long sched_clock(void);
6472 +extern unsigned long long
6473 +current_sched_time(const struct task_struct *current_task);
6474 +
6475 +/* sched_exec is called by processes performing an exec */
6476 +#ifdef CONFIG_SMP
6477 +extern void sched_exec(void);
6478 +#else
6479 +#define sched_exec()   {}
6480 +#endif
6481 +
6482 +#ifdef CONFIG_HOTPLUG_CPU
6483 +extern void idle_task_exit(void);
6484 +#else
6485 +static inline void idle_task_exit(void) {}
6486 +#endif
6487 +
6488 +extern void sched_idle_next(void);
6489 +
6490 +#ifdef CONFIG_RT_MUTEXES
6491 +extern int rt_mutex_getprio(struct task_struct *p);
6492 +extern void rt_mutex_setprio(struct task_struct *p, int prio);
6493 +extern void rt_mutex_adjust_pi(struct task_struct *p);
6494 +#else
6495 +static inline int rt_mutex_getprio(struct task_struct *p)
6496 +{
6497 +       return p->normal_prio;
6498 +}
6499 +# define rt_mutex_adjust_pi(p)         do { } while (0)
6500 +#endif
6501 +
6502 +extern void set_user_nice(struct task_struct *p, long nice);
6503 +extern int task_prio(const struct task_struct *p);
6504 +extern int task_nice(const struct task_struct *p);
6505 +extern int can_nice(const struct task_struct *p, const int nice);
6506 +extern int task_curr(const struct task_struct *p);
6507 +extern int idle_cpu(int cpu);
6508 +extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
6509 +extern struct task_struct *idle_task(int cpu);
6510 +extern struct task_struct *curr_task(int cpu);
6511 +extern void set_curr_task(int cpu, struct task_struct *p);
6512 +
6513 +void yield(void);
6514 +
6515 +/*
6516 + * The default (Linux) execution domain.
6517 + */
6518 +extern struct exec_domain      default_exec_domain;
6519 +
6520 +union thread_union {
6521 +       struct thread_info thread_info;
6522 +       unsigned long stack[THREAD_SIZE/sizeof(long)];
6523 +};
6524 +
6525 +#ifndef __HAVE_ARCH_KSTACK_END
6526 +static inline int kstack_end(void *addr)
6527 +{
6528 +       /* Reliable end of stack detection:
6529 +        * Some APM bios versions misalign the stack
6530 +        */
6531 +       return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
6532 +}
6533 +#endif
6534 +
6535 +extern union thread_union init_thread_union;
6536 +extern struct task_struct init_task;
6537 +
6538 +extern struct   mm_struct init_mm;
6539 +
6540 +#define find_task_by_real_pid(nr) \
6541 +       find_task_by_pid_type(PIDTYPE_REALPID, nr)
6542 +#define find_task_by_pid(nr) \
6543 +       find_task_by_pid_type(PIDTYPE_PID, nr)
6544 +
6545 +extern struct task_struct *find_task_by_pid_type(int type, int pid);
6546 +extern void __set_special_pids(pid_t session, pid_t pgrp);
6547 +
6548 +/* per-UID process charging. */
6549 +extern struct user_struct * alloc_uid(xid_t, uid_t);
6550 +static inline struct user_struct *get_uid(struct user_struct *u)
6551 +{
6552 +       atomic_inc(&u->__count);
6553 +       return u;
6554 +}
6555 +extern void free_uid(struct user_struct *);
6556 +extern void switch_uid(struct user_struct *);
6557 +
6558 +#include <asm/current.h>
6559 +
6560 +extern void do_timer(unsigned long ticks);
6561 +
6562 +extern int FASTCALL(wake_up_state(struct task_struct * tsk, unsigned int state));
6563 +extern int FASTCALL(wake_up_process(struct task_struct * tsk));
6564 +extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
6565 +                                               unsigned long clone_flags));
6566 +#ifdef CONFIG_SMP
6567 + extern void kick_process(struct task_struct *tsk);
6568 +#else
6569 + static inline void kick_process(struct task_struct *tsk) { }
6570 +#endif
6571 +extern void FASTCALL(sched_fork(struct task_struct * p, int clone_flags));
6572 +extern void FASTCALL(sched_exit(struct task_struct * p));
6573 +
6574 +extern int in_group_p(gid_t);
6575 +extern int in_egroup_p(gid_t);
6576 +
6577 +extern void proc_caches_init(void);
6578 +extern void flush_signals(struct task_struct *);
6579 +extern void ignore_signals(struct task_struct *);
6580 +extern void flush_signal_handlers(struct task_struct *, int force_default);
6581 +extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
6582 +
6583 +static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
6584 +{
6585 +       unsigned long flags;
6586 +       int ret;
6587 +
6588 +       spin_lock_irqsave(&tsk->sighand->siglock, flags);
6589 +       ret = dequeue_signal(tsk, mask, info);
6590 +       spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
6591 +
6592 +       return ret;
6593 +}      
6594 +
6595 +extern void block_all_signals(int (*notifier)(void *priv), void *priv,
6596 +                             sigset_t *mask);
6597 +extern void unblock_all_signals(void);
6598 +extern void release_task(struct task_struct * p);
6599 +extern int send_sig_info(int, struct siginfo *, struct task_struct *);
6600 +extern int send_group_sig_info(int, struct siginfo *, struct task_struct *);
6601 +extern int force_sigsegv(int, struct task_struct *);
6602 +extern int force_sig_info(int, struct siginfo *, struct task_struct *);
6603 +extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
6604 +extern int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
6605 +extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
6606 +extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
6607 +extern int kill_pgrp(struct pid *pid, int sig, int priv);
6608 +extern int kill_pid(struct pid *pid, int sig, int priv);
6609 +extern int kill_proc_info(int, struct siginfo *, pid_t);
6610 +extern void do_notify_parent(struct task_struct *, int);
6611 +extern void force_sig(int, struct task_struct *);
6612 +extern void force_sig_specific(int, struct task_struct *);
6613 +extern int send_sig(int, struct task_struct *, int);
6614 +extern void zap_other_threads(struct task_struct *p);
6615 +extern int kill_proc(pid_t, int, int);
6616 +extern struct sigqueue *sigqueue_alloc(void);
6617 +extern void sigqueue_free(struct sigqueue *);
6618 +extern int send_sigqueue(int, struct sigqueue *,  struct task_struct *);
6619 +extern int send_group_sigqueue(int, struct sigqueue *,  struct task_struct *);
6620 +extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
6621 +extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
6622 +
6623 +static inline int kill_cad_pid(int sig, int priv)
6624 +{
6625 +       return kill_pid(cad_pid, sig, priv);
6626 +}
6627 +
6628 +/* These can be the second arg to send_sig_info/send_group_sig_info.  */
6629 +#define SEND_SIG_NOINFO ((struct siginfo *) 0)
6630 +#define SEND_SIG_PRIV  ((struct siginfo *) 1)
6631 +#define SEND_SIG_FORCED        ((struct siginfo *) 2)
6632 +
6633 +static inline int is_si_special(const struct siginfo *info)
6634 +{
6635 +       return info <= SEND_SIG_FORCED;
6636 +}
6637 +
6638 +/* True if we are on the alternate signal stack.  */
6639 +
6640 +static inline int on_sig_stack(unsigned long sp)
6641 +{
6642 +       return (sp - current->sas_ss_sp < current->sas_ss_size);
6643 +}
6644 +
6645 +static inline int sas_ss_flags(unsigned long sp)
6646 +{
6647 +       return (current->sas_ss_size == 0 ? SS_DISABLE
6648 +               : on_sig_stack(sp) ? SS_ONSTACK : 0);
6649 +}
6650 +
6651 +/*
6652 + * Routines for handling mm_structs
6653 + */
6654 +extern struct mm_struct * mm_alloc(void);
6655 +
6656 +/* mmdrop drops the mm and the page tables */
6657 +extern void FASTCALL(__mmdrop(struct mm_struct *));
6658 +static inline void mmdrop(struct mm_struct * mm)
6659 +{
6660 +       if (atomic_dec_and_test(&mm->mm_count))
6661 +               __mmdrop(mm);
6662 +}
6663 +
6664 +/* mmput gets rid of the mappings and all user-space */
6665 +extern void mmput(struct mm_struct *);
6666 +/* Grab a reference to a task's mm, if it is not already going away */
6667 +extern struct mm_struct *get_task_mm(struct task_struct *task);
6668 +/* Remove the current tasks stale references to the old mm_struct */
6669 +extern void mm_release(struct task_struct *, struct mm_struct *);
6670 +
6671 +extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
6672 +extern void flush_thread(void);
6673 +extern void exit_thread(void);
6674 +
6675 +extern void exit_files(struct task_struct *);
6676 +extern void __cleanup_signal(struct signal_struct *);
6677 +extern void __cleanup_sighand(struct sighand_struct *);
6678 +extern void exit_itimers(struct signal_struct *);
6679 +
6680 +extern NORET_TYPE void do_group_exit(int);
6681 +
6682 +extern void daemonize(const char *, ...);
6683 +extern int allow_signal(int);
6684 +extern int disallow_signal(int);
6685 +
6686 +extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
6687 +extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
6688 +struct task_struct *fork_idle(int);
6689 +
6690 +extern void set_task_comm(struct task_struct *tsk, char *from);
6691 +extern void get_task_comm(char *to, struct task_struct *tsk);
6692 +
6693 +#ifdef CONFIG_SMP
6694 +extern void wait_task_inactive(struct task_struct * p);
6695 +#else
6696 +#define wait_task_inactive(p)  do { } while (0)
6697 +#endif
6698 +
6699 +#define remove_parent(p)       list_del_init(&(p)->sibling)
6700 +#define add_parent(p)          list_add_tail(&(p)->sibling,&(p)->parent->children)
6701 +
6702 +#define next_task(p)   list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
6703 +
6704 +#define for_each_process(p) \
6705 +       for (p = &init_task ; (p = next_task(p)) != &init_task ; )
6706 +
6707 +/*
6708 + * Careful: do_each_thread/while_each_thread is a double loop so
6709 + *          'break' will not work as expected - use goto instead.
6710 + */
6711 +#define do_each_thread(g, t) \
6712 +       for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
6713 +
6714 +#define while_each_thread(g, t) \
6715 +       while ((t = next_thread(t)) != g)
6716 +
6717 +/* de_thread depends on thread_group_leader not being a pid based check */
6718 +#define thread_group_leader(p) (p == p->group_leader)
6719 +
6720 +/* Due to the insanities of de_thread it is possible for a process
6721 + * to have the pid of the thread group leader without actually being
6722 + * the thread group leader.  For iteration through the pids in proc
6723 + * all we care about is that we have a task with the appropriate
6724 + * pid, we don't actually care if we have the right task.
6725 + */
6726 +static inline int has_group_leader_pid(struct task_struct *p)
6727 +{
6728 +       return p->pid == p->tgid;
6729 +}
6730 +
6731 +static inline struct task_struct *next_thread(const struct task_struct *p)
6732 +{
6733 +       return list_entry(rcu_dereference(p->thread_group.next),
6734 +                         struct task_struct, thread_group);
6735 +}
6736 +
6737 +static inline int thread_group_empty(struct task_struct *p)
6738 +{
6739 +       return list_empty(&p->thread_group);
6740 +}
6741 +
6742 +#define delay_group_leader(p) \
6743 +               (thread_group_leader(p) && !thread_group_empty(p))
6744 +
6745 +/*
6746 + * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
6747 + * subscriptions and synchronises with wait4().  Also used in procfs.  Also
6748 + * pins the final release of task.io_context.  Also protects ->cpuset.
6749 + *
6750 + * Nests both inside and outside of read_lock(&tasklist_lock).
6751 + * It must not be nested with write_lock_irq(&tasklist_lock),
6752 + * neither inside nor outside.
6753 + */
6754 +static inline void task_lock(struct task_struct *p)
6755 +{
6756 +       spin_lock(&p->alloc_lock);
6757 +}
6758 +
6759 +static inline void task_unlock(struct task_struct *p)
6760 +{
6761 +       spin_unlock(&p->alloc_lock);
6762 +}
6763 +
6764 +extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
6765 +                                                       unsigned long *flags);
6766 +
6767 +static inline void unlock_task_sighand(struct task_struct *tsk,
6768 +                                               unsigned long *flags)
6769 +{
6770 +       spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
6771 +}
6772 +
6773 +#ifndef __HAVE_THREAD_FUNCTIONS
6774 +
6775 +#define task_thread_info(task) ((struct thread_info *)(task)->stack)
6776 +#define task_stack_page(task)  ((task)->stack)
6777 +
6778 +static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
6779 +{
6780 +       *task_thread_info(p) = *task_thread_info(org);
6781 +       task_thread_info(p)->task = p;
6782 +}
6783 +
6784 +static inline unsigned long *end_of_stack(struct task_struct *p)
6785 +{
6786 +       return (unsigned long *)(task_thread_info(p) + 1);
6787 +}
6788 +
6789 +#endif
6790 +
6791 +/* set thread flags in other task's structures
6792 + * - see asm/thread_info.h for TIF_xxxx flags available
6793 + */
6794 +static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
6795 +{
6796 +       set_ti_thread_flag(task_thread_info(tsk), flag);
6797 +}
6798 +
6799 +static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
6800 +{
6801 +       clear_ti_thread_flag(task_thread_info(tsk), flag);
6802 +}
6803 +
6804 +static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
6805 +{
6806 +       return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
6807 +}
6808 +
6809 +static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
6810 +{
6811 +       return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
6812 +}
6813 +
6814 +static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
6815 +{
6816 +       return test_ti_thread_flag(task_thread_info(tsk), flag);
6817 +}
6818 +
6819 +static inline void set_tsk_need_resched(struct task_struct *tsk)
6820 +{
6821 +       set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
6822 +}
6823 +
6824 +static inline void clear_tsk_need_resched(struct task_struct *tsk)
6825 +{
6826 +       clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
6827 +}
6828 +
6829 +static inline int signal_pending(struct task_struct *p)
6830 +{
6831 +       return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
6832 +}
6833 +  
6834 +static inline int need_resched(void)
6835 +{
6836 +       return unlikely(test_thread_flag(TIF_NEED_RESCHED));
6837 +}
6838 +
6839 +/*
6840 + * cond_resched() and cond_resched_lock(): latency reduction via
6841 + * explicit rescheduling in places that are safe. The return
6842 + * value indicates whether a reschedule was done in fact.
6843 + * cond_resched_lock() will drop the spinlock before scheduling,
6844 + * cond_resched_softirq() will enable bhs before scheduling.
6845 + */
6846 +extern int cond_resched(void);
6847 +extern int cond_resched_lock(spinlock_t * lock);
6848 +extern int cond_resched_softirq(void);
6849 +
6850 +/*
6851 + * Does a critical section need to be broken due to another
6852 + * task waiting?:
6853 + */
6854 +#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
6855 +# define need_lockbreak(lock) ((lock)->break_lock)
6856 +#else
6857 +# define need_lockbreak(lock) 0
6858 +#endif
6859 +
6860 +/*
6861 + * Does a critical section need to be broken due to another
6862 + * task waiting or preemption being signalled:
6863 + */
6864 +static inline int lock_need_resched(spinlock_t *lock)
6865 +{
6866 +       if (need_lockbreak(lock) || need_resched())
6867 +               return 1;
6868 +       return 0;
6869 +}
6870 +
6871 +/*
6872 + * Reevaluate whether the task has signals pending delivery.
6873 + * Wake the task if so.
6874 + * This is required every time the blocked sigset_t changes.
6875 + * callers must hold sighand->siglock.
6876 + */
6877 +extern void recalc_sigpending_and_wake(struct task_struct *t);
6878 +extern void recalc_sigpending(void);
6879 +
6880 +extern void signal_wake_up(struct task_struct *t, int resume_stopped);
6881 +
6882 +/*
6883 + * Wrappers for p->thread_info->cpu access. No-op on UP.
6884 + */
6885 +#ifdef CONFIG_SMP
6886 +
6887 +static inline unsigned int task_cpu(const struct task_struct *p)
6888 +{
6889 +       return task_thread_info(p)->cpu;
6890 +}
6891 +
6892 +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
6893 +{
6894 +       task_thread_info(p)->cpu = cpu;
6895 +}
6896 +
6897 +#else
6898 +
6899 +static inline unsigned int task_cpu(const struct task_struct *p)
6900 +{
6901 +       return 0;
6902 +}
6903 +
6904 +static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
6905 +{
6906 +}
6907 +
6908 +#endif /* CONFIG_SMP */
6909 +
6910 +#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
6911 +extern void arch_pick_mmap_layout(struct mm_struct *mm);
6912 +#else
6913 +static inline void arch_pick_mmap_layout(struct mm_struct *mm)
6914 +{
6915 +       mm->mmap_base = TASK_UNMAPPED_BASE;
6916 +       mm->get_unmapped_area = arch_get_unmapped_area;
6917 +       mm->unmap_area = arch_unmap_area;
6918 +}
6919 +#endif
6920 +
6921 +extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
6922 +extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
6923 +
6924 +extern int sched_mc_power_savings, sched_smt_power_savings;
6925 +
6926 +extern void normalize_rt_tasks(void);
6927 +
6928 +#ifdef CONFIG_TASK_XACCT
6929 +static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
6930 +{
6931 +       tsk->rchar += amt;
6932 +}
6933 +
6934 +static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
6935 +{
6936 +       tsk->wchar += amt;
6937 +}
6938 +
6939 +static inline void inc_syscr(struct task_struct *tsk)
6940 +{
6941 +       tsk->syscr++;
6942 +}
6943 +
6944 +static inline void inc_syscw(struct task_struct *tsk)
6945 +{
6946 +       tsk->syscw++;
6947 +}
6948 +#else
6949 +static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
6950 +{
6951 +}
6952 +
6953 +static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
6954 +{
6955 +}
6956 +
6957 +static inline void inc_syscr(struct task_struct *tsk)
6958 +{
6959 +}
6960 +
6961 +static inline void inc_syscw(struct task_struct *tsk)
6962 +{
6963 +}
6964 +#endif
6965 +
6966 +#endif /* __KERNEL__ */
6967 +
6968 +#endif
6969 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/mutex.c linux-2.6.22-590/kernel/mutex.c
6970 --- linux-2.6.22-580/kernel/mutex.c     2007-07-08 19:32:17.000000000 -0400
6971 +++ linux-2.6.22-590/kernel/mutex.c     2009-02-18 09:57:23.000000000 -0500
6972 @@ -18,6 +18,17 @@
6973  #include <linux/spinlock.h>
6974  #include <linux/interrupt.h>
6975  #include <linux/debug_locks.h>
6976 +#include <linux/arrays.h>
6977 +
6978 +#undef CONFIG_CHOPSTIX
6979 +#ifdef CONFIG_CHOPSTIX
6980 +struct event_spec {
6981 +       unsigned long pc;
6982 +       unsigned long dcookie;
6983 +       unsigned count;
6984 +       unsigned char reason;
6985 +};
6986 +#endif
6987  
6988  /*
6989   * In the DEBUG case we are using the "NULL fastpath" for mutexes,
6990 @@ -43,6 +54,9 @@
6991  __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
6992  {
6993         atomic_set(&lock->count, 1);
6994 +#ifdef CONFIG_CHOPSTIX
6995 +       lock->owner=NULL;
6996 +#endif
6997         spin_lock_init(&lock->wait_lock);
6998         INIT_LIST_HEAD(&lock->wait_list);
6999  
7000 @@ -88,6 +102,7 @@
7001          * The locking fastpath is the 1->0 transition from
7002          * 'unlocked' into 'locked' state.
7003          */
7004 +
7005         __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
7006  }
7007  
7008 @@ -168,6 +183,27 @@
7009                 }
7010                 __set_task_state(task, state);
7011  
7012 +#ifdef CONFIG_CHOPSTIX
7013 +               if (rec_event) {
7014 +                       if (lock->owner) {
7015 +                               struct event event;
7016 +                               struct event_spec espec;
7017 +                               struct task_struct *p = lock->owner->task;
7018 +                               /*spin_lock(&p->alloc_lock);*/
7019 +                               espec.reason = 0; /* lock */
7020 +                               event.event_data=&espec;
7021 +                               event.task = p;
7022 +                               espec.pc=lock;
7023 +                               event.event_type=5; 
7024 +                               (*rec_event)(&event, 1);
7025 +                               /*spin_unlock(&p->alloc_lock);*/
7026 +
7027 +                       }
7028 +                       else 
7029 +                               BUG();
7030 +               }
7031 +#endif
7032 +
7033                 /* didnt get the lock, go to sleep: */
7034                 spin_unlock_mutex(&lock->wait_lock, flags);
7035                 schedule();
7036 @@ -177,6 +213,9 @@
7037         /* got the lock - rejoice! */
7038         mutex_remove_waiter(lock, &waiter, task_thread_info(task));
7039         debug_mutex_set_owner(lock, task_thread_info(task));
7040 +#ifdef CONFIG_CHOPSTIX
7041 +       lock->owner = task_thread_info(task);
7042 +#endif
7043  
7044         /* set it to 0 if there are no waiters left: */
7045         if (likely(list_empty(&lock->wait_list)))
7046 @@ -202,6 +241,7 @@
7047  mutex_lock_nested(struct mutex *lock, unsigned int subclass)
7048  {
7049         might_sleep();
7050 +
7051         __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
7052  }
7053  
7054 @@ -211,6 +251,7 @@
7055  mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
7056  {
7057         might_sleep();
7058 +
7059         return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
7060  }
7061  
7062 @@ -246,6 +287,23 @@
7063  
7064                 debug_mutex_wake_waiter(lock, waiter);
7065  
7066 +#ifdef CONFIG_CHOPSTIX
7067 +               if (rec_event) {
7068 +                       if (lock->owner) {
7069 +                               struct event event;
7070 +                               struct event_spec espec;
7071 +
7072 +                               espec.reason = 1; /* unlock */
7073 +                               event.event_data=&espec;
7074 +                               event.task = lock->owner->task;
7075 +                               espec.pc=lock;
7076 +                               event.event_type=5; 
7077 +                               (*rec_event)(&event, 1);
7078 +                       }
7079 +                       else 
7080 +                               BUG();
7081 +               }
7082 +#endif
7083                 wake_up_process(waiter->task);
7084         }
7085  
7086 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/sched.c linux-2.6.22-590/kernel/sched.c
7087 --- linux-2.6.22-580/kernel/sched.c     2009-02-18 09:56:02.000000000 -0500
7088 +++ linux-2.6.22-590/kernel/sched.c     2009-02-18 09:57:23.000000000 -0500
7089 @@ -10,7 +10,7 @@
7090   *  1998-11-19 Implemented schedule_timeout() and related stuff
7091   *             by Andrea Arcangeli
7092   *  2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
7093 - *             hybrid priority-list and round-robin design with
7094 + *             hybrid priority-list and round-robin design with
7095   *             an array-switch method of distributing timeslices
7096   *             and per-CPU runqueues.  Cleanups and useful suggestions
7097   *             by Davide Libenzi, preemptible kernel bits by Robert Love.
7098 @@ -23,6 +23,7 @@
7099  #include <linux/nmi.h>
7100  #include <linux/init.h>
7101  #include <asm/uaccess.h>
7102 +#include <linux/arrays.h>
7103  #include <linux/highmem.h>
7104  #include <linux/smp_lock.h>
7105  #include <asm/mmu_context.h>
7106 @@ -59,6 +60,9 @@
7107  #include <linux/vs_sched.h>
7108  #include <linux/vs_cvirt.h>
7109  
7110 +#define INTERRUPTIBLE   -1
7111 +#define RUNNING         0
7112 +
7113  /*
7114   * Scheduler clock - returns current time in nanosec units.
7115   * This is default implementation.
7116 @@ -431,6 +435,7 @@
7117  
7118  repeat_lock_task:
7119         rq = task_rq(p);
7120 +
7121         spin_lock(&rq->lock);
7122         if (unlikely(rq != task_rq(p))) {
7123                 spin_unlock(&rq->lock);
7124 @@ -1741,6 +1746,21 @@
7125          * event cannot wake it up and insert it on the runqueue either.
7126          */
7127         p->state = TASK_RUNNING;
7128 +#ifdef CONFIG_CHOPSTIX
7129 +    /* The jiffy of last interruption */
7130 +    if (p->state & TASK_UNINTERRUPTIBLE) {
7131 +                               p->last_interrupted=jiffies;
7132 +       }
7133 +    else
7134 +    if (p->state & TASK_INTERRUPTIBLE) {
7135 +                               p->last_interrupted=INTERRUPTIBLE;
7136 +       }
7137 +    else
7138 +           p->last_interrupted=RUNNING;
7139 +
7140 +    /* The jiffy of last execution */ 
7141 +       p->last_ran_j=jiffies;
7142 +#endif
7143  
7144         /*
7145          * Make sure we do not leak PI boosting priority to the child:
7146 @@ -3608,6 +3628,7 @@
7147  
7148  #endif
7149  
7150 +
7151  static inline int interactive_sleep(enum sleep_type sleep_type)
7152  {
7153         return (sleep_type == SLEEP_INTERACTIVE ||
7154 @@ -3617,16 +3638,28 @@
7155  /*
7156   * schedule() is the main scheduler function.
7157   */
7158 +
7159 +#ifdef CONFIG_CHOPSTIX
7160 +extern void (*rec_event)(void *,unsigned int);
7161 +struct event_spec {
7162 +       unsigned long pc;
7163 +       unsigned long dcookie;
7164 +       unsigned count;
7165 +       unsigned char reason;
7166 +};
7167 +#endif
7168 +
7169  asmlinkage void __sched schedule(void)
7170  {
7171         struct task_struct *prev, *next;
7172         struct prio_array *array;
7173         struct list_head *queue;
7174         unsigned long long now;
7175 -       unsigned long run_time;
7176 +       unsigned long run_time, diff;
7177         int cpu, idx, new_prio;
7178         long *switch_count;
7179         struct rq *rq;
7180 +       int sampling_reason;
7181  
7182         /*
7183          * Test if we are atomic.  Since do_exit() needs to call into
7184 @@ -3680,6 +3713,7 @@
7185         switch_count = &prev->nivcsw;
7186         if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
7187                 switch_count = &prev->nvcsw;
7188 +
7189                 if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
7190                                 unlikely(signal_pending(prev))))
7191                         prev->state = TASK_RUNNING;
7192 @@ -3689,6 +3723,17 @@
7193                                 vx_uninterruptible_inc(prev);
7194                         }
7195                         deactivate_task(prev, rq);
7196 +#ifdef CONFIG_CHOPSTIX
7197 +            /* An uninterruptible process just yielded. Record the current jiffy */
7198 +                       if (prev->state & TASK_UNINTERRUPTIBLE) {
7199 +                               prev->last_interrupted=jiffies;
7200 +                       }
7201 +            /* An interruptible process just yielded, or it got preempted. 
7202 +             * Mark it as interruptible */
7203 +                       else if (prev->state & TASK_INTERRUPTIBLE) {
7204 +                               prev->last_interrupted=INTERRUPTIBLE;
7205 +                       }
7206 +#endif
7207                 }
7208         }
7209  
7210 @@ -3765,6 +3810,40 @@
7211                 prev->sleep_avg = 0;
7212         prev->timestamp = prev->last_ran = now;
7213  
7214 +#ifdef CONFIG_CHOPSTIX
7215 +       /* Run only if the Chopstix module so decrees it */
7216 +       if (rec_event) {
7217 +               prev->last_ran_j = jiffies;
7218 +               if (next->last_interrupted!=INTERRUPTIBLE) {
7219 +                       if (next->last_interrupted!=RUNNING) {
7220 +                               diff = (jiffies-next->last_interrupted);
7221 +                               sampling_reason = 0;/* BLOCKING */
7222 +                       }
7223 +                       else {
7224 +                               diff = jiffies-next->last_ran_j; 
7225 +                               sampling_reason = 1;/* PREEMPTION */
7226 +                       }
7227 +
7228 +                       if (diff >= HZ/10) {
7229 +                               struct event event;
7230 +                               struct event_spec espec;
7231 +                               unsigned long eip;
7232 +
7233 +                               espec.reason = sampling_reason;
7234 +                               eip = next->thread.esp & 4095;
7235 +                               event.event_data=&espec;
7236 +                               event.task=next;
7237 +                               espec.pc=eip;
7238 +                               event.event_type=2; 
7239 +                               /* index in the event array currently set up */
7240 +                               /* make sure the counters are loaded in the order we want them to show up */ 
7241 +                               (*rec_event)(&event, diff);
7242 +                       }
7243 +               }
7244 +        /* next has been elected to run */
7245 +               next->last_interrupted=0;
7246 +       }
7247 +#endif
7248         sched_info_switch(prev, next);
7249         if (likely(prev != next)) {
7250                 next->timestamp = next->last_ran = now;
7251 @@ -4664,6 +4743,7 @@
7252         get_task_struct(p);
7253         read_unlock(&tasklist_lock);
7254  
7255 +
7256         retval = -EPERM;
7257         if ((current->euid != p->euid) && (current->euid != p->uid) &&
7258                         !capable(CAP_SYS_NICE))
7259 @@ -5032,6 +5112,7 @@
7260         jiffies_to_timespec(p->policy == SCHED_FIFO ?
7261                                 0 : task_timeslice(p), &t);
7262         read_unlock(&tasklist_lock);
7263 +
7264         retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
7265  out_nounlock:
7266         return retval;
7267 @@ -7275,3 +7356,14 @@
7268  }
7269  
7270  #endif
7271 +
7272 +#ifdef CONFIG_CHOPSTIX
7273 +void (*rec_event)(void *,unsigned int) = NULL;
7274 +
7275 +/* To support safe calling from asm */
7276 +asmlinkage void rec_event_asm (struct event *event_signature_in, unsigned int count) {
7277 +    (*rec_event)(event_signature_in, count);
7278 +}
7279 +EXPORT_SYMBOL(rec_event);
7280 +EXPORT_SYMBOL(in_sched_functions);
7281 +#endif
7282 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/sched.c.orig linux-2.6.22-590/kernel/sched.c.orig
7283 --- linux-2.6.22-580/kernel/sched.c.orig        1969-12-31 19:00:00.000000000 -0500
7284 +++ linux-2.6.22-590/kernel/sched.c.orig        2009-02-18 09:56:02.000000000 -0500
7285 @@ -0,0 +1,7277 @@
7286 +/*
7287 + *  kernel/sched.c
7288 + *
7289 + *  Kernel scheduler and related syscalls
7290 + *
7291 + *  Copyright (C) 1991-2002  Linus Torvalds
7292 + *
7293 + *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
7294 + *             make semaphores SMP safe
7295 + *  1998-11-19 Implemented schedule_timeout() and related stuff
7296 + *             by Andrea Arcangeli
7297 + *  2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
7298 + *             hybrid priority-list and round-robin design with
7299 + *             an array-switch method of distributing timeslices
7300 + *             and per-CPU runqueues.  Cleanups and useful suggestions
7301 + *             by Davide Libenzi, preemptible kernel bits by Robert Love.
7302 + *  2003-09-03 Interactivity tuning by Con Kolivas.
7303 + *  2004-04-02 Scheduler domains code by Nick Piggin
7304 + */
7305 +
7306 +#include <linux/mm.h>
7307 +#include <linux/module.h>
7308 +#include <linux/nmi.h>
7309 +#include <linux/init.h>
7310 +#include <asm/uaccess.h>
7311 +#include <linux/highmem.h>
7312 +#include <linux/smp_lock.h>
7313 +#include <asm/mmu_context.h>
7314 +#include <linux/interrupt.h>
7315 +#include <linux/capability.h>
7316 +#include <linux/completion.h>
7317 +#include <linux/kernel_stat.h>
7318 +#include <linux/debug_locks.h>
7319 +#include <linux/security.h>
7320 +#include <linux/notifier.h>
7321 +#include <linux/profile.h>
7322 +#include <linux/freezer.h>
7323 +#include <linux/vmalloc.h>
7324 +#include <linux/blkdev.h>
7325 +#include <linux/delay.h>
7326 +#include <linux/smp.h>
7327 +#include <linux/threads.h>
7328 +#include <linux/timer.h>
7329 +#include <linux/rcupdate.h>
7330 +#include <linux/cpu.h>
7331 +#include <linux/cpuset.h>
7332 +#include <linux/percpu.h>
7333 +#include <linux/kthread.h>
7334 +#include <linux/seq_file.h>
7335 +#include <linux/syscalls.h>
7336 +#include <linux/times.h>
7337 +#include <linux/tsacct_kern.h>
7338 +#include <linux/kprobes.h>
7339 +#include <linux/delayacct.h>
7340 +#include <linux/reciprocal_div.h>
7341 +
7342 +#include <asm/tlb.h>
7343 +#include <asm/unistd.h>
7344 +#include <linux/vs_sched.h>
7345 +#include <linux/vs_cvirt.h>
7346 +
7347 +/*
7348 + * Scheduler clock - returns current time in nanosec units.
7349 + * This is default implementation.
7350 + * Architectures and sub-architectures can override this.
7351 + */
7352 +unsigned long long __attribute__((weak)) sched_clock(void)
7353 +{
7354 +       return (unsigned long long)jiffies * (1000000000 / HZ);
7355 +}
7356 +
7357 +/*
7358 + * Convert user-nice values [ -20 ... 0 ... 19 ]
7359 + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
7360 + * and back.
7361 + */
7362 +#define NICE_TO_PRIO(nice)     (MAX_RT_PRIO + (nice) + 20)
7363 +#define PRIO_TO_NICE(prio)     ((prio) - MAX_RT_PRIO - 20)
7364 +#define TASK_NICE(p)           PRIO_TO_NICE((p)->static_prio)
7365 +
7366 +/*
7367 + * 'User priority' is the nice value converted to something we
7368 + * can work with better when scaling various scheduler parameters,
7369 + * it's a [ 0 ... 39 ] range.
7370 + */
7371 +#define USER_PRIO(p)           ((p)-MAX_RT_PRIO)
7372 +#define TASK_USER_PRIO(p)      USER_PRIO((p)->static_prio)
7373 +#define MAX_USER_PRIO          (USER_PRIO(MAX_PRIO))
7374 +
7375 +/*
7376 + * Some helpers for converting nanosecond timing to jiffy resolution
7377 + */
7378 +#define NS_TO_JIFFIES(TIME)    ((TIME) / (1000000000 / HZ))
7379 +#define JIFFIES_TO_NS(TIME)    ((TIME) * (1000000000 / HZ))
7380 +
7381 +/*
7382 + * These are the 'tuning knobs' of the scheduler:
7383 + *
7384 + * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
7385 + * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
7386 + * Timeslices get refilled after they expire.
7387 + */
7388 +#define MIN_TIMESLICE          max(5 * HZ / 1000, 1)
7389 +#define DEF_TIMESLICE          (100 * HZ / 1000)
7390 +#define ON_RUNQUEUE_WEIGHT      30
7391 +#define CHILD_PENALTY           95
7392 +#define PARENT_PENALTY         100
7393 +#define EXIT_WEIGHT              3
7394 +#define PRIO_BONUS_RATIO        25
7395 +#define MAX_BONUS              (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
7396 +#define INTERACTIVE_DELTA        2
7397 +#define MAX_SLEEP_AVG          (DEF_TIMESLICE * MAX_BONUS)
7398 +#define STARVATION_LIMIT       (MAX_SLEEP_AVG)
7399 +#define NS_MAX_SLEEP_AVG       (JIFFIES_TO_NS(MAX_SLEEP_AVG))
7400 +
7401 +/*
7402 + * If a task is 'interactive' then we reinsert it in the active
7403 + * array after it has expired its current timeslice. (it will not
7404 + * continue to run immediately, it will still roundrobin with
7405 + * other interactive tasks.)
7406 + *
7407 + * This part scales the interactivity limit depending on niceness.
7408 + *
7409 + * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
7410 + * Here are a few examples of different nice levels:
7411 + *
7412 + *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
7413 + *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
7414 + *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
7415 + *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
7416 + *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
7417 + *
7418 + * (the X axis represents the possible -5 ... 0 ... +5 dynamic
7419 + *  priority range a task can explore, a value of '1' means the
7420 + *  task is rated interactive.)
7421 + *
7422 + * Ie. nice +19 tasks can never get 'interactive' enough to be
7423 + * reinserted into the active array. And only heavily CPU-hog nice -20
7424 + * tasks will be expired. Default nice 0 tasks are somewhere between,
7425 + * it takes some effort for them to get interactive, but it's not
7426 + * too hard.
7427 + */
7428 +
7429 +#define CURRENT_BONUS(p) \
7430 +       (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
7431 +               MAX_SLEEP_AVG)
7432 +
7433 +#define GRANULARITY    (10 * HZ / 1000 ? : 1)
7434 +
7435 +#ifdef CONFIG_SMP
7436 +#define TIMESLICE_GRANULARITY(p)       (GRANULARITY * \
7437 +               (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
7438 +                       num_online_cpus())
7439 +#else
7440 +#define TIMESLICE_GRANULARITY(p)       (GRANULARITY * \
7441 +               (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
7442 +#endif
7443 +
7444 +#define SCALE(v1,v1_max,v2_max) \
7445 +       (v1) * (v2_max) / (v1_max)
7446 +
7447 +#define DELTA(p) \
7448 +       (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
7449 +               INTERACTIVE_DELTA)
7450 +
7451 +#define TASK_INTERACTIVE(p) \
7452 +       ((p)->prio <= (p)->static_prio - DELTA(p))
7453 +
7454 +#define INTERACTIVE_SLEEP(p) \
7455 +       (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
7456 +               (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
7457 +
7458 +#define TASK_PREEMPTS_CURR(p, rq) \
7459 +       ((p)->prio < (rq)->curr->prio)
7460 +
7461 +#define SCALE_PRIO(x, prio) \
7462 +       max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
7463 +
7464 +static unsigned int static_prio_timeslice(int static_prio)
7465 +{
7466 +       if (static_prio < NICE_TO_PRIO(0))
7467 +               return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
7468 +       else
7469 +               return SCALE_PRIO(DEF_TIMESLICE, static_prio);
7470 +}
7471 +
7472 +#ifdef CONFIG_SMP
7473 +/*
7474 + * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
7475 + * Since cpu_power is a 'constant', we can use a reciprocal divide.
7476 + */
7477 +static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
7478 +{
7479 +       return reciprocal_divide(load, sg->reciprocal_cpu_power);
7480 +}
7481 +
7482 +/*
7483 + * Each time a sched group cpu_power is changed,
7484 + * we must compute its reciprocal value
7485 + */
7486 +static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
7487 +{
7488 +       sg->__cpu_power += val;
7489 +       sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
7490 +}
7491 +#endif
7492 +
7493 +/*
7494 + * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
7495 + * to time slice values: [800ms ... 100ms ... 5ms]
7496 + *
7497 + * The higher a thread's priority, the bigger timeslices
7498 + * it gets during one round of execution. But even the lowest
7499 + * priority thread gets MIN_TIMESLICE worth of execution time.
7500 + */
7501 +
7502 +static inline unsigned int task_timeslice(struct task_struct *p)
7503 +{
7504 +       return static_prio_timeslice(p->static_prio);
7505 +}
7506 +
7507 +/*
7508 + * These are the runqueue data structures:
7509 + */
7510 +
7511 +struct prio_array {
7512 +       unsigned int nr_active;
7513 +       DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
7514 +       struct list_head queue[MAX_PRIO];
7515 +};
7516 +
7517 +/*
7518 + * This is the main, per-CPU runqueue data structure.
7519 + *
7520 + * Locking rule: those places that want to lock multiple runqueues
7521 + * (such as the load balancing or the thread migration code), lock
7522 + * acquire operations must be ordered by ascending &runqueue.
7523 + */
7524 +struct rq {
7525 +       spinlock_t lock;
7526 +
7527 +       /*
7528 +        * nr_running and cpu_load should be in the same cacheline because
7529 +        * remote CPUs use both these fields when doing load calculation.
7530 +        */
7531 +       unsigned long nr_running;
7532 +       unsigned long raw_weighted_load;
7533 +#ifdef CONFIG_SMP
7534 +       unsigned long cpu_load[3];
7535 +       unsigned char idle_at_tick;
7536 +#ifdef CONFIG_NO_HZ
7537 +       unsigned char in_nohz_recently;
7538 +#endif
7539 +#endif
7540 +       unsigned long long nr_switches;
7541 +
7542 +       /*
7543 +        * This is part of a global counter where only the total sum
7544 +        * over all CPUs matters. A task can increase this counter on
7545 +        * one CPU and if it got migrated afterwards it may decrease
7546 +        * it on another CPU. Always updated under the runqueue lock:
7547 +        */
7548 +       unsigned long nr_uninterruptible;
7549 +
7550 +       unsigned long expired_timestamp;
7551 +       /* Cached timestamp set by update_cpu_clock() */
7552 +       unsigned long long most_recent_timestamp;
7553 +       struct task_struct *curr, *idle;
7554 +       unsigned long next_balance;
7555 +       struct mm_struct *prev_mm;
7556 +       struct prio_array *active, *expired, arrays[2];
7557 +       int best_expired_prio;
7558 +       atomic_t nr_iowait;
7559 +
7560 +#ifdef CONFIG_SMP
7561 +       struct sched_domain *sd;
7562 +
7563 +       /* For active balancing */
7564 +       int active_balance;
7565 +       int push_cpu;
7566 +       int cpu;                /* cpu of this runqueue */
7567 +
7568 +       struct task_struct *migration_thread;
7569 +       struct list_head migration_queue;
7570 +#endif
7571 +       unsigned long norm_time;
7572 +       unsigned long idle_time;
7573 +#ifdef CONFIG_VSERVER_IDLETIME
7574 +       int idle_skip;
7575 +#endif
7576 +#ifdef CONFIG_VSERVER_HARDCPU
7577 +       struct list_head hold_queue;
7578 +       unsigned long nr_onhold;
7579 +       int idle_tokens;
7580 +#endif
7581 +
7582 +#ifdef CONFIG_SCHEDSTATS
7583 +       /* latency stats */
7584 +       struct sched_info rq_sched_info;
7585 +
7586 +       /* sys_sched_yield() stats */
7587 +       unsigned long yld_exp_empty;
7588 +       unsigned long yld_act_empty;
7589 +       unsigned long yld_both_empty;
7590 +       unsigned long yld_cnt;
7591 +
7592 +       /* schedule() stats */
7593 +       unsigned long sched_switch;
7594 +       unsigned long sched_cnt;
7595 +       unsigned long sched_goidle;
7596 +
7597 +       /* try_to_wake_up() stats */
7598 +       unsigned long ttwu_cnt;
7599 +       unsigned long ttwu_local;
7600 +#endif
7601 +       struct lock_class_key rq_lock_key;
7602 +};
7603 +
7604 +static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
7605 +static DEFINE_MUTEX(sched_hotcpu_mutex);
7606 +
7607 +static inline int cpu_of(struct rq *rq)
7608 +{
7609 +#ifdef CONFIG_SMP
7610 +       return rq->cpu;
7611 +#else
7612 +       return 0;
7613 +#endif
7614 +}
7615 +
7616 +/*
7617 + * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
7618 + * See detach_destroy_domains: synchronize_sched for details.
7619 + *
7620 + * The domain tree of any CPU may only be accessed from within
7621 + * preempt-disabled sections.
7622 + */
7623 +#define for_each_domain(cpu, __sd) \
7624 +       for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
7625 +
7626 +#define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
7627 +#define this_rq()              (&__get_cpu_var(runqueues))
7628 +#define task_rq(p)             cpu_rq(task_cpu(p))
7629 +#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
7630 +
7631 +#ifndef prepare_arch_switch
7632 +# define prepare_arch_switch(next)     do { } while (0)
7633 +#endif
7634 +#ifndef finish_arch_switch
7635 +# define finish_arch_switch(prev)      do { } while (0)
7636 +#endif
7637 +
7638 +#ifndef __ARCH_WANT_UNLOCKED_CTXSW
7639 +static inline int task_running(struct rq *rq, struct task_struct *p)
7640 +{
7641 +       return rq->curr == p;
7642 +}
7643 +
7644 +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
7645 +{
7646 +}
7647 +
7648 +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
7649 +{
7650 +#ifdef CONFIG_DEBUG_SPINLOCK
7651 +       /* this is a valid case when another task releases the spinlock */
7652 +       rq->lock.owner = current;
7653 +#endif
7654 +       /*
7655 +        * If we are tracking spinlock dependencies then we have to
7656 +        * fix up the runqueue lock - which gets 'carried over' from
7657 +        * prev into current:
7658 +        */
7659 +       spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
7660 +
7661 +       spin_unlock_irq(&rq->lock);
7662 +}
7663 +
7664 +#else /* __ARCH_WANT_UNLOCKED_CTXSW */
7665 +static inline int task_running(struct rq *rq, struct task_struct *p)
7666 +{
7667 +#ifdef CONFIG_SMP
7668 +       return p->oncpu;
7669 +#else
7670 +       return rq->curr == p;
7671 +#endif
7672 +}
7673 +
7674 +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
7675 +{
7676 +#ifdef CONFIG_SMP
7677 +       /*
7678 +        * We can optimise this out completely for !SMP, because the
7679 +        * SMP rebalancing from interrupt is the only thing that cares
7680 +        * here.
7681 +        */
7682 +       next->oncpu = 1;
7683 +#endif
7684 +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
7685 +       spin_unlock_irq(&rq->lock);
7686 +#else
7687 +       spin_unlock(&rq->lock);
7688 +#endif
7689 +}
7690 +
7691 +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
7692 +{
7693 +#ifdef CONFIG_SMP
7694 +       /*
7695 +        * After ->oncpu is cleared, the task can be moved to a different CPU.
7696 +        * We must ensure this doesn't happen until the switch is completely
7697 +        * finished.
7698 +        */
7699 +       smp_wmb();
7700 +       prev->oncpu = 0;
7701 +#endif
7702 +#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
7703 +       local_irq_enable();
7704 +#endif
7705 +}
7706 +#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
7707 +
7708 +/*
7709 + * __task_rq_lock - lock the runqueue a given task resides on.
7710 + * Must be called interrupts disabled.
7711 + */
7712 +static inline struct rq *__task_rq_lock(struct task_struct *p)
7713 +       __acquires(rq->lock)
7714 +{
7715 +       struct rq *rq;
7716 +
7717 +repeat_lock_task:
7718 +       rq = task_rq(p);
7719 +       spin_lock(&rq->lock);
7720 +       if (unlikely(rq != task_rq(p))) {
7721 +               spin_unlock(&rq->lock);
7722 +               goto repeat_lock_task;
7723 +       }
7724 +       return rq;
7725 +}
7726 +
7727 +/*
7728 + * task_rq_lock - lock the runqueue a given task resides on and disable
7729 + * interrupts.  Note the ordering: we can safely lookup the task_rq without
7730 + * explicitly disabling preemption.
7731 + */
7732 +static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
7733 +       __acquires(rq->lock)
7734 +{
7735 +       struct rq *rq;
7736 +
7737 +repeat_lock_task:
7738 +       local_irq_save(*flags);
7739 +       rq = task_rq(p);
7740 +       spin_lock(&rq->lock);
7741 +       if (unlikely(rq != task_rq(p))) {
7742 +               spin_unlock_irqrestore(&rq->lock, *flags);
7743 +               goto repeat_lock_task;
7744 +       }
7745 +       return rq;
7746 +}
7747 +
7748 +static inline void __task_rq_unlock(struct rq *rq)
7749 +       __releases(rq->lock)
7750 +{
7751 +       spin_unlock(&rq->lock);
7752 +}
7753 +
7754 +static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
7755 +       __releases(rq->lock)
7756 +{
7757 +       spin_unlock_irqrestore(&rq->lock, *flags);
7758 +}
7759 +
7760 +#ifdef CONFIG_SCHEDSTATS
7761 +/*
7762 + * bump this up when changing the output format or the meaning of an existing
7763 + * format, so that tools can adapt (or abort)
7764 + */
7765 +#define SCHEDSTAT_VERSION 14
7766 +
7767 +static int show_schedstat(struct seq_file *seq, void *v)
7768 +{
7769 +       int cpu;
7770 +
7771 +       seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
7772 +       seq_printf(seq, "timestamp %lu\n", jiffies);
7773 +       for_each_online_cpu(cpu) {
7774 +               struct rq *rq = cpu_rq(cpu);
7775 +#ifdef CONFIG_SMP
7776 +               struct sched_domain *sd;
7777 +               int dcnt = 0;
7778 +#endif
7779 +
7780 +               /* runqueue-specific stats */
7781 +               seq_printf(seq,
7782 +                   "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
7783 +                   cpu, rq->yld_both_empty,
7784 +                   rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
7785 +                   rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
7786 +                   rq->ttwu_cnt, rq->ttwu_local,
7787 +                   rq->rq_sched_info.cpu_time,
7788 +                   rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);
7789 +
7790 +               seq_printf(seq, "\n");
7791 +
7792 +#ifdef CONFIG_SMP
7793 +               /* domain-specific stats */
7794 +               preempt_disable();
7795 +               for_each_domain(cpu, sd) {
7796 +                       enum idle_type itype;
7797 +                       char mask_str[NR_CPUS];
7798 +
7799 +                       cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
7800 +                       seq_printf(seq, "domain%d %s", dcnt++, mask_str);
7801 +                       for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
7802 +                                       itype++) {
7803 +                               seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
7804 +                                               "%lu",
7805 +                                   sd->lb_cnt[itype],
7806 +                                   sd->lb_balanced[itype],
7807 +                                   sd->lb_failed[itype],
7808 +                                   sd->lb_imbalance[itype],
7809 +                                   sd->lb_gained[itype],
7810 +                                   sd->lb_hot_gained[itype],
7811 +                                   sd->lb_nobusyq[itype],
7812 +                                   sd->lb_nobusyg[itype]);
7813 +                       }
7814 +                       seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
7815 +                           " %lu %lu %lu\n",
7816 +                           sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
7817 +                           sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
7818 +                           sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
7819 +                           sd->ttwu_wake_remote, sd->ttwu_move_affine,
7820 +                           sd->ttwu_move_balance);
7821 +               }
7822 +               preempt_enable();
7823 +#endif
7824 +       }
7825 +       return 0;
7826 +}
7827 +
7828 +static int schedstat_open(struct inode *inode, struct file *file)
7829 +{
7830 +       unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
7831 +       char *buf = kmalloc(size, GFP_KERNEL);
7832 +       struct seq_file *m;
7833 +       int res;
7834 +
7835 +       if (!buf)
7836 +               return -ENOMEM;
7837 +       res = single_open(file, show_schedstat, NULL);
7838 +       if (!res) {
7839 +               m = file->private_data;
7840 +               m->buf = buf;
7841 +               m->size = size;
7842 +       } else
7843 +               kfree(buf);
7844 +       return res;
7845 +}
7846 +
7847 +const struct file_operations proc_schedstat_operations = {
7848 +       .open    = schedstat_open,
7849 +       .read    = seq_read,
7850 +       .llseek  = seq_lseek,
7851 +       .release = single_release,
7852 +};
7853 +
7854 +/*
7855 + * Expects runqueue lock to be held for atomicity of update
7856 + */
7857 +static inline void
7858 +rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
7859 +{
7860 +       if (rq) {
7861 +               rq->rq_sched_info.run_delay += delta_jiffies;
7862 +               rq->rq_sched_info.pcnt++;
7863 +       }
7864 +}
7865 +
7866 +/*
7867 + * Expects runqueue lock to be held for atomicity of update
7868 + */
7869 +static inline void
7870 +rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
7871 +{
7872 +       if (rq)
7873 +               rq->rq_sched_info.cpu_time += delta_jiffies;
7874 +}
7875 +# define schedstat_inc(rq, field)      do { (rq)->field++; } while (0)
7876 +# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
7877 +#else /* !CONFIG_SCHEDSTATS */
7878 +static inline void
7879 +rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
7880 +{}
7881 +static inline void
7882 +rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
7883 +{}
7884 +# define schedstat_inc(rq, field)      do { } while (0)
7885 +# define schedstat_add(rq, field, amt) do { } while (0)
7886 +#endif
7887 +
7888 +/*
7889 + * this_rq_lock - lock this runqueue and disable interrupts.
7890 + */
7891 +static inline struct rq *this_rq_lock(void)
7892 +       __acquires(rq->lock)
7893 +{
7894 +       struct rq *rq;
7895 +
7896 +       local_irq_disable();
7897 +       rq = this_rq();
7898 +       spin_lock(&rq->lock);
7899 +
7900 +       return rq;
7901 +}
7902 +
7903 +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
7904 +/*
7905 + * Called when a process is dequeued from the active array and given
7906 + * the cpu.  We should note that with the exception of interactive
7907 + * tasks, the expired queue will become the active queue after the active
7908 + * queue is empty, without explicitly dequeuing and requeuing tasks in the
7909 + * expired queue.  (Interactive tasks may be requeued directly to the
7910 + * active queue, thus delaying tasks in the expired queue from running;
7911 + * see scheduler_tick()).
7912 + *
7913 + * This function is only called from sched_info_arrive(), rather than
7914 + * dequeue_task(). Even though a task may be queued and dequeued multiple
7915 + * times as it is shuffled about, we're really interested in knowing how
7916 + * long it was from the *first* time it was queued to the time that it
7917 + * finally hit a cpu.
7918 + */
7919 +static inline void sched_info_dequeued(struct task_struct *t)
7920 +{
7921 +       t->sched_info.last_queued = 0;
7922 +}
7923 +
7924 +/*
7925 + * Called when a task finally hits the cpu.  We can now calculate how
7926 + * long it was waiting to run.  We also note when it began so that we
7927 + * can keep stats on how long its timeslice is.
7928 + */
7929 +static void sched_info_arrive(struct task_struct *t)
7930 +{
7931 +       unsigned long now = jiffies, delta_jiffies = 0;
7932 +
7933 +       if (t->sched_info.last_queued)
7934 +               delta_jiffies = now - t->sched_info.last_queued;
7935 +       sched_info_dequeued(t);
7936 +       t->sched_info.run_delay += delta_jiffies;
7937 +       t->sched_info.last_arrival = now;
7938 +       t->sched_info.pcnt++;
7939 +
7940 +       rq_sched_info_arrive(task_rq(t), delta_jiffies);
7941 +}
7942 +
7943 +/*
7944 + * Called when a process is queued into either the active or expired
7945 + * array.  The time is noted and later used to determine how long we
7946 + * had to wait for us to reach the cpu.  Since the expired queue will
7947 + * become the active queue after active queue is empty, without dequeuing
7948 + * and requeuing any tasks, we are interested in queuing to either. It
7949 + * is unusual but not impossible for tasks to be dequeued and immediately
7950 + * requeued in the same or another array: this can happen in sched_yield(),
7951 + * set_user_nice(), and even load_balance() as it moves tasks from runqueue
7952 + * to runqueue.
7953 + *
7954 + * This function is only called from enqueue_task(), but also only updates
7955 + * the timestamp if it is already not set.  It's assumed that
7956 + * sched_info_dequeued() will clear that stamp when appropriate.
7957 + */
7958 +static inline void sched_info_queued(struct task_struct *t)
7959 +{
7960 +       if (unlikely(sched_info_on()))
7961 +               if (!t->sched_info.last_queued)
7962 +                       t->sched_info.last_queued = jiffies;
7963 +}
7964 +
7965 +/*
7966 + * Called when a process ceases being the active-running process, either
7967 + * voluntarily or involuntarily.  Now we can calculate how long we ran.
7968 + */
7969 +static inline void sched_info_depart(struct task_struct *t)
7970 +{
7971 +       unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;
7972 +
7973 +       t->sched_info.cpu_time += delta_jiffies;
7974 +       rq_sched_info_depart(task_rq(t), delta_jiffies);
7975 +}
7976 +
7977 +/*
7978 + * Called when tasks are switched involuntarily due, typically, to expiring
7979 + * their time slice.  (This may also be called when switching to or from
7980 + * the idle task.)  We are only called when prev != next.
7981 + */
7982 +static inline void
7983 +__sched_info_switch(struct task_struct *prev, struct task_struct *next)
7984 +{
7985 +       struct rq *rq = task_rq(prev);
7986 +
7987 +       /*
7988 +        * prev now departs the cpu.  It's not interesting to record
7989 +        * stats about how efficient we were at scheduling the idle
7990 +        * process, however.
7991 +        */
7992 +       if (prev != rq->idle)
7993 +               sched_info_depart(prev);
7994 +
7995 +       if (next != rq->idle)
7996 +               sched_info_arrive(next);
7997 +}
7998 +static inline void
7999 +sched_info_switch(struct task_struct *prev, struct task_struct *next)
8000 +{
8001 +       if (unlikely(sched_info_on()))
8002 +               __sched_info_switch(prev, next);
8003 +}
8004 +#else
8005 +#define sched_info_queued(t)           do { } while (0)
8006 +#define sched_info_switch(t, next)     do { } while (0)
8007 +#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
8008 +
8009 +/*
8010 + * Adding/removing a task to/from a priority array:
8011 + */
8012 +static void dequeue_task(struct task_struct *p, struct prio_array *array)
8013 +{
8014 +       BUG_ON(p->state & TASK_ONHOLD);
8015 +       array->nr_active--;
8016 +       list_del(&p->run_list);
8017 +       if (list_empty(array->queue + p->prio))
8018 +               __clear_bit(p->prio, array->bitmap);
8019 +}
8020 +
8021 +static void enqueue_task(struct task_struct *p, struct prio_array *array)
8022 +{
8023 +       BUG_ON(p->state & TASK_ONHOLD);
8024 +       sched_info_queued(p);
8025 +       list_add_tail(&p->run_list, array->queue + p->prio);
8026 +       __set_bit(p->prio, array->bitmap);
8027 +       array->nr_active++;
8028 +       p->array = array;
8029 +}
8030 +
8031 +/*
8032 + * Put task to the end of the run list without the overhead of dequeue
8033 + * followed by enqueue.
8034 + */
8035 +static void requeue_task(struct task_struct *p, struct prio_array *array)
8036 +{
8037 +       BUG_ON(p->state & TASK_ONHOLD);
8038 +       list_move_tail(&p->run_list, array->queue + p->prio);
8039 +}
8040 +
8041 +static inline void
8042 +enqueue_task_head(struct task_struct *p, struct prio_array *array)
8043 +{
8044 +       BUG_ON(p->state & TASK_ONHOLD);
8045 +       list_add(&p->run_list, array->queue + p->prio);
8046 +       __set_bit(p->prio, array->bitmap);
8047 +       array->nr_active++;
8048 +       p->array = array;
8049 +}
8050 +
8051 +/*
8052 + * __normal_prio - return the priority that is based on the static
8053 + * priority but is modified by bonuses/penalties.
8054 + *
8055 + * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
8056 + * into the -5 ... 0 ... +5 bonus/penalty range.
8057 + *
8058 + * We use 25% of the full 0...39 priority range so that:
8059 + *
8060 + * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
8061 + * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
8062 + *
8063 + * Both properties are important to certain workloads.
8064 + */
8065 +
8066 +static inline int __normal_prio(struct task_struct *p)
8067 +{
8068 +       int bonus, prio;
8069 +
8070 +       bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
8071 +
8072 +       prio = p->static_prio - bonus;
8073 +
8074 +       /* adjust effective priority */
8075 +       prio = vx_adjust_prio(p, prio, MAX_USER_PRIO);
8076 +
8077 +       if (prio < MAX_RT_PRIO)
8078 +               prio = MAX_RT_PRIO;
8079 +       if (prio > MAX_PRIO-1)
8080 +               prio = MAX_PRIO-1;
8081 +       return prio;
8082 +}
8083 +
8084 +/*
8085 + * To aid in avoiding the subversion of "niceness" due to uneven distribution
8086 + * of tasks with abnormal "nice" values across CPUs the contribution that
8087 + * each task makes to its run queue's load is weighted according to its
8088 + * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
8089 + * scaled version of the new time slice allocation that they receive on time
8090 + * slice expiry etc.
8091 + */
8092 +
8093 +/*
8094 + * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
8095 + * If static_prio_timeslice() is ever changed to break this assumption then
8096 + * this code will need modification
8097 + */
8098 +#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
8099 +#define LOAD_WEIGHT(lp) \
8100 +       (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
8101 +#define PRIO_TO_LOAD_WEIGHT(prio) \
8102 +       LOAD_WEIGHT(static_prio_timeslice(prio))
8103 +#define RTPRIO_TO_LOAD_WEIGHT(rp) \
8104 +       (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
8105 +
8106 +static void set_load_weight(struct task_struct *p)
8107 +{
8108 +       if (has_rt_policy(p)) {
8109 +#ifdef CONFIG_SMP
8110 +               if (p == task_rq(p)->migration_thread)
8111 +                       /*
8112 +                        * The migration thread does the actual balancing.
8113 +                        * Giving its load any weight will skew balancing
8114 +                        * adversely.
8115 +                        */
8116 +                       p->load_weight = 0;
8117 +               else
8118 +#endif
8119 +                       p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
8120 +       } else
8121 +               p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
8122 +}
8123 +
8124 +static inline void
8125 +inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
8126 +{
8127 +       rq->raw_weighted_load += p->load_weight;
8128 +}
8129 +
8130 +static inline void
8131 +dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
8132 +{
8133 +       rq->raw_weighted_load -= p->load_weight;
8134 +}
8135 +
8136 +static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
8137 +{
8138 +       rq->nr_running++;
8139 +       inc_raw_weighted_load(rq, p);
8140 +}
8141 +
8142 +static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
8143 +{
8144 +       rq->nr_running--;
8145 +       dec_raw_weighted_load(rq, p);
8146 +}
8147 +
8148 +/*
8149 + * Calculate the expected normal priority: i.e. priority
8150 + * without taking RT-inheritance into account. Might be
8151 + * boosted by interactivity modifiers. Changes upon fork,
8152 + * setprio syscalls, and whenever the interactivity
8153 + * estimator recalculates.
8154 + */
8155 +static inline int normal_prio(struct task_struct *p)
8156 +{
8157 +       int prio;
8158 +
8159 +       if (has_rt_policy(p))
8160 +               prio = MAX_RT_PRIO-1 - p->rt_priority;
8161 +       else
8162 +               prio = __normal_prio(p);
8163 +       return prio;
8164 +}
8165 +
8166 +/*
8167 + * Calculate the current priority, i.e. the priority
8168 + * taken into account by the scheduler. This value might
8169 + * be boosted by RT tasks, or might be boosted by
8170 + * interactivity modifiers. Will be RT if the task got
8171 + * RT-boosted. If not then it returns p->normal_prio.
8172 + */
8173 +static int effective_prio(struct task_struct *p)
8174 +{
8175 +       p->normal_prio = normal_prio(p);
8176 +       /*
8177 +        * If we are RT tasks or we were boosted to RT priority,
8178 +        * keep the priority unchanged. Otherwise, update priority
8179 +        * to the normal priority:
8180 +        */
8181 +       if (!rt_prio(p->prio))
8182 +               return p->normal_prio;
8183 +       return p->prio;
8184 +}
8185 +
8186 +#include "sched_mon.h"
8187 +
8188 +
8189 +/*
8190 + * __activate_task - move a task to the runqueue.
8191 + */
8192 +static void __activate_task(struct task_struct *p, struct rq *rq)
8193 +{
8194 +       struct prio_array *target = rq->active;
8195 +
8196 +       if (batch_task(p))
8197 +               target = rq->expired;
8198 +       vxm_activate_task(p, rq);
8199 +       enqueue_task(p, target);
8200 +       inc_nr_running(p, rq);
8201 +}
8202 +
8203 +/*
8204 + * __activate_idle_task - move idle task to the _front_ of runqueue.
8205 + */
8206 +static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
8207 +{
8208 +       vxm_activate_idle(p, rq);
8209 +       enqueue_task_head(p, rq->active);
8210 +       inc_nr_running(p, rq);
8211 +}
8212 +
8213 +/*
8214 + * Recalculate p->normal_prio and p->prio after having slept,
8215 + * updating the sleep-average too:
8216 + */
8217 +static int recalc_task_prio(struct task_struct *p, unsigned long long now)
8218 +{
8219 +       /* Caller must always ensure 'now >= p->timestamp' */
8220 +       unsigned long sleep_time = now - p->timestamp;
8221 +
8222 +       if (batch_task(p))
8223 +               sleep_time = 0;
8224 +
8225 +       if (likely(sleep_time > 0)) {
8226 +               /*
8227 +                * This ceiling is set to the lowest priority that would allow
8228 +                * a task to be reinserted into the active array on timeslice
8229 +                * completion.
8230 +                */
8231 +               unsigned long ceiling = INTERACTIVE_SLEEP(p);
8232 +
8233 +               if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
8234 +                       /*
8235 +                        * Prevents user tasks from achieving best priority
8236 +                        * with one single large enough sleep.
8237 +                        */
8238 +                       p->sleep_avg = ceiling;
8239 +                       /*
8240 +                        * Using INTERACTIVE_SLEEP() as a ceiling places a
8241 +                        * nice(0) task 1ms sleep away from promotion, and
8242 +                        * gives it 700ms to round-robin with no chance of
8243 +                        * being demoted.  This is more than generous, so
8244 +                        * mark this sleep as non-interactive to prevent the
8245 +                        * on-runqueue bonus logic from intervening should
8246 +                        * this task not receive cpu immediately.
8247 +                        */
8248 +                       p->sleep_type = SLEEP_NONINTERACTIVE;
8249 +               } else {
8250 +                       /*
8251 +                        * Tasks waking from uninterruptible sleep are
8252 +                        * limited in their sleep_avg rise as they
8253 +                        * are likely to be waiting on I/O
8254 +                        */
8255 +                       if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
8256 +                               if (p->sleep_avg >= ceiling)
8257 +                                       sleep_time = 0;
8258 +                               else if (p->sleep_avg + sleep_time >=
8259 +                                        ceiling) {
8260 +                                               p->sleep_avg = ceiling;
8261 +                                               sleep_time = 0;
8262 +                               }
8263 +                       }
8264 +
8265 +                       /*
8266 +                        * This code gives a bonus to interactive tasks.
8267 +                        *
8268 +                        * The boost works by updating the 'average sleep time'
8269 +                        * value here, based on ->timestamp. The more time a
8270 +                        * task spends sleeping, the higher the average gets -
8271 +                        * and the higher the priority boost gets as well.
8272 +                        */
8273 +                       p->sleep_avg += sleep_time;
8274 +
8275 +               }
8276 +               if (p->sleep_avg > NS_MAX_SLEEP_AVG)
8277 +                       p->sleep_avg = NS_MAX_SLEEP_AVG;
8278 +       }
8279 +
8280 +       return effective_prio(p);
8281 +}
8282 +
8283 +/*
8284 + * activate_task - move a task to the runqueue and do priority recalculation
8285 + *
8286 + * Update all the scheduling statistics stuff. (sleep average
8287 + * calculation, priority modifiers, etc.)
8288 + */
8289 +static void activate_task(struct task_struct *p, struct rq *rq, int local)
8290 +{
8291 +       unsigned long long now;
8292 +
8293 +       if (rt_task(p))
8294 +               goto out;
8295 +
8296 +       now = sched_clock();
8297 +#ifdef CONFIG_SMP
8298 +       if (!local) {
8299 +               /* Compensate for drifting sched_clock */
8300 +               struct rq *this_rq = this_rq();
8301 +               now = (now - this_rq->most_recent_timestamp)
8302 +                       + rq->most_recent_timestamp;
8303 +       }
8304 +#endif
8305 +
8306 +       /*
8307 +        * Sleep time is in units of nanosecs, so shift by 20 to get a
8308 +        * milliseconds-range estimation of the amount of time that the task
8309 +        * spent sleeping:
8310 +        */
8311 +       if (unlikely(prof_on == SLEEP_PROFILING)) {
8312 +               if (p->state == TASK_UNINTERRUPTIBLE)
8313 +                       profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
8314 +                                    (now - p->timestamp) >> 20);
8315 +       }
8316 +
8317 +       p->prio = recalc_task_prio(p, now);
8318 +
8319 +       /*
8320 +        * This checks to make sure it's not an uninterruptible task
8321 +        * that is now waking up.
8322 +        */
8323 +       if (p->sleep_type == SLEEP_NORMAL) {
8324 +               /*
8325 +                * Tasks which were woken up by interrupts (ie. hw events)
8326 +                * are most likely of interactive nature. So we give them
8327 +                * the credit of extending their sleep time to the period
8328 +                * of time they spend on the runqueue, waiting for execution
8329 +                * on a CPU, first time around:
8330 +                */
8331 +               if (in_interrupt())
8332 +                       p->sleep_type = SLEEP_INTERRUPTED;
8333 +               else {
8334 +                       /*
8335 +                        * Normal first-time wakeups get a credit too for
8336 +                        * on-runqueue time, but it will be weighted down:
8337 +                        */
8338 +                       p->sleep_type = SLEEP_INTERACTIVE;
8339 +               }
8340 +       }
8341 +       p->timestamp = now;
8342 +out:
8343 +       vx_activate_task(p);
8344 +       __activate_task(p, rq);
8345 +}
8346 +
8347 +/*
8348 + * __deactivate_task - remove a task from the runqueue.
8349 + */
8350 +static void __deactivate_task(struct task_struct *p, struct rq *rq)
8351 +{
8352 +       dec_nr_running(p, rq);
8353 +       dequeue_task(p, p->array);
8354 +       vxm_deactivate_task(p, rq);
8355 +       p->array = NULL;
8356 +}
8357 +
8358 +static inline
8359 +void deactivate_task(struct task_struct *p, struct rq *rq)
8360 +{
8361 +       vx_deactivate_task(p);
8362 +       __deactivate_task(p, rq);
8363 +}
8364 +
8365 +#include "sched_hard.h"
8366 +
8367 +/*
8368 + * resched_task - mark a task 'to be rescheduled now'.
8369 + *
8370 + * On UP this means the setting of the need_resched flag, on SMP it
8371 + * might also involve a cross-CPU call to trigger the scheduler on
8372 + * the target CPU.
8373 + */
8374 +#ifdef CONFIG_SMP
8375 +
8376 +#ifndef tsk_is_polling
8377 +#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
8378 +#endif
8379 +
8380 +static void resched_task(struct task_struct *p)
8381 +{
8382 +       int cpu;
8383 +
8384 +       assert_spin_locked(&task_rq(p)->lock);
8385 +
8386 +       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
8387 +               return;
8388 +
8389 +       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
8390 +
8391 +       cpu = task_cpu(p);
8392 +       if (cpu == smp_processor_id())
8393 +               return;
8394 +
8395 +       /* NEED_RESCHED must be visible before we test polling */
8396 +       smp_mb();
8397 +       if (!tsk_is_polling(p))
8398 +               smp_send_reschedule(cpu);
8399 +}
8400 +
8401 +static void resched_cpu(int cpu)
8402 +{
8403 +       struct rq *rq = cpu_rq(cpu);
8404 +       unsigned long flags;
8405 +
8406 +       if (!spin_trylock_irqsave(&rq->lock, flags))
8407 +               return;
8408 +       resched_task(cpu_curr(cpu));
8409 +       spin_unlock_irqrestore(&rq->lock, flags);
8410 +}
8411 +#else
8412 +static inline void resched_task(struct task_struct *p)
8413 +{
8414 +       assert_spin_locked(&task_rq(p)->lock);
8415 +       set_tsk_need_resched(p);
8416 +}
8417 +#endif
8418 +
8419 +/**
8420 + * task_curr - is this task currently executing on a CPU?
8421 + * @p: the task in question.
8422 + */
8423 +inline int task_curr(const struct task_struct *p)
8424 +{
8425 +       return cpu_curr(task_cpu(p)) == p;
8426 +}
8427 +
8428 +/* Used instead of source_load when we know the type == 0 */
8429 +unsigned long weighted_cpuload(const int cpu)
8430 +{
8431 +       return cpu_rq(cpu)->raw_weighted_load;
8432 +}
8433 +
8434 +#ifdef CONFIG_SMP
8435 +struct migration_req {
8436 +       struct list_head list;
8437 +
8438 +       struct task_struct *task;
8439 +       int dest_cpu;
8440 +
8441 +       struct completion done;
8442 +};
8443 +
8444 +/*
8445 + * The task's runqueue lock must be held.
8446 + * Returns true if you have to wait for migration thread.
8447 + */
8448 +static int
8449 +migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
8450 +{
8451 +       struct rq *rq = task_rq(p);
8452 +
8453 +       vxm_migrate_task(p, rq, dest_cpu);
8454 +       /*
8455 +        * If the task is not on a runqueue (and not running), then
8456 +        * it is sufficient to simply update the task's cpu field.
8457 +        */
8458 +       if (!p->array && !task_running(rq, p)) {
8459 +               set_task_cpu(p, dest_cpu);
8460 +               return 0;
8461 +       }
8462 +
8463 +       init_completion(&req->done);
8464 +       req->task = p;
8465 +       req->dest_cpu = dest_cpu;
8466 +       list_add(&req->list, &rq->migration_queue);
8467 +
8468 +       return 1;
8469 +}
8470 +
8471 +/*
8472 + * wait_task_inactive - wait for a thread to unschedule.
8473 + *
8474 + * The caller must ensure that the task *will* unschedule sometime soon,
8475 + * else this function might spin for a *long* time. This function can't
8476 + * be called with interrupts off, or it may introduce deadlock with
8477 + * smp_call_function() if an IPI is sent by the same process we are
8478 + * waiting to become inactive.
8479 + */
8480 +void wait_task_inactive(struct task_struct *p)
8481 +{
8482 +       unsigned long flags;
8483 +       struct rq *rq;
8484 +       struct prio_array *array;
8485 +       int running;
8486 +
8487 +repeat:
8488 +       /*
8489 +        * We do the initial early heuristics without holding
8490 +        * any task-queue locks at all. We'll only try to get
8491 +        * the runqueue lock when things look like they will
8492 +        * work out!
8493 +        */
8494 +       rq = task_rq(p);
8495 +
8496 +       /*
8497 +        * If the task is actively running on another CPU
8498 +        * still, just relax and busy-wait without holding
8499 +        * any locks.
8500 +        *
8501 +        * NOTE! Since we don't hold any locks, it's not
8502 +        * even sure that "rq" stays as the right runqueue!
8503 +        * But we don't care, since "task_running()" will
8504 +        * return false if the runqueue has changed and p
8505 +        * is actually now running somewhere else!
8506 +        */
8507 +       while (task_running(rq, p))
8508 +               cpu_relax();
8509 +
8510 +       /*
8511 +        * Ok, time to look more closely! We need the rq
8512 +        * lock now, to be *sure*. If we're wrong, we'll
8513 +        * just go back and repeat.
8514 +        */
8515 +       rq = task_rq_lock(p, &flags);
8516 +       running = task_running(rq, p);
8517 +       array = p->array;
8518 +       task_rq_unlock(rq, &flags);
8519 +
8520 +       /*
8521 +        * Was it really running after all now that we
8522 +        * checked with the proper locks actually held?
8523 +        *
8524 +        * Oops. Go back and try again..
8525 +        */
8526 +       if (unlikely(running)) {
8527 +               cpu_relax();
8528 +               goto repeat;
8529 +       }
8530 +
8531 +       /*
8532 +        * It's not enough that it's not actively running,
8533 +        * it must be off the runqueue _entirely_, and not
8534 +        * preempted!
8535 +        *
8536 +        * So if it was still runnable (but just not actively
8537 +        * running right now), it's preempted, and we should
8538 +        * yield - it could be a while.
8539 +        */
8540 +       if (unlikely(array)) {
8541 +               yield();
8542 +               goto repeat;
8543 +       }
8544 +
8545 +       /*
8546 +        * Ahh, all good. It wasn't running, and it wasn't
8547 +        * runnable, which means that it will never become
8548 +        * running in the future either. We're all done!
8549 +        */
8550 +}
8551 +
8552 +/***
8553 + * kick_process - kick a running thread to enter/exit the kernel
8554 + * @p: the to-be-kicked thread
8555 + *
8556 + * Cause a process which is running on another CPU to enter
8557 + * kernel-mode, without any delay. (to get signals handled.)
8558 + *
8559 + * NOTE: this function doesn't have to take the runqueue lock,
8560 + * because all it wants to ensure is that the remote task enters
8561 + * the kernel. If the IPI races and the task has been migrated
8562 + * to another CPU then no harm is done and the purpose has been
8563 + * achieved as well.
8564 + */
8565 +void kick_process(struct task_struct *p)
8566 +{
8567 +       int cpu;
8568 +
8569 +       preempt_disable();
8570 +       cpu = task_cpu(p);
8571 +       if ((cpu != smp_processor_id()) && task_curr(p))
8572 +               smp_send_reschedule(cpu);
8573 +       preempt_enable();
8574 +}
8575 +
8576 +/*
8577 + * Return a low guess at the load of a migration-source cpu weighted
8578 + * according to the scheduling class and "nice" value.
8579 + *
8580 + * We want to under-estimate the load of migration sources, to
8581 + * balance conservatively.
8582 + */
8583 +static inline unsigned long source_load(int cpu, int type)
8584 +{
8585 +       struct rq *rq = cpu_rq(cpu);
8586 +
8587 +       if (type == 0)
8588 +               return rq->raw_weighted_load;
8589 +
8590 +       return min(rq->cpu_load[type-1], rq->raw_weighted_load);
8591 +}
8592 +
8593 +/*
8594 + * Return a high guess at the load of a migration-target cpu weighted
8595 + * according to the scheduling class and "nice" value.
8596 + */
8597 +static inline unsigned long target_load(int cpu, int type)
8598 +{
8599 +       struct rq *rq = cpu_rq(cpu);
8600 +
8601 +       if (type == 0)
8602 +               return rq->raw_weighted_load;
8603 +
8604 +       return max(rq->cpu_load[type-1], rq->raw_weighted_load);
8605 +}
8606 +
8607 +/*
8608 + * Return the average load per task on the cpu's run queue
8609 + */
8610 +static inline unsigned long cpu_avg_load_per_task(int cpu)
8611 +{
8612 +       struct rq *rq = cpu_rq(cpu);
8613 +       unsigned long n = rq->nr_running;
8614 +
8615 +       return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
8616 +}
8617 +
8618 +/*
8619 + * find_idlest_group finds and returns the least busy CPU group within the
8620 + * domain.
8621 + */
8622 +static struct sched_group *
8623 +find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
8624 +{
8625 +       struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
8626 +       unsigned long min_load = ULONG_MAX, this_load = 0;
8627 +       int load_idx = sd->forkexec_idx;
8628 +       int imbalance = 100 + (sd->imbalance_pct-100)/2;
8629 +
8630 +       do {
8631 +               unsigned long load, avg_load;
8632 +               int local_group;
8633 +               int i;
8634 +
8635 +               /* Skip over this group if it has no CPUs allowed */
8636 +               if (!cpus_intersects(group->cpumask, p->cpus_allowed))
8637 +                       goto nextgroup;
8638 +
8639 +               local_group = cpu_isset(this_cpu, group->cpumask);
8640 +
8641 +               /* Tally up the load of all CPUs in the group */
8642 +               avg_load = 0;
8643 +
8644 +               for_each_cpu_mask(i, group->cpumask) {
8645 +                       /* Bias balancing toward cpus of our domain */
8646 +                       if (local_group)
8647 +                               load = source_load(i, load_idx);
8648 +                       else
8649 +                               load = target_load(i, load_idx);
8650 +
8651 +                       avg_load += load;
8652 +               }
8653 +
8654 +               /* Adjust by relative CPU power of the group */
8655 +               avg_load = sg_div_cpu_power(group,
8656 +                               avg_load * SCHED_LOAD_SCALE);
8657 +
8658 +               if (local_group) {
8659 +                       this_load = avg_load;
8660 +                       this = group;
8661 +               } else if (avg_load < min_load) {
8662 +                       min_load = avg_load;
8663 +                       idlest = group;
8664 +               }
8665 +nextgroup:
8666 +               group = group->next;
8667 +       } while (group != sd->groups);
8668 +
8669 +       if (!idlest || 100*this_load < imbalance*min_load)
8670 +               return NULL;
8671 +       return idlest;
8672 +}
8673 +
8674 +/*
8675 + * find_idlest_cpu - find the idlest cpu among the cpus in group.
8676 + */
8677 +static int
8678 +find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
8679 +{
8680 +       cpumask_t tmp;
8681 +       unsigned long load, min_load = ULONG_MAX;
8682 +       int idlest = -1;
8683 +       int i;
8684 +
8685 +       /* Traverse only the allowed CPUs */
8686 +       cpus_and(tmp, group->cpumask, p->cpus_allowed);
8687 +
8688 +       for_each_cpu_mask(i, tmp) {
8689 +               load = weighted_cpuload(i);
8690 +
8691 +               if (load < min_load || (load == min_load && i == this_cpu)) {
8692 +                       min_load = load;
8693 +                       idlest = i;
8694 +               }
8695 +       }
8696 +
8697 +       return idlest;
8698 +}
8699 +
8700 +/*
8701 + * sched_balance_self: balance the current task (running on cpu) in domains
8702 + * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
8703 + * SD_BALANCE_EXEC.
8704 + *
8705 + * Balance, ie. select the least loaded group.
8706 + *
8707 + * Returns the target CPU number, or the same CPU if no balancing is needed.
8708 + *
8709 + * preempt must be disabled.
8710 + */
8711 +static int sched_balance_self(int cpu, int flag)
8712 +{
8713 +       struct task_struct *t = current;
8714 +       struct sched_domain *tmp, *sd = NULL;
8715 +
8716 +       for_each_domain(cpu, tmp) {
8717 +               /*
8718 +                * If power savings logic is enabled for a domain, stop there.
8719 +                */
8720 +               if (tmp->flags & SD_POWERSAVINGS_BALANCE)
8721 +                       break;
8722 +               if (tmp->flags & flag)
8723 +                       sd = tmp;
8724 +       }
8725 +
8726 +       while (sd) {
8727 +               cpumask_t span;
8728 +               struct sched_group *group;
8729 +               int new_cpu, weight;
8730 +
8731 +               if (!(sd->flags & flag)) {
8732 +                       sd = sd->child;
8733 +                       continue;
8734 +               }
8735 +
8736 +               span = sd->span;
8737 +               group = find_idlest_group(sd, t, cpu);
8738 +               if (!group) {
8739 +                       sd = sd->child;
8740 +                       continue;
8741 +               }
8742 +
8743 +               new_cpu = find_idlest_cpu(group, t, cpu);
8744 +               if (new_cpu == -1 || new_cpu == cpu) {
8745 +                       /* Now try balancing at a lower domain level of cpu */
8746 +                       sd = sd->child;
8747 +                       continue;
8748 +               }
8749 +
8750 +               /* Now try balancing at a lower domain level of new_cpu */
8751 +               cpu = new_cpu;
8752 +               sd = NULL;
8753 +               weight = cpus_weight(span);
8754 +               for_each_domain(cpu, tmp) {
8755 +                       if (weight <= cpus_weight(tmp->span))
8756 +                               break;
8757 +                       if (tmp->flags & flag)
8758 +                               sd = tmp;
8759 +               }
8760 +               /* while loop will break here if sd == NULL */
8761 +       }
8762 +
8763 +       return cpu;
8764 +}
8765 +
8766 +#endif /* CONFIG_SMP */
8767 +
8768 +/*
8769 + * wake_idle() will wake a task on an idle cpu if task->cpu is
8770 + * not idle and an idle cpu is available.  The span of cpus to
8771 + * search starts with cpus closest then further out as needed,
8772 + * so we always favor a closer, idle cpu.
8773 + *
8774 + * Returns the CPU we should wake onto.
8775 + */
8776 +#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
8777 +static int wake_idle(int cpu, struct task_struct *p)
8778 +{
8779 +       cpumask_t tmp;
8780 +       struct sched_domain *sd;
8781 +       int i;
8782 +
8783 +       /*
8784 +        * If it is idle, then it is the best cpu to run this task.
8785 +        *
8786 +        * This cpu is also the best, if it has more than one task already.
8787 +        * Siblings must be also busy(in most cases) as they didn't already
8788 +        * pickup the extra load from this cpu and hence we need not check
8789 +        * sibling runqueue info. This will avoid the checks and cache miss
8790 +        * penalties associated with that.
8791 +        */
8792 +       if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
8793 +               return cpu;
8794 +
8795 +       for_each_domain(cpu, sd) {
8796 +               if (sd->flags & SD_WAKE_IDLE) {
8797 +                       cpus_and(tmp, sd->span, p->cpus_allowed);
8798 +                       for_each_cpu_mask(i, tmp) {
8799 +                               if (idle_cpu(i))
8800 +                                       return i;
8801 +                       }
8802 +               }
8803 +               else
8804 +                       break;
8805 +       }
8806 +       return cpu;
8807 +}
8808 +#else
8809 +static inline int wake_idle(int cpu, struct task_struct *p)
8810 +{
8811 +       return cpu;
8812 +}
8813 +#endif
8814 +
8815 +/***
8816 + * try_to_wake_up - wake up a thread
8817 + * @p: the to-be-woken-up thread
8818 + * @state: the mask of task states that can be woken
8819 + * @sync: do a synchronous wakeup?
8820 + *
8821 + * Put it on the run-queue if it's not already there. The "current"
8822 + * thread is always on the run-queue (except when the actual
8823 + * re-schedule is in progress), and as such you're allowed to do
8824 + * the simpler "current->state = TASK_RUNNING" to mark yourself
8825 + * runnable without the overhead of this.
8826 + *
8827 + * returns failure only if the task is already active.
8828 + */
8829 +static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
8830 +{
8831 +       int cpu, this_cpu, success = 0;
8832 +       unsigned long flags;
8833 +       long old_state;
8834 +       struct rq *rq;
8835 +#ifdef CONFIG_SMP
8836 +       struct sched_domain *sd, *this_sd = NULL;
8837 +       unsigned long load, this_load;
8838 +       int new_cpu;
8839 +#endif
8840 +
8841 +       rq = task_rq_lock(p, &flags);
8842 +       old_state = p->state;
8843 +
8844 +       /* we need to unhold suspended tasks */
8845 +       if (old_state & TASK_ONHOLD) {
8846 +               vx_unhold_task(p, rq);
8847 +               old_state = p->state;
8848 +       }
8849 +       if (!(old_state & state))
8850 +               goto out;
8851 +
8852 +       if (p->array)
8853 +               goto out_running;
8854 +
8855 +       cpu = task_cpu(p);
8856 +       this_cpu = smp_processor_id();
8857 +
8858 +#ifdef CONFIG_SMP
8859 +       if (unlikely(task_running(rq, p)))
8860 +               goto out_activate;
8861 +
8862 +       new_cpu = cpu;
8863 +
8864 +       schedstat_inc(rq, ttwu_cnt);
8865 +       if (cpu == this_cpu) {
8866 +               schedstat_inc(rq, ttwu_local);
8867 +               goto out_set_cpu;
8868 +       }
8869 +
8870 +       for_each_domain(this_cpu, sd) {
8871 +               if (cpu_isset(cpu, sd->span)) {
8872 +                       schedstat_inc(sd, ttwu_wake_remote);
8873 +                       this_sd = sd;
8874 +                       break;
8875 +               }
8876 +       }
8877 +
8878 +       if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
8879 +               goto out_set_cpu;
8880 +
8881 +       /*
8882 +        * Check for affine wakeup and passive balancing possibilities.
8883 +        */
8884 +       if (this_sd) {
8885 +               int idx = this_sd->wake_idx;
8886 +               unsigned int imbalance;
8887 +
8888 +               imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
8889 +
8890 +               load = source_load(cpu, idx);
8891 +               this_load = target_load(this_cpu, idx);
8892 +
8893 +               new_cpu = this_cpu; /* Wake to this CPU if we can */
8894 +
8895 +               if (this_sd->flags & SD_WAKE_AFFINE) {
8896 +                       unsigned long tl = this_load;
8897 +                       unsigned long tl_per_task;
8898 +
8899 +                       tl_per_task = cpu_avg_load_per_task(this_cpu);
8900 +
8901 +                       /*
8902 +                        * If sync wakeup then subtract the (maximum possible)
8903 +                        * effect of the currently running task from the load
8904 +                        * of the current CPU:
8905 +                        */
8906 +                       if (sync)
8907 +                               tl -= current->load_weight;
8908 +
8909 +                       if ((tl <= load &&
8910 +                               tl + target_load(cpu, idx) <= tl_per_task) ||
8911 +                               100*(tl + p->load_weight) <= imbalance*load) {
8912 +                               /*
8913 +                                * This domain has SD_WAKE_AFFINE and
8914 +                                * p is cache cold in this domain, and
8915 +                                * there is no bad imbalance.
8916 +                                */
8917 +                               schedstat_inc(this_sd, ttwu_move_affine);
8918 +                               goto out_set_cpu;
8919 +                       }
8920 +               }
8921 +
8922 +               /*
8923 +                * Start passive balancing when half the imbalance_pct
8924 +                * limit is reached.
8925 +                */
8926 +               if (this_sd->flags & SD_WAKE_BALANCE) {
8927 +                       if (imbalance*this_load <= 100*load) {
8928 +                               schedstat_inc(this_sd, ttwu_move_balance);
8929 +                               goto out_set_cpu;
8930 +                       }
8931 +               }
8932 +       }
8933 +
8934 +       new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
8935 +out_set_cpu:
8936 +       new_cpu = wake_idle(new_cpu, p);
8937 +       if (new_cpu != cpu) {
8938 +               set_task_cpu(p, new_cpu);
8939 +               task_rq_unlock(rq, &flags);
8940 +               /* might preempt at this point */
8941 +               rq = task_rq_lock(p, &flags);
8942 +               old_state = p->state;
8943 +               if (!(old_state & state))
8944 +                       goto out;
8945 +               if (p->array)
8946 +                       goto out_running;
8947 +
8948 +               this_cpu = smp_processor_id();
8949 +               cpu = task_cpu(p);
8950 +       }
8951 +
8952 +out_activate:
8953 +#endif /* CONFIG_SMP */
8954 +       if (old_state == TASK_UNINTERRUPTIBLE) {
8955 +               rq->nr_uninterruptible--;
8956 +               vx_uninterruptible_dec(p);
8957 +               /*
8958 +                * Tasks on involuntary sleep don't earn
8959 +                * sleep_avg beyond just interactive state.
8960 +                */
8961 +               p->sleep_type = SLEEP_NONINTERACTIVE;
8962 +       } else
8963 +
8964 +       /*
8965 +        * Tasks that have marked their sleep as noninteractive get
8966 +        * woken up with their sleep average not weighted in an
8967 +        * interactive way.
8968 +        */
8969 +               if (old_state & TASK_NONINTERACTIVE)
8970 +                       p->sleep_type = SLEEP_NONINTERACTIVE;
8971 +
8972 +
8973 +       activate_task(p, rq, cpu == this_cpu);
8974 +       /*
8975 +        * Sync wakeups (i.e. those types of wakeups where the waker
8976 +        * has indicated that it will leave the CPU in short order)
8977 +        * don't trigger a preemption, if the woken up task will run on
8978 +        * this cpu. (in this case the 'I will reschedule' promise of
8979 +        * the waker guarantees that the freshly woken up task is going
8980 +        * to be considered on this CPU.)
8981 +        */
8982 +       if (!sync || cpu != this_cpu) {
8983 +               if (TASK_PREEMPTS_CURR(p, rq))
8984 +                       resched_task(rq->curr);
8985 +       }
8986 +       success = 1;
8987 +
8988 +out_running:
8989 +       p->state = TASK_RUNNING;
8990 +out:
8991 +       task_rq_unlock(rq, &flags);
8992 +
8993 +       return success;
8994 +}
8995 +
8996 +int fastcall wake_up_process(struct task_struct *p)
8997 +{
8998 +       return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
8999 +                                TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
9000 +}
9001 +EXPORT_SYMBOL(wake_up_process);
9002 +
9003 +int fastcall wake_up_state(struct task_struct *p, unsigned int state)
9004 +{
9005 +       return try_to_wake_up(p, state, 0);
9006 +}
9007 +
9008 +static void task_running_tick(struct rq *rq, struct task_struct *p, int cpu);
9009 +/*
9010 + * Perform scheduler related setup for a newly forked process p.
9011 + * p is forked by current.
9012 + */
9013 +void fastcall sched_fork(struct task_struct *p, int clone_flags)
9014 +{
9015 +       int cpu = get_cpu();
9016 +
9017 +#ifdef CONFIG_SMP
9018 +       cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
9019 +#endif
9020 +       set_task_cpu(p, cpu);
9021 +
9022 +       /*
9023 +        * We mark the process as running here, but have not actually
9024 +        * inserted it onto the runqueue yet. This guarantees that
9025 +        * nobody will actually run it, and a signal or other external
9026 +        * event cannot wake it up and insert it on the runqueue either.
9027 +        */
9028 +       p->state = TASK_RUNNING;
9029 +
9030 +       /*
9031 +        * Make sure we do not leak PI boosting priority to the child:
9032 +        */
9033 +       p->prio = current->normal_prio;
9034 +
9035 +       INIT_LIST_HEAD(&p->run_list);
9036 +       p->array = NULL;
9037 +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
9038 +       if (unlikely(sched_info_on()))
9039 +               memset(&p->sched_info, 0, sizeof(p->sched_info));
9040 +#endif
9041 +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
9042 +       p->oncpu = 0;
9043 +#endif
9044 +#ifdef CONFIG_PREEMPT
9045 +       /* Want to start with kernel preemption disabled. */
9046 +       task_thread_info(p)->preempt_count = 1;
9047 +#endif
9048 +       /*
9049 +        * Share the timeslice between parent and child, thus the
9050 +        * total amount of pending timeslices in the system doesn't change,
9051 +        * resulting in more scheduling fairness.
9052 +        */
9053 +       local_irq_disable();
9054 +       p->time_slice = (current->time_slice + 1) >> 1;
9055 +       /*
9056 +        * The remainder of the first timeslice might be recovered by
9057 +        * the parent if the child exits early enough.
9058 +        */
9059 +       p->first_time_slice = 1;
9060 +       current->time_slice >>= 1;
9061 +       p->timestamp = sched_clock();
9062 +       if (unlikely(!current->time_slice)) {
9063 +               /*
9064 +                * This case is rare, it happens when the parent has only
9065 +                * a single jiffy left from its timeslice. Taking the
9066 +                * runqueue lock is not a problem.
9067 +                */
9068 +               current->time_slice = 1;
9069 +               task_running_tick(cpu_rq(cpu), current, cpu);
9070 +       }
9071 +       local_irq_enable();
9072 +       put_cpu();
9073 +}
9074 +
9075 +/*
9076 + * wake_up_new_task - wake up a newly created task for the first time.
9077 + *
9078 + * This function will do some initial scheduler statistics housekeeping
9079 + * that must be done for every newly created context, then puts the task
9080 + * on the runqueue and wakes it.
9081 + */
9082 +void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
9083 +{
9084 +       struct rq *rq, *this_rq;
9085 +       unsigned long flags;
9086 +       int this_cpu, cpu;
9087 +
9088 +       rq = task_rq_lock(p, &flags);
9089 +       BUG_ON(p->state != TASK_RUNNING);
9090 +       this_cpu = smp_processor_id();
9091 +       cpu = task_cpu(p);
9092 +
9093 +       /*
9094 +        * We decrease the sleep average of forking parents
9095 +        * and children as well, to keep max-interactive tasks
9096 +        * from forking tasks that are max-interactive. The parent
9097 +        * (current) is done further down, under its lock.
9098 +        */
9099 +       p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
9100 +               CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
9101 +
9102 +       p->prio = effective_prio(p);
9103 +
9104 +       vx_activate_task(p);
9105 +       if (likely(cpu == this_cpu)) {
9106 +               if (!(clone_flags & CLONE_VM)) {
9107 +                       /*
9108 +                        * The VM isn't cloned, so we're in a good position to
9109 +                        * do child-runs-first in anticipation of an exec. This
9110 +                        * usually avoids a lot of COW overhead.
9111 +                        */
9112 +                       if (unlikely(!current->array))
9113 +                               __activate_task(p, rq);
9114 +                       else {
9115 +                               p->prio = current->prio;
9116 +                               BUG_ON(p->state & TASK_ONHOLD);
9117 +                               p->normal_prio = current->normal_prio;
9118 +                               list_add_tail(&p->run_list, &current->run_list);
9119 +                               p->array = current->array;
9120 +                               p->array->nr_active++;
9121 +                               inc_nr_running(p, rq);
9122 +                       }
9123 +                       set_need_resched();
9124 +               } else
9125 +                       /* Run child last */
9126 +                       __activate_task(p, rq);
9127 +               /*
9128 +                * We skip the following code due to cpu == this_cpu
9129 +                *
9130 +                *   task_rq_unlock(rq, &flags);
9131 +                *   this_rq = task_rq_lock(current, &flags);
9132 +                */
9133 +               this_rq = rq;
9134 +       } else {
9135 +               this_rq = cpu_rq(this_cpu);
9136 +
9137 +               /*
9138 +                * Not the local CPU - must adjust timestamp. This should
9139 +                * get optimised away in the !CONFIG_SMP case.
9140 +                */
9141 +               p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
9142 +                                       + rq->most_recent_timestamp;
9143 +               __activate_task(p, rq);
9144 +               if (TASK_PREEMPTS_CURR(p, rq))
9145 +                       resched_task(rq->curr);
9146 +
9147 +               /*
9148 +                * Parent and child are on different CPUs, now get the
9149 +                * parent runqueue to update the parent's ->sleep_avg:
9150 +                */
9151 +               task_rq_unlock(rq, &flags);
9152 +               this_rq = task_rq_lock(current, &flags);
9153 +       }
9154 +       current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
9155 +               PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
9156 +       task_rq_unlock(this_rq, &flags);
9157 +}
9158 +
9159 +/*
9160 + * Potentially available exiting-child timeslices are
9161 + * retrieved here - this way the parent does not get
9162 + * penalized for creating too many threads.
9163 + *
9164 + * (this cannot be used to 'generate' timeslices
9165 + * artificially, because any timeslice recovered here
9166 + * was given away by the parent in the first place.)
9167 + */
9168 +void fastcall sched_exit(struct task_struct *p)
9169 +{
9170 +       unsigned long flags;
9171 +       struct rq *rq;
9172 +
9173 +       /*
9174 +        * If the child was a (relative-) CPU hog then decrease
9175 +        * the sleep_avg of the parent as well.
9176 +        */
9177 +       rq = task_rq_lock(p->parent, &flags);
9178 +       if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
9179 +               p->parent->time_slice += p->time_slice;
9180 +               if (unlikely(p->parent->time_slice > task_timeslice(p)))
9181 +                       p->parent->time_slice = task_timeslice(p);
9182 +       }
9183 +       if (p->sleep_avg < p->parent->sleep_avg)
9184 +               p->parent->sleep_avg = p->parent->sleep_avg /
9185 +               (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
9186 +               (EXIT_WEIGHT + 1);
9187 +       task_rq_unlock(rq, &flags);
9188 +}
9189 +
9190 +/**
9191 + * prepare_task_switch - prepare to switch tasks
9192 + * @rq: the runqueue preparing to switch
9193 + * @next: the task we are going to switch to.
9194 + *
9195 + * This is called with the rq lock held and interrupts off. It must
9196 + * be paired with a subsequent finish_task_switch after the context
9197 + * switch.
9198 + *
9199 + * prepare_task_switch sets up locking and calls architecture specific
9200 + * hooks.
9201 + */
9202 +static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
9203 +{
9204 +       prepare_lock_switch(rq, next);
9205 +       prepare_arch_switch(next);
9206 +}
9207 +
9208 +/**
9209 + * finish_task_switch - clean up after a task-switch
9210 + * @rq: runqueue associated with task-switch
9211 + * @prev: the thread we just switched away from.
9212 + *
9213 + * finish_task_switch must be called after the context switch, paired
9214 + * with a prepare_task_switch call before the context switch.
9215 + * finish_task_switch will reconcile locking set up by prepare_task_switch,
9216 + * and do any other architecture-specific cleanup actions.
9217 + *
9218 + * Note that we may have delayed dropping an mm in context_switch(). If
9219 + * so, we finish that here outside of the runqueue lock.  (Doing it
9220 + * with the lock held can cause deadlocks; see schedule() for
9221 + * details.)
9222 + */
9223 +static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
9224 +       __releases(rq->lock)
9225 +{
9226 +       struct mm_struct *mm = rq->prev_mm;
9227 +       long prev_state;
9228 +
9229 +       rq->prev_mm = NULL;
9230 +
9231 +       /*
9232 +        * A task struct has one reference for the use as "current".
9233 +        * If a task dies, then it sets TASK_DEAD in tsk->state and calls
9234 +        * schedule one last time. The schedule call will never return, and
9235 +        * the scheduled task must drop that reference.
9236 +        * The test for TASK_DEAD must occur while the runqueue locks are
9237 +        * still held, otherwise prev could be scheduled on another cpu, die
9238 +        * there before we look at prev->state, and then the reference would
9239 +        * be dropped twice.
9240 +        *              Manfred Spraul <manfred@colorfullife.com>
9241 +        */
9242 +       prev_state = prev->state;
9243 +       finish_arch_switch(prev);
9244 +       finish_lock_switch(rq, prev);
9245 +       if (mm)
9246 +               mmdrop(mm);
9247 +       if (unlikely(prev_state == TASK_DEAD)) {
9248 +               /*
9249 +                * Remove function-return probe instances associated with this
9250 +                * task and put them back on the free list.
9251 +                */
9252 +               kprobe_flush_task(prev);
9253 +               put_task_struct(prev);
9254 +       }
9255 +}
9256 +
9257 +/**
9258 + * schedule_tail - first thing a freshly forked thread must call.
9259 + * @prev: the thread we just switched away from.
9260 + */
9261 +asmlinkage void schedule_tail(struct task_struct *prev)
9262 +       __releases(rq->lock)
9263 +{
9264 +       struct rq *rq = this_rq();
9265 +
9266 +       finish_task_switch(rq, prev);
9267 +#ifdef __ARCH_WANT_UNLOCKED_CTXSW
9268 +       /* In this case, finish_task_switch does not reenable preemption */
9269 +       preempt_enable();
9270 +#endif
9271 +       if (current->set_child_tid)
9272 +               put_user(current->pid, current->set_child_tid);
9273 +}
9274 +
9275 +/*
9276 + * context_switch - switch to the new MM and the new
9277 + * thread's register state.
9278 + */
9279 +static inline struct task_struct *
9280 +context_switch(struct rq *rq, struct task_struct *prev,
9281 +              struct task_struct *next)
9282 +{
9283 +       struct mm_struct *mm = next->mm;
9284 +       struct mm_struct *oldmm = prev->active_mm;
9285 +
9286 +       /*
9287 +        * For paravirt, this is coupled with an exit in switch_to to
9288 +        * combine the page table reload and the switch backend into
9289 +        * one hypercall.
9290 +        */
9291 +       arch_enter_lazy_cpu_mode();
9292 +
9293 +       if (!mm) {
9294 +               next->active_mm = oldmm;
9295 +               atomic_inc(&oldmm->mm_count);
9296 +               enter_lazy_tlb(oldmm, next);
9297 +       } else
9298 +               switch_mm(oldmm, mm, next);
9299 +
9300 +       if (!prev->mm) {
9301 +               prev->active_mm = NULL;
9302 +               WARN_ON(rq->prev_mm);
9303 +               rq->prev_mm = oldmm;
9304 +       }
9305 +       /*
9306 +        * Since the runqueue lock will be released by the next
9307 +        * task (which is an invalid locking op but in the case
9308 +        * of the scheduler it's an obvious special-case), so we
9309 +        * do an early lockdep release here:
9310 +        */
9311 +#ifndef __ARCH_WANT_UNLOCKED_CTXSW
9312 +       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
9313 +#endif
9314 +
9315 +       /* Here we just switch the register state and the stack. */
9316 +       switch_to(prev, next, prev);
9317 +
9318 +       return prev;
9319 +}
9320 +
9321 +/*
9322 + * nr_running, nr_uninterruptible and nr_context_switches:
9323 + *
9324 + * externally visible scheduler statistics: current number of runnable
9325 + * threads, current number of uninterruptible-sleeping threads, total
9326 + * number of context switches performed since bootup.
9327 + */
9328 +unsigned long nr_running(void)
9329 +{
9330 +       unsigned long i, sum = 0;
9331 +
9332 +       for_each_online_cpu(i)
9333 +               sum += cpu_rq(i)->nr_running;
9334 +
9335 +       return sum;
9336 +}
9337 +
9338 +unsigned long nr_uninterruptible(void)
9339 +{
9340 +       unsigned long i, sum = 0;
9341 +
9342 +       for_each_possible_cpu(i)
9343 +               sum += cpu_rq(i)->nr_uninterruptible;
9344 +
9345 +       /*
9346 +        * Since we read the counters lockless, it might be slightly
9347 +        * inaccurate. Do not allow it to go below zero though:
9348 +        */
9349 +       if (unlikely((long)sum < 0))
9350 +               sum = 0;
9351 +
9352 +       return sum;
9353 +}
9354 +
9355 +unsigned long long nr_context_switches(void)
9356 +{
9357 +       int i;
9358 +       unsigned long long sum = 0;
9359 +
9360 +       for_each_possible_cpu(i)
9361 +               sum += cpu_rq(i)->nr_switches;
9362 +
9363 +       return sum;
9364 +}
9365 +
9366 +unsigned long nr_iowait(void)
9367 +{
9368 +       unsigned long i, sum = 0;
9369 +
9370 +       for_each_possible_cpu(i)
9371 +               sum += atomic_read(&cpu_rq(i)->nr_iowait);
9372 +
9373 +       return sum;
9374 +}
9375 +
9376 +unsigned long nr_active(void)
9377 +{
9378 +       unsigned long i, running = 0, uninterruptible = 0;
9379 +
9380 +       for_each_online_cpu(i) {
9381 +               running += cpu_rq(i)->nr_running;
9382 +               uninterruptible += cpu_rq(i)->nr_uninterruptible;
9383 +       }
9384 +
9385 +       if (unlikely((long)uninterruptible < 0))
9386 +               uninterruptible = 0;
9387 +
9388 +       return running + uninterruptible;
9389 +}
9390 +
9391 +#ifdef CONFIG_SMP
9392 +
9393 +/*
9394 + * Is this task likely cache-hot:
9395 + */
9396 +static inline int
9397 +task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
9398 +{
9399 +       return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
9400 +}
9401 +
9402 +/*
9403 + * double_rq_lock - safely lock two runqueues
9404 + *
9405 + * Note this does not disable interrupts like task_rq_lock,
9406 + * you need to do so manually before calling.
9407 + */
9408 +static void double_rq_lock(struct rq *rq1, struct rq *rq2)
9409 +       __acquires(rq1->lock)
9410 +       __acquires(rq2->lock)
9411 +{
9412 +       BUG_ON(!irqs_disabled());
9413 +       if (rq1 == rq2) {
9414 +               spin_lock(&rq1->lock);
9415 +               __acquire(rq2->lock);   /* Fake it out ;) */
9416 +       } else {
9417 +               if (rq1 < rq2) {
9418 +                       spin_lock(&rq1->lock);
9419 +                       spin_lock(&rq2->lock);
9420 +               } else {
9421 +                       spin_lock(&rq2->lock);
9422 +                       spin_lock(&rq1->lock);
9423 +               }
9424 +       }
9425 +}
9426 +
9427 +/*
9428 + * double_rq_unlock - safely unlock two runqueues
9429 + *
9430 + * Note this does not restore interrupts like task_rq_unlock,
9431 + * you need to do so manually after calling.
9432 + */
9433 +static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
9434 +       __releases(rq1->lock)
9435 +       __releases(rq2->lock)
9436 +{
9437 +       spin_unlock(&rq1->lock);
9438 +       if (rq1 != rq2)
9439 +               spin_unlock(&rq2->lock);
9440 +       else
9441 +               __release(rq2->lock);
9442 +}
9443 +
9444 +/*
9445 + * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
9446 + */
9447 +static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
9448 +       __releases(this_rq->lock)
9449 +       __acquires(busiest->lock)
9450 +       __acquires(this_rq->lock)
9451 +{
9452 +       if (unlikely(!irqs_disabled())) {
9453 +               /* printk() doesn't work well under rq->lock */
9454 +               spin_unlock(&this_rq->lock);
9455 +               BUG_ON(1);
9456 +       }
9457 +       if (unlikely(!spin_trylock(&busiest->lock))) {
9458 +               if (busiest < this_rq) {
9459 +                       spin_unlock(&this_rq->lock);
9460 +                       spin_lock(&busiest->lock);
9461 +                       spin_lock(&this_rq->lock);
9462 +               } else
9463 +                       spin_lock(&busiest->lock);
9464 +       }
9465 +}
9466 +
9467 +/*
9468 + * If dest_cpu is allowed for this process, migrate the task to it.
9469 + * This is accomplished by forcing the cpu_allowed mask to only
9470 + * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
9471 + * the cpu_allowed mask is restored.
9472 + */
9473 +static void sched_migrate_task(struct task_struct *p, int dest_cpu)
9474 +{
9475 +       struct migration_req req;
9476 +       unsigned long flags;
9477 +       struct rq *rq;
9478 +
9479 +       rq = task_rq_lock(p, &flags);
9480 +       if (!cpu_isset(dest_cpu, p->cpus_allowed)
9481 +           || unlikely(cpu_is_offline(dest_cpu)))
9482 +               goto out;
9483 +
9484 +       /* force the process onto the specified CPU */
9485 +       if (migrate_task(p, dest_cpu, &req)) {
9486 +               /* Need to wait for migration thread (might exit: take ref). */
9487 +               struct task_struct *mt = rq->migration_thread;
9488 +
9489 +               get_task_struct(mt);
9490 +               task_rq_unlock(rq, &flags);
9491 +               wake_up_process(mt);
9492 +               put_task_struct(mt);
9493 +               wait_for_completion(&req.done);
9494 +
9495 +               return;
9496 +       }
9497 +out:
9498 +       task_rq_unlock(rq, &flags);
9499 +}
9500 +
9501 +/*
9502 + * sched_exec - execve() is a valuable balancing opportunity, because at
9503 + * this point the task has the smallest effective memory and cache footprint.
9504 + */
9505 +void sched_exec(void)
9506 +{
9507 +       int new_cpu, this_cpu = get_cpu();
9508 +       new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
9509 +       put_cpu();
9510 +       if (new_cpu != this_cpu)
9511 +               sched_migrate_task(current, new_cpu);
9512 +}
9513 +
9514 +/*
9515 + * pull_task - move a task from a remote runqueue to the local runqueue.
9516 + * Both runqueues must be locked.
9517 + */
9518 +static void pull_task(struct rq *src_rq, struct prio_array *src_array,
9519 +                     struct task_struct *p, struct rq *this_rq,
9520 +                     struct prio_array *this_array, int this_cpu)
9521 +{
9522 +       dequeue_task(p, src_array);
9523 +       dec_nr_running(p, src_rq);
9524 +       set_task_cpu(p, this_cpu);
9525 +       inc_nr_running(p, this_rq);
9526 +       enqueue_task(p, this_array);
9527 +       p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
9528 +                               + this_rq->most_recent_timestamp;
9529 +       /*
9530 +        * Note that idle threads have a prio of MAX_PRIO, for this test
9531 +        * to be always true for them.
9532 +        */
9533 +       if (TASK_PREEMPTS_CURR(p, this_rq))
9534 +               resched_task(this_rq->curr);
9535 +}
9536 +
9537 +/*
9538 + * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
9539 + */
9540 +static
9541 +int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
9542 +                    struct sched_domain *sd, enum idle_type idle,
9543 +                    int *all_pinned)
9544 +{
9545 +       /*
9546 +        * We do not migrate tasks that are:
9547 +        * 1) running (obviously), or
9548 +        * 2) cannot be migrated to this CPU due to cpus_allowed, or
9549 +        * 3) are cache-hot on their current CPU.
9550 +        */
9551 +       if (!cpu_isset(this_cpu, p->cpus_allowed))
9552 +               return 0;
9553 +       *all_pinned = 0;
9554 +
9555 +       if (task_running(rq, p))
9556 +               return 0;
9557 +
9558 +       /*
9559 +        * Aggressive migration if:
9560 +        * 1) task is cache cold, or
9561 +        * 2) too many balance attempts have failed.
9562 +        */
9563 +
9564 +       if (sd->nr_balance_failed > sd->cache_nice_tries) {
9565 +#ifdef CONFIG_SCHEDSTATS
9566 +               if (task_hot(p, rq->most_recent_timestamp, sd))
9567 +                       schedstat_inc(sd, lb_hot_gained[idle]);
9568 +#endif
9569 +               return 1;
9570 +       }
9571 +
9572 +       if (task_hot(p, rq->most_recent_timestamp, sd))
9573 +               return 0;
9574 +       return 1;
9575 +}
9576 +
9577 +#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
9578 +
9579 +/*
9580 + * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
9581 + * load from busiest to this_rq, as part of a balancing operation within
9582 + * "domain". Returns the number of tasks moved.
9583 + *
9584 + * Called with both runqueues locked.
9585 + */
9586 +static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
9587 +                     unsigned long max_nr_move, unsigned long max_load_move,
9588 +                     struct sched_domain *sd, enum idle_type idle,
9589 +                     int *all_pinned)
9590 +{
9591 +       int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
9592 +           best_prio_seen, skip_for_load;
9593 +       struct prio_array *array, *dst_array;
9594 +       struct list_head *head, *curr;
9595 +       struct task_struct *tmp;
9596 +       long rem_load_move;
9597 +
9598 +       if (max_nr_move == 0 || max_load_move == 0)
9599 +               goto out;
9600 +
9601 +       rem_load_move = max_load_move;
9602 +       pinned = 1;
9603 +       this_best_prio = rq_best_prio(this_rq);
9604 +       best_prio = rq_best_prio(busiest);
9605 +       /*
9606 +        * Enable handling of the case where there is more than one task
9607 +        * with the best priority.   If the current running task is one
9608 +        * of those with prio==best_prio we know it won't be moved
9609 +        * and therefore it's safe to override the skip (based on load) of
9610 +        * any task we find with that prio.
9611 +        */
9612 +       best_prio_seen = best_prio == busiest->curr->prio;
9613 +
9614 +       /*
9615 +        * We first consider expired tasks. Those will likely not be
9616 +        * executed in the near future, and they are most likely to
9617 +        * be cache-cold, thus switching CPUs has the least effect
9618 +        * on them.
9619 +        */
9620 +       if (busiest->expired->nr_active) {
9621 +               array = busiest->expired;
9622 +               dst_array = this_rq->expired;
9623 +       } else {
9624 +               array = busiest->active;
9625 +               dst_array = this_rq->active;
9626 +       }
9627 +
9628 +new_array:
9629 +       /* Start searching at priority 0: */
9630 +       idx = 0;
9631 +skip_bitmap:
9632 +       if (!idx)
9633 +               idx = sched_find_first_bit(array->bitmap);
9634 +       else
9635 +               idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
9636 +       if (idx >= MAX_PRIO) {
9637 +               if (array == busiest->expired && busiest->active->nr_active) {
9638 +                       array = busiest->active;
9639 +                       dst_array = this_rq->active;
9640 +                       goto new_array;
9641 +               }
9642 +               goto out;
9643 +       }
9644 +
9645 +       head = array->queue + idx;
9646 +       curr = head->prev;
9647 +skip_queue:
9648 +       tmp = list_entry(curr, struct task_struct, run_list);
9649 +
9650 +       curr = curr->prev;
9651 +
9652 +       /*
9653 +        * To help distribute high priority tasks across CPUs we don't
9654 +        * skip a task if it will be the highest priority task (i.e. smallest
9655 +        * prio value) on its new queue regardless of its load weight
9656 +        */
9657 +       skip_for_load = tmp->load_weight > rem_load_move;
9658 +       if (skip_for_load && idx < this_best_prio)
9659 +               skip_for_load = !best_prio_seen && idx == best_prio;
9660 +       if (skip_for_load ||
9661 +           !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
9662 +
9663 +               best_prio_seen |= idx == best_prio;
9664 +               if (curr != head)
9665 +                       goto skip_queue;
9666 +               idx++;
9667 +               goto skip_bitmap;
9668 +       }
9669 +
9670 +       pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
9671 +       pulled++;
9672 +       rem_load_move -= tmp->load_weight;
9673 +
9674 +       /*
9675 +        * We only want to steal up to the prescribed number of tasks
9676 +        * and the prescribed amount of weighted load.
9677 +        */
9678 +       if (pulled < max_nr_move && rem_load_move > 0) {
9679 +               if (idx < this_best_prio)
9680 +                       this_best_prio = idx;
9681 +               if (curr != head)
9682 +                       goto skip_queue;
9683 +               idx++;
9684 +               goto skip_bitmap;
9685 +       }
9686 +out:
9687 +       /*
9688 +        * Right now, this is the only place pull_task() is called,
9689 +        * so we can safely collect pull_task() stats here rather than
9690 +        * inside pull_task().
9691 +        */
9692 +       schedstat_add(sd, lb_gained[idle], pulled);
9693 +
9694 +       if (all_pinned)
9695 +               *all_pinned = pinned;
9696 +       return pulled;
9697 +}
9698 +
9699 +/*
9700 + * find_busiest_group finds and returns the busiest CPU group within the
9701 + * domain. It calculates and returns the amount of weighted load which
9702 + * should be moved to restore balance via the imbalance parameter.
9703 + */
9704 +static struct sched_group *
9705 +find_busiest_group(struct sched_domain *sd, int this_cpu,
9706 +                  unsigned long *imbalance, enum idle_type idle, int *sd_idle,
9707 +                  cpumask_t *cpus, int *balance)
9708 +{
9709 +       struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
9710 +       unsigned long max_load, avg_load, total_load, this_load, total_pwr;
9711 +       unsigned long max_pull;
9712 +       unsigned long busiest_load_per_task, busiest_nr_running;
9713 +       unsigned long this_load_per_task, this_nr_running;
9714 +       int load_idx;
9715 +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
9716 +       int power_savings_balance = 1;
9717 +       unsigned long leader_nr_running = 0, min_load_per_task = 0;
9718 +       unsigned long min_nr_running = ULONG_MAX;
9719 +       struct sched_group *group_min = NULL, *group_leader = NULL;
9720 +#endif
9721 +
9722 +       max_load = this_load = total_load = total_pwr = 0;
9723 +       busiest_load_per_task = busiest_nr_running = 0;
9724 +       this_load_per_task = this_nr_running = 0;
9725 +       if (idle == NOT_IDLE)
9726 +               load_idx = sd->busy_idx;
9727 +       else if (idle == NEWLY_IDLE)
9728 +               load_idx = sd->newidle_idx;
9729 +       else
9730 +               load_idx = sd->idle_idx;
9731 +
9732 +       do {
9733 +               unsigned long load, group_capacity;
9734 +               int local_group;
9735 +               int i;
9736 +               unsigned int balance_cpu = -1, first_idle_cpu = 0;
9737 +               unsigned long sum_nr_running, sum_weighted_load;
9738 +
9739 +               local_group = cpu_isset(this_cpu, group->cpumask);
9740 +
9741 +               if (local_group)
9742 +                       balance_cpu = first_cpu(group->cpumask);
9743 +
9744 +               /* Tally up the load of all CPUs in the group */
9745 +               sum_weighted_load = sum_nr_running = avg_load = 0;
9746 +
9747 +               for_each_cpu_mask(i, group->cpumask) {
9748 +                       struct rq *rq;
9749 +
9750 +                       if (!cpu_isset(i, *cpus))
9751 +                               continue;
9752 +
9753 +                       rq = cpu_rq(i);
9754 +
9755 +                       if (*sd_idle && !idle_cpu(i))
9756 +                               *sd_idle = 0;
9757 +
9758 +                       /* Bias balancing toward cpus of our domain */
9759 +                       if (local_group) {
9760 +                               if (idle_cpu(i) && !first_idle_cpu) {
9761 +                                       first_idle_cpu = 1;
9762 +                                       balance_cpu = i;
9763 +                               }
9764 +
9765 +                               load = target_load(i, load_idx);
9766 +                       } else
9767 +                               load = source_load(i, load_idx);
9768 +
9769 +                       avg_load += load;
9770 +                       sum_nr_running += rq->nr_running;
9771 +                       sum_weighted_load += rq->raw_weighted_load;
9772 +               }
9773 +
9774 +               /*
9775 +                * First idle cpu or the first cpu(busiest) in this sched group
9776 +                * is eligible for doing load balancing at this and above
9777 +                * domains.
9778 +                */
9779 +               if (local_group && balance_cpu != this_cpu && balance) {
9780 +                       *balance = 0;
9781 +                       goto ret;
9782 +               }
9783 +
9784 +               total_load += avg_load;
9785 +               total_pwr += group->__cpu_power;
9786 +
9787 +               /* Adjust by relative CPU power of the group */
9788 +               avg_load = sg_div_cpu_power(group,
9789 +                               avg_load * SCHED_LOAD_SCALE);
9790 +
9791 +               group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
9792 +
9793 +               if (local_group) {
9794 +                       this_load = avg_load;
9795 +                       this = group;
9796 +                       this_nr_running = sum_nr_running;
9797 +                       this_load_per_task = sum_weighted_load;
9798 +               } else if (avg_load > max_load &&
9799 +                          sum_nr_running > group_capacity) {
9800 +                       max_load = avg_load;
9801 +                       busiest = group;
9802 +                       busiest_nr_running = sum_nr_running;
9803 +                       busiest_load_per_task = sum_weighted_load;
9804 +               }
9805 +
9806 +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
9807 +               /*
9808 +                * Busy processors will not participate in power savings
9809 +                * balance.
9810 +                */
9811 +               if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
9812 +                       goto group_next;
9813 +
9814 +               /*
9815 +                * If the local group is idle or completely loaded
9816 +                * no need to do power savings balance at this domain
9817 +                */
9818 +               if (local_group && (this_nr_running >= group_capacity ||
9819 +                                   !this_nr_running))
9820 +                       power_savings_balance = 0;
9821 +
9822 +               /*
9823 +                * If a group is already running at full capacity or idle,
9824 +                * don't include that group in power savings calculations
9825 +                */
9826 +               if (!power_savings_balance || sum_nr_running >= group_capacity
9827 +                   || !sum_nr_running)
9828 +                       goto group_next;
9829 +
9830 +               /*
9831 +                * Calculate the group which has the least non-idle load.
9832 +                * This is the group from where we need to pick up the load
9833 +                * for saving power
9834 +                */
9835 +               if ((sum_nr_running < min_nr_running) ||
9836 +                   (sum_nr_running == min_nr_running &&
9837 +                    first_cpu(group->cpumask) <
9838 +                    first_cpu(group_min->cpumask))) {
9839 +                       group_min = group;
9840 +                       min_nr_running = sum_nr_running;
9841 +                       min_load_per_task = sum_weighted_load /
9842 +                                               sum_nr_running;
9843 +               }
9844 +
9845 +               /*
9846 +                * Calculate the group which is almost near its
9847 +                * capacity but still has some space to pick up some load
9848 +                * from other group and save more power
9849 +                */
9850 +               if (sum_nr_running <= group_capacity - 1) {
9851 +                       if (sum_nr_running > leader_nr_running ||
9852 +                           (sum_nr_running == leader_nr_running &&
9853 +                            first_cpu(group->cpumask) >
9854 +                             first_cpu(group_leader->cpumask))) {
9855 +                               group_leader = group;
9856 +                               leader_nr_running = sum_nr_running;
9857 +                       }
9858 +               }
9859 +group_next:
9860 +#endif
9861 +               group = group->next;
9862 +       } while (group != sd->groups);
9863 +
9864 +       if (!busiest || this_load >= max_load || busiest_nr_running == 0)
9865 +               goto out_balanced;
9866 +
9867 +       avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
9868 +
9869 +       if (this_load >= avg_load ||
9870 +                       100*max_load <= sd->imbalance_pct*this_load)
9871 +               goto out_balanced;
9872 +
9873 +       busiest_load_per_task /= busiest_nr_running;
9874 +       /*
9875 +        * We're trying to get all the cpus to the average_load, so we don't
9876 +        * want to push ourselves above the average load, nor do we wish to
9877 +        * reduce the max loaded cpu below the average load, as either of these
9878 +        * actions would just result in more rebalancing later, and ping-pong
9879 +        * tasks around. Thus we look for the minimum possible imbalance.
9880 +        * Negative imbalances (*we* are more loaded than anyone else) will
9881 +        * be counted as no imbalance for these purposes -- we can't fix that
9882 +        * by pulling tasks to us.  Be careful of negative numbers as they'll
9883 +        * appear as very large values with unsigned longs.
9884 +        */
9885 +       if (max_load <= busiest_load_per_task)
9886 +               goto out_balanced;
9887 +
9888 +       /*
9889 +        * In the presence of smp nice balancing, certain scenarios can have
9890 +        * max load less than avg load(as we skip the groups at or below
9891 +        * its cpu_power, while calculating max_load..)
9892 +        */
9893 +       if (max_load < avg_load) {
9894 +               *imbalance = 0;
9895 +               goto small_imbalance;
9896 +       }
9897 +
9898 +       /* Don't want to pull so many tasks that a group would go idle */
9899 +       max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
9900 +
9901 +       /* How much load to actually move to equalise the imbalance */
9902 +       *imbalance = min(max_pull * busiest->__cpu_power,
9903 +                               (avg_load - this_load) * this->__cpu_power)
9904 +                       / SCHED_LOAD_SCALE;
9905 +
9906 +       /*
9907 +        * if *imbalance is less than the average load per runnable task
9908 +        * there is no guarantee that any tasks will be moved so we'll have
9909 +        * a think about bumping its value to force at least one task to be
9910 +        * moved
9911 +        */
9912 +       if (*imbalance < busiest_load_per_task) {
9913 +               unsigned long tmp, pwr_now, pwr_move;
9914 +               unsigned int imbn;
9915 +
9916 +small_imbalance:
9917 +               pwr_move = pwr_now = 0;
9918 +               imbn = 2;
9919 +               if (this_nr_running) {
9920 +                       this_load_per_task /= this_nr_running;
9921 +                       if (busiest_load_per_task > this_load_per_task)
9922 +                               imbn = 1;
9923 +               } else
9924 +                       this_load_per_task = SCHED_LOAD_SCALE;
9925 +
9926 +               if (max_load - this_load >= busiest_load_per_task * imbn) {
9927 +                       *imbalance = busiest_load_per_task;
9928 +                       return busiest;
9929 +               }
9930 +
9931 +               /*
9932 +                * OK, we don't have enough imbalance to justify moving tasks,
9933 +                * however we may be able to increase total CPU power used by
9934 +                * moving them.
9935 +                */
9936 +
9937 +               pwr_now += busiest->__cpu_power *
9938 +                               min(busiest_load_per_task, max_load);
9939 +               pwr_now += this->__cpu_power *
9940 +                               min(this_load_per_task, this_load);
9941 +               pwr_now /= SCHED_LOAD_SCALE;
9942 +
9943 +               /* Amount of load we'd subtract */
9944 +               tmp = sg_div_cpu_power(busiest,
9945 +                               busiest_load_per_task * SCHED_LOAD_SCALE);
9946 +               if (max_load > tmp)
9947 +                       pwr_move += busiest->__cpu_power *
9948 +                               min(busiest_load_per_task, max_load - tmp);
9949 +
9950 +               /* Amount of load we'd add */
9951 +               if (max_load * busiest->__cpu_power <
9952 +                               busiest_load_per_task * SCHED_LOAD_SCALE)
9953 +                       tmp = sg_div_cpu_power(this,
9954 +                                       max_load * busiest->__cpu_power);
9955 +               else
9956 +                       tmp = sg_div_cpu_power(this,
9957 +                               busiest_load_per_task * SCHED_LOAD_SCALE);
9958 +               pwr_move += this->__cpu_power *
9959 +                               min(this_load_per_task, this_load + tmp);
9960 +               pwr_move /= SCHED_LOAD_SCALE;
9961 +
9962 +               /* Move if we gain throughput */
9963 +               if (pwr_move <= pwr_now)
9964 +                       goto out_balanced;
9965 +
9966 +               *imbalance = busiest_load_per_task;
9967 +       }
9968 +
9969 +       return busiest;
9970 +
9971 +out_balanced:
9972 +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
9973 +       if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
9974 +               goto ret;
9975 +
9976 +       if (this == group_leader && group_leader != group_min) {
9977 +               *imbalance = min_load_per_task;
9978 +               return group_min;
9979 +       }
9980 +#endif
9981 +ret:
9982 +       *imbalance = 0;
9983 +       return NULL;
9984 +}
9985 +
9986 +/*
9987 + * find_busiest_queue - find the busiest runqueue among the cpus in group.
9988 + */
9989 +static struct rq *
9990 +find_busiest_queue(struct sched_group *group, enum idle_type idle,
9991 +                  unsigned long imbalance, cpumask_t *cpus)
9992 +{
9993 +       struct rq *busiest = NULL, *rq;
9994 +       unsigned long max_load = 0;
9995 +       int i;
9996 +
9997 +       for_each_cpu_mask(i, group->cpumask) {
9998 +
9999 +               if (!cpu_isset(i, *cpus))
10000 +                       continue;
10001 +
10002 +               rq = cpu_rq(i);
10003 +
10004 +               if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
10005 +                       continue;
10006 +
10007 +               if (rq->raw_weighted_load > max_load) {
10008 +                       max_load = rq->raw_weighted_load;
10009 +                       busiest = rq;
10010 +               }
10011 +       }
10012 +
10013 +       return busiest;
10014 +}
10015 +
10016 +/*
10017 + * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
10018 + * so long as it is large enough.
10019 + */
10020 +#define MAX_PINNED_INTERVAL    512
10021 +
10022 +static inline unsigned long minus_1_or_zero(unsigned long n)
10023 +{
10024 +       return n > 0 ? n - 1 : 0;
10025 +}
10026 +
10027 +/*
10028 + * Check this_cpu to ensure it is balanced within domain. Attempt to move
10029 + * tasks if there is an imbalance.
10030 + */
10031 +static int load_balance(int this_cpu, struct rq *this_rq,
10032 +                       struct sched_domain *sd, enum idle_type idle,
10033 +                       int *balance)
10034 +{
10035 +       int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
10036 +       struct sched_group *group;
10037 +       unsigned long imbalance;
10038 +       struct rq *busiest;
10039 +       cpumask_t cpus = CPU_MASK_ALL;
10040 +       unsigned long flags;
10041 +
10042 +       /*
10043 +        * When power savings policy is enabled for the parent domain, idle
10044 +        * sibling can pick up load irrespective of busy siblings. In this case,
10045 +        * let the state of idle sibling percolate up as IDLE, instead of
10046 +        * portraying it as NOT_IDLE.
10047 +        */
10048 +       if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
10049 +           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
10050 +               sd_idle = 1;
10051 +
10052 +       schedstat_inc(sd, lb_cnt[idle]);
10053 +
10054 +redo:
10055 +       group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
10056 +                                  &cpus, balance);
10057 +
10058 +       if (*balance == 0)
10059 +               goto out_balanced;
10060 +
10061 +       if (!group) {
10062 +               schedstat_inc(sd, lb_nobusyg[idle]);
10063 +               goto out_balanced;
10064 +       }
10065 +
10066 +       busiest = find_busiest_queue(group, idle, imbalance, &cpus);
10067 +       if (!busiest) {
10068 +               schedstat_inc(sd, lb_nobusyq[idle]);
10069 +               goto out_balanced;
10070 +       }
10071 +
10072 +       BUG_ON(busiest == this_rq);
10073 +
10074 +       schedstat_add(sd, lb_imbalance[idle], imbalance);
10075 +
10076 +       nr_moved = 0;
10077 +       if (busiest->nr_running > 1) {
10078 +               /*
10079 +                * Attempt to move tasks. If find_busiest_group has found
10080 +                * an imbalance but busiest->nr_running <= 1, the group is
10081 +                * still unbalanced. nr_moved simply stays zero, so it is
10082 +                * correctly treated as an imbalance.
10083 +                */
10084 +               local_irq_save(flags);
10085 +               double_rq_lock(this_rq, busiest);
10086 +               nr_moved = move_tasks(this_rq, this_cpu, busiest,
10087 +                                     minus_1_or_zero(busiest->nr_running),
10088 +                                     imbalance, sd, idle, &all_pinned);
10089 +               double_rq_unlock(this_rq, busiest);
10090 +               local_irq_restore(flags);
10091 +
10092 +               /*
10093 +                * some other cpu did the load balance for us.
10094 +                */
10095 +               if (nr_moved && this_cpu != smp_processor_id())
10096 +                       resched_cpu(this_cpu);
10097 +
10098 +               /* All tasks on this runqueue were pinned by CPU affinity */
10099 +               if (unlikely(all_pinned)) {
10100 +                       cpu_clear(cpu_of(busiest), cpus);
10101 +                       if (!cpus_empty(cpus))
10102 +                               goto redo;
10103 +                       goto out_balanced;
10104 +               }
10105 +       }
10106 +
10107 +       if (!nr_moved) {
10108 +               schedstat_inc(sd, lb_failed[idle]);
10109 +               sd->nr_balance_failed++;
10110 +
10111 +               if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
10112 +
10113 +                       spin_lock_irqsave(&busiest->lock, flags);
10114 +
10115 +                       /* don't kick the migration_thread, if the curr
10116 +                        * task on busiest cpu can't be moved to this_cpu
10117 +                        */
10118 +                       if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
10119 +                               spin_unlock_irqrestore(&busiest->lock, flags);
10120 +                               all_pinned = 1;
10121 +                               goto out_one_pinned;
10122 +                       }
10123 +
10124 +                       if (!busiest->active_balance) {
10125 +                               busiest->active_balance = 1;
10126 +                               busiest->push_cpu = this_cpu;
10127 +                               active_balance = 1;
10128 +                       }
10129 +                       spin_unlock_irqrestore(&busiest->lock, flags);
10130 +                       if (active_balance)
10131 +                               wake_up_process(busiest->migration_thread);
10132 +
10133 +                       /*
10134 +                        * We've kicked active balancing, reset the failure
10135 +                        * counter.
10136 +                        */
10137 +                       sd->nr_balance_failed = sd->cache_nice_tries+1;
10138 +               }
10139 +       } else
10140 +               sd->nr_balance_failed = 0;
10141 +
10142 +       if (likely(!active_balance)) {
10143 +               /* We were unbalanced, so reset the balancing interval */
10144 +               sd->balance_interval = sd->min_interval;
10145 +       } else {
10146 +               /*
10147 +                * If we've begun active balancing, start to back off. This
10148 +                * case may not be covered by the all_pinned logic if there
10149 +                * is only 1 task on the busy runqueue (because we don't call
10150 +                * move_tasks).
10151 +                */
10152 +               if (sd->balance_interval < sd->max_interval)
10153 +                       sd->balance_interval *= 2;
10154 +       }
10155 +
10156 +       if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
10157 +           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
10158 +               return -1;
10159 +       return nr_moved;
10160 +
10161 +out_balanced:
10162 +       schedstat_inc(sd, lb_balanced[idle]);
10163 +
10164 +       sd->nr_balance_failed = 0;
10165 +
10166 +out_one_pinned:
10167 +       /* tune up the balancing interval */
10168 +       if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
10169 +                       (sd->balance_interval < sd->max_interval))
10170 +               sd->balance_interval *= 2;
10171 +
10172 +       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
10173 +           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
10174 +               return -1;
10175 +       return 0;
10176 +}
10177 +
10178 +/*
10179 + * Check this_cpu to ensure it is balanced within domain. Attempt to move
10180 + * tasks if there is an imbalance.
10181 + *
10182 + * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
10183 + * this_rq is locked.
10184 + */
10185 +static int
10186 +load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
10187 +{
10188 +       struct sched_group *group;
10189 +       struct rq *busiest = NULL;
10190 +       unsigned long imbalance;
10191 +       int nr_moved = 0;
10192 +       int sd_idle = 0;
10193 +       cpumask_t cpus = CPU_MASK_ALL;
10194 +
10195 +       /*
10196 +        * When power savings policy is enabled for the parent domain, idle
10197 +        * sibling can pick up load irrespective of busy siblings. In this case,
10198 +        * let the state of idle sibling percolate up as IDLE, instead of
10199 +        * portraying it as NOT_IDLE.
10200 +        */
10201 +       if (sd->flags & SD_SHARE_CPUPOWER &&
10202 +           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
10203 +               sd_idle = 1;
10204 +
10205 +       schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
10206 +redo:
10207 +       group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
10208 +                                  &sd_idle, &cpus, NULL);
10209 +       if (!group) {
10210 +               schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
10211 +               goto out_balanced;
10212 +       }
10213 +
10214 +       busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
10215 +                               &cpus);
10216 +       if (!busiest) {
10217 +               schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
10218 +               goto out_balanced;
10219 +       }
10220 +
10221 +       BUG_ON(busiest == this_rq);
10222 +
10223 +       schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
10224 +
10225 +       nr_moved = 0;
10226 +       if (busiest->nr_running > 1) {
10227 +               /* Attempt to move tasks */
10228 +               double_lock_balance(this_rq, busiest);
10229 +               nr_moved = move_tasks(this_rq, this_cpu, busiest,
10230 +                                       minus_1_or_zero(busiest->nr_running),
10231 +                                       imbalance, sd, NEWLY_IDLE, NULL);
10232 +               spin_unlock(&busiest->lock);
10233 +
10234 +               if (!nr_moved) {
10235 +                       cpu_clear(cpu_of(busiest), cpus);
10236 +                       if (!cpus_empty(cpus))
10237 +                               goto redo;
10238 +               }
10239 +       }
10240 +
10241 +       if (!nr_moved) {
10242 +               schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
10243 +               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
10244 +                   !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
10245 +                       return -1;
10246 +       } else
10247 +               sd->nr_balance_failed = 0;
10248 +
10249 +       return nr_moved;
10250 +
10251 +out_balanced:
10252 +       schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
10253 +       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
10254 +           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
10255 +               return -1;
10256 +       sd->nr_balance_failed = 0;
10257 +
10258 +       return 0;
10259 +}
10260 +
10261 +/*
10262 + * idle_balance is called by schedule() if this_cpu is about to become
10263 + * idle. Attempts to pull tasks from other CPUs.
10264 + */
10265 +static void idle_balance(int this_cpu, struct rq *this_rq)
10266 +{
10267 +       struct sched_domain *sd;
10268 +       int pulled_task = 0;
10269 +       unsigned long next_balance = jiffies + 60 *  HZ;
10270 +
10271 +       for_each_domain(this_cpu, sd) {
10272 +               unsigned long interval;
10273 +
10274 +               if (!(sd->flags & SD_LOAD_BALANCE))
10275 +                       continue;
10276 +
10277 +               if (sd->flags & SD_BALANCE_NEWIDLE)
10278 +                       /* If we've pulled tasks over stop searching: */
10279 +                       pulled_task = load_balance_newidle(this_cpu,
10280 +                                                               this_rq, sd);
10281 +
10282 +               interval = msecs_to_jiffies(sd->balance_interval);
10283 +               if (time_after(next_balance, sd->last_balance + interval))
10284 +                       next_balance = sd->last_balance + interval;
10285 +               if (pulled_task)
10286 +                       break;
10287 +       }
10288 +       if (!pulled_task)
10289 +               /*
10290 +                * We are going idle. next_balance may be set based on
10291 +                * a busy processor. So reset next_balance.
10292 +                */
10293 +               this_rq->next_balance = next_balance;
10294 +}
10295 +
10296 +/*
10297 + * active_load_balance is run by migration threads. It pushes running tasks
10298 + * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
10299 + * running on each physical CPU where possible, and avoids physical /
10300 + * logical imbalances.
10301 + *
10302 + * Called with busiest_rq locked.
10303 + */
10304 +static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
10305 +{
10306 +       int target_cpu = busiest_rq->push_cpu;
10307 +       struct sched_domain *sd;
10308 +       struct rq *target_rq;
10309 +
10310 +       /* Is there any task to move? */
10311 +       if (busiest_rq->nr_running <= 1)
10312 +               return;
10313 +
10314 +       target_rq = cpu_rq(target_cpu);
10315 +
10316 +       /*
10317 +        * This condition is "impossible", if it occurs
10318 +        * we need to fix it.  Originally reported by
10319 +        * Bjorn Helgaas on a 128-cpu setup.
10320 +        */
10321 +       BUG_ON(busiest_rq == target_rq);
10322 +
10323 +       /* move a task from busiest_rq to target_rq */
10324 +       double_lock_balance(busiest_rq, target_rq);
10325 +
10326 +       /* Search for an sd spanning us and the target CPU. */
10327 +       for_each_domain(target_cpu, sd) {
10328 +               if ((sd->flags & SD_LOAD_BALANCE) &&
10329 +                   cpu_isset(busiest_cpu, sd->span))
10330 +                               break;
10331 +       }
10332 +
10333 +       if (likely(sd)) {
10334 +               schedstat_inc(sd, alb_cnt);
10335 +
10336 +               if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
10337 +                              RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
10338 +                              NULL))
10339 +                       schedstat_inc(sd, alb_pushed);
10340 +               else
10341 +                       schedstat_inc(sd, alb_failed);
10342 +       }
10343 +       spin_unlock(&target_rq->lock);
10344 +}
10345 +
10346 +static void update_load(struct rq *this_rq)
10347 +{
10348 +       unsigned long this_load;
10349 +       unsigned int i, scale;
10350 +
10351 +       this_load = this_rq->raw_weighted_load;
10352 +
10353 +       /* Update our load: */
10354 +       for (i = 0, scale = 1; i < 3; i++, scale += scale) {
10355 +               unsigned long old_load, new_load;
10356 +
10357 +               /* scale is effectively 1 << i now, and >> i divides by scale */
10358 +
10359 +               old_load = this_rq->cpu_load[i];
10360 +               new_load = this_load;
10361 +               /*
10362 +                * Round up the averaging division if load is increasing. This
10363 +                * prevents us from getting stuck on 9 if the load is 10, for
10364 +                * example.
10365 +                */
10366 +               if (new_load > old_load)
10367 +                       new_load += scale-1;
10368 +               this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
10369 +       }
10370 +}
10371 +
10372 +#ifdef CONFIG_NO_HZ
10373 +static struct {
10374 +       atomic_t load_balancer;
10375 +       cpumask_t  cpu_mask;
10376 +} nohz ____cacheline_aligned = {
10377 +       .load_balancer = ATOMIC_INIT(-1),
10378 +       .cpu_mask = CPU_MASK_NONE,
10379 +};
10380 +
10381 +/*
10382 + * This routine will try to nominate the ilb (idle load balancing)
10383 + * owner among the cpus whose ticks are stopped. ilb owner will do the idle
10384 + * load balancing on behalf of all those cpus. If all the cpus in the system
10385 + * go into this tickless mode, then there will be no ilb owner (as there is
10386 + * no need for one) and all the cpus will sleep till the next wakeup event
10387 + * arrives...
10388 + *
10389 + * For the ilb owner, tick is not stopped. And this tick will be used
10390 + * for idle load balancing. ilb owner will still be part of
10391 + * nohz.cpu_mask..
10392 + *
10393 + * While stopping the tick, this cpu will become the ilb owner if there
10394 + * is no other owner. And will be the owner till that cpu becomes busy
10395 + * or if all cpus in the system stop their ticks at which point
10396 + * there is no need for ilb owner.
10397 + *
10398 + * When the ilb owner becomes busy, it nominates another owner, during the
10399 + * next busy scheduler_tick()
10400 + */
10401 +int select_nohz_load_balancer(int stop_tick)
10402 +{
10403 +       int cpu = smp_processor_id();
10404 +
10405 +       if (stop_tick) {
10406 +               cpu_set(cpu, nohz.cpu_mask);
10407 +               cpu_rq(cpu)->in_nohz_recently = 1;
10408 +
10409 +               /*
10410 +                * If we are going offline and still the leader, give up!
10411 +                */
10412 +               if (cpu_is_offline(cpu) &&
10413 +                   atomic_read(&nohz.load_balancer) == cpu) {
10414 +                       if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
10415 +                               BUG();
10416 +                       return 0;
10417 +               }
10418 +
10419 +               /* time for ilb owner also to sleep */
10420 +               if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
10421 +                       if (atomic_read(&nohz.load_balancer) == cpu)
10422 +                               atomic_set(&nohz.load_balancer, -1);
10423 +                       return 0;
10424 +               }
10425 +
10426 +               if (atomic_read(&nohz.load_balancer) == -1) {
10427 +                       /* make me the ilb owner */
10428 +                       if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
10429 +                               return 1;
10430 +               } else if (atomic_read(&nohz.load_balancer) == cpu)
10431 +                       return 1;
10432 +       } else {
10433 +               if (!cpu_isset(cpu, nohz.cpu_mask))
10434 +                       return 0;
10435 +
10436 +               cpu_clear(cpu, nohz.cpu_mask);
10437 +
10438 +               if (atomic_read(&nohz.load_balancer) == cpu)
10439 +                       if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
10440 +                               BUG();
10441 +       }
10442 +       return 0;
10443 +}
10444 +#endif
10445 +
10446 +static DEFINE_SPINLOCK(balancing);
10447 +
10448 +/*
10449 + * It checks each scheduling domain to see if it is due to be balanced,
10450 + * and initiates a balancing operation if so.
10451 + *
10452 + * Balancing parameters are set up in arch_init_sched_domains.
10453 + */
10454 +static inline void rebalance_domains(int cpu, enum idle_type idle)
10455 +{
10456 +       int balance = 1;
10457 +       struct rq *rq = cpu_rq(cpu);
10458 +       unsigned long interval;
10459 +       struct sched_domain *sd;
10460 +       /* Earliest time when we have to do rebalance again */
10461 +       unsigned long next_balance = jiffies + 60*HZ;
10462 +
10463 +       for_each_domain(cpu, sd) {
10464 +               if (!(sd->flags & SD_LOAD_BALANCE))
10465 +                       continue;
10466 +
10467 +               interval = sd->balance_interval;
10468 +               if (idle != SCHED_IDLE)
10469 +                       interval *= sd->busy_factor;
10470 +
10471 +               /* scale ms to jiffies */
10472 +               interval = msecs_to_jiffies(interval);
10473 +               if (unlikely(!interval))
10474 +                       interval = 1;
10475 +
10476 +               if (sd->flags & SD_SERIALIZE) {
10477 +                       if (!spin_trylock(&balancing))
10478 +                               goto out;
10479 +               }
10480 +
10481 +               if (time_after_eq(jiffies, sd->last_balance + interval)) {
10482 +                       if (load_balance(cpu, rq, sd, idle, &balance)) {
10483 +                               /*
10484 +                                * We've pulled tasks over so either we're no
10485 +                                * longer idle, or one of our SMT siblings is
10486 +                                * not idle.
10487 +                                */
10488 +                               idle = NOT_IDLE;
10489 +                       }
10490 +                       sd->last_balance = jiffies;
10491 +               }
10492 +               if (sd->flags & SD_SERIALIZE)
10493 +                       spin_unlock(&balancing);
10494 +out:
10495 +               if (time_after(next_balance, sd->last_balance + interval))
10496 +                       next_balance = sd->last_balance + interval;
10497 +
10498 +               /*
10499 +                * Stop the load balance at this level. There is another
10500 +                * CPU in our sched group which is doing load balancing more
10501 +                * actively.
10502 +                */
10503 +               if (!balance)
10504 +                       break;
10505 +       }
10506 +       rq->next_balance = next_balance;
10507 +}
10508 +
10509 +/*
10510 + * run_rebalance_domains is triggered when needed from the scheduler tick.
10511 + * In CONFIG_NO_HZ case, the idle load balance owner will do the
10512 + * rebalancing for all the cpus for whom scheduler ticks are stopped.
10513 + */
10514 +static void run_rebalance_domains(struct softirq_action *h)
10515 +{
10516 +       int local_cpu = smp_processor_id();
10517 +       struct rq *local_rq = cpu_rq(local_cpu);
10518 +       enum idle_type idle = local_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE;
10519 +
10520 +       rebalance_domains(local_cpu, idle);
10521 +
10522 +#ifdef CONFIG_NO_HZ
10523 +       /*
10524 +        * If this cpu is the owner for idle load balancing, then do the
10525 +        * balancing on behalf of the other idle cpus whose ticks are
10526 +        * stopped.
10527 +        */
10528 +       if (local_rq->idle_at_tick &&
10529 +           atomic_read(&nohz.load_balancer) == local_cpu) {
10530 +               cpumask_t cpus = nohz.cpu_mask;
10531 +               struct rq *rq;
10532 +               int balance_cpu;
10533 +
10534 +               cpu_clear(local_cpu, cpus);
10535 +               for_each_cpu_mask(balance_cpu, cpus) {
10536 +                       /*
10537 +                        * If this cpu gets work to do, stop the load balancing
10538 +                        * work being done for other cpus. Next load
10539 +                        * balancing owner will pick it up.
10540 +                        */
10541 +                       if (need_resched())
10542 +                               break;
10543 +
10544 +                       rebalance_domains(balance_cpu, SCHED_IDLE);
10545 +
10546 +                       rq = cpu_rq(balance_cpu);
10547 +                       if (time_after(local_rq->next_balance, rq->next_balance))
10548 +                               local_rq->next_balance = rq->next_balance;
10549 +               }
10550 +       }
10551 +#endif
10552 +}
10553 +
10554 +/*
10555 + * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
10556 + *
10557 + * In case of CONFIG_NO_HZ, this is the place where we nominate a new
10558 + * idle load balancing owner or decide to stop the periodic load balancing,
10559 + * if the whole system is idle.
10560 + */
10561 +static inline void trigger_load_balance(int cpu)
10562 +{
10563 +       struct rq *rq = cpu_rq(cpu);
10564 +#ifdef CONFIG_NO_HZ
10565 +       /*
10566 +        * If we were in the nohz mode recently and busy at the current
10567 +        * scheduler tick, then check if we need to nominate new idle
10568 +        * load balancer.
10569 +        */
10570 +       if (rq->in_nohz_recently && !rq->idle_at_tick) {
10571 +               rq->in_nohz_recently = 0;
10572 +
10573 +               if (atomic_read(&nohz.load_balancer) == cpu) {
10574 +                       cpu_clear(cpu, nohz.cpu_mask);
10575 +                       atomic_set(&nohz.load_balancer, -1);
10576 +               }
10577 +
10578 +               if (atomic_read(&nohz.load_balancer) == -1) {
10579 +                       /*
10580 +                        * simple selection for now: Nominate the
10581 +                        * first cpu in the nohz list to be the next
10582 +                        * ilb owner.
10583 +                        *
10584 +                        * TBD: Traverse the sched domains and nominate
10585 +                        * the nearest cpu in the nohz.cpu_mask.
10586 +                        */
10587 +                       int ilb = first_cpu(nohz.cpu_mask);
10588 +
10589 +                       if (ilb != NR_CPUS)
10590 +                               resched_cpu(ilb);
10591 +               }
10592 +       }
10593 +
10594 +       /*
10595 +        * If this cpu is idle and doing idle load balancing for all the
10596 +        * cpus with ticks stopped, is it time for that to stop?
10597 +        */
10598 +       if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
10599 +           cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
10600 +               resched_cpu(cpu);
10601 +               return;
10602 +       }
10603 +
10604 +       /*
10605 +        * If this cpu is idle and the idle load balancing is done by
10606 +        * someone else, then no need raise the SCHED_SOFTIRQ
10607 +        */
10608 +       if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
10609 +           cpu_isset(cpu, nohz.cpu_mask))
10610 +               return;
10611 +#endif
10612 +       if (time_after_eq(jiffies, rq->next_balance))
10613 +               raise_softirq(SCHED_SOFTIRQ);
10614 +}
10615 +#else
10616 +/*
10617 + * on UP we do not need to balance between CPUs:
10618 + */
10619 +static inline void idle_balance(int cpu, struct rq *rq)
10620 +{
10621 +}
10622 +#endif
10623 +
10624 +DEFINE_PER_CPU(struct kernel_stat, kstat);
10625 +
10626 +EXPORT_PER_CPU_SYMBOL(kstat);
10627 +
10628 +/*
10629 + * This is called on clock ticks and on context switches.
10630 + * Bank in p->sched_time the ns elapsed since the last tick or switch.
10631 + */
10632 +static inline void
10633 +update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
10634 +{
10635 +       p->sched_time += now - p->last_ran;
10636 +       p->last_ran = rq->most_recent_timestamp = now;
10637 +}
10638 +
10639 +/*
10640 + * Return current->sched_time plus any more ns on the sched_clock
10641 + * that have not yet been banked.
10642 + */
10643 +unsigned long long current_sched_time(const struct task_struct *p)
10644 +{
10645 +       unsigned long long ns;
10646 +       unsigned long flags;
10647 +
10648 +       local_irq_save(flags);
10649 +       ns = p->sched_time + sched_clock() - p->last_ran;
10650 +       local_irq_restore(flags);
10651 +
10652 +       return ns;
10653 +}
10654 +
10655 +/*
10656 + * We place interactive tasks back into the active array, if possible.
10657 + *
10658 + * To guarantee that this does not starve expired tasks we ignore the
10659 + * interactivity of a task if the first expired task had to wait more
10660 + * than a 'reasonable' amount of time. This deadline timeout is
10661 + * load-dependent, as the frequency of array switches decreases with
10662 + * increasing number of running tasks. We also ignore the interactivity
10663 + * if a better static_prio task has expired:
10664 + */
10665 +static inline int expired_starving(struct rq *rq)
10666 +{
10667 +       if (rq->curr->static_prio > rq->best_expired_prio)
10668 +               return 1;
10669 +       if (!STARVATION_LIMIT || !rq->expired_timestamp)
10670 +               return 0;
10671 +       if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
10672 +               return 1;
10673 +       return 0;
10674 +}
10675 +
10676 +/*
10677 + * Account user cpu time to a process.
10678 + * @p: the process that the cpu time gets accounted to
10679 + * @hardirq_offset: the offset to subtract from hardirq_count()
10680 + * @cputime: the cpu time spent in user space since the last update
10681 + */
10682 +void account_user_time(struct task_struct *p, cputime_t cputime)
10683 +{
10684 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
10685 +       struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
10686 +       cputime64_t tmp;
10687 +       int nice = (TASK_NICE(p) > 0);
10688 +
10689 +       p->utime = cputime_add(p->utime, cputime);
10690 +       vx_account_user(vxi, cputime, nice);
10691 +
10692 +       /* Add user time to cpustat. */
10693 +       tmp = cputime_to_cputime64(cputime);
10694 +       if (nice)
10695 +               cpustat->nice = cputime64_add(cpustat->nice, tmp);
10696 +       else
10697 +               cpustat->user = cputime64_add(cpustat->user, tmp);
10698 +}
10699 +
10700 +/*
10701 + * Account system cpu time to a process.
10702 + * @p: the process that the cpu time gets accounted to
10703 + * @hardirq_offset: the offset to subtract from hardirq_count()
10704 + * @cputime: the cpu time spent in kernel space since the last update
10705 + */
10706 +void account_system_time(struct task_struct *p, int hardirq_offset,
10707 +                        cputime_t cputime)
10708 +{
10709 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
10710 +       struct vx_info *vxi = p->vx_info;  /* p is _always_ current */
10711 +       struct rq *rq = this_rq();
10712 +       cputime64_t tmp;
10713 +
10714 +       p->stime = cputime_add(p->stime, cputime);
10715 +       vx_account_system(vxi, cputime, (p == rq->idle));
10716 +
10717 +       /* Add system time to cpustat. */
10718 +       tmp = cputime_to_cputime64(cputime);
10719 +       if (hardirq_count() - hardirq_offset)
10720 +               cpustat->irq = cputime64_add(cpustat->irq, tmp);
10721 +       else if (softirq_count())
10722 +               cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
10723 +       else if (p != rq->idle)
10724 +               cpustat->system = cputime64_add(cpustat->system, tmp);
10725 +       else if (atomic_read(&rq->nr_iowait) > 0)
10726 +               cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
10727 +       else
10728 +               cpustat->idle = cputime64_add(cpustat->idle, tmp);
10729 +       /* Account for system time used */
10730 +       acct_update_integrals(p);
10731 +}
10732 +
10733 +/*
10734 + * Account for involuntary wait time.
10735 + * @p: the process from which the cpu time has been stolen
10736 + * @steal: the cpu time spent in involuntary wait
10737 + */
10738 +void account_steal_time(struct task_struct *p, cputime_t steal)
10739 +{
10740 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
10741 +       cputime64_t tmp = cputime_to_cputime64(steal);
10742 +       struct rq *rq = this_rq();
10743 +
10744 +       if (p == rq->idle) {
10745 +               p->stime = cputime_add(p->stime, steal);
10746 +               if (atomic_read(&rq->nr_iowait) > 0)
10747 +                       cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
10748 +               else
10749 +                       cpustat->idle = cputime64_add(cpustat->idle, tmp);
10750 +       } else
10751 +               cpustat->steal = cputime64_add(cpustat->steal, tmp);
10752 +}
10753 +
10754 +static void task_running_tick(struct rq *rq, struct task_struct *p, int cpu)
10755 +{
10756 +       if (p->array != rq->active) {
10757 +               /* Task has expired but was not scheduled yet */
10758 +               set_tsk_need_resched(p);
10759 +               return;
10760 +       }
10761 +       spin_lock(&rq->lock);
10762 +       /*
10763 +        * The task was running during this tick - update the
10764 +        * time slice counter. Note: we do not update a thread's
10765 +        * priority until it either goes to sleep or uses up its
10766 +        * timeslice. This makes it possible for interactive tasks
10767 +        * to use up their timeslices at their highest priority levels.
10768 +        */
10769 +       if (rt_task(p)) {
10770 +               /*
10771 +                * RR tasks need a special form of timeslice management.
10772 +                * FIFO tasks have no timeslices.
10773 +                */
10774 +               if ((p->policy == SCHED_RR) && !--p->time_slice) {
10775 +                       p->time_slice = task_timeslice(p);
10776 +                       p->first_time_slice = 0;
10777 +                       set_tsk_need_resched(p);
10778 +
10779 +                       /* put it at the end of the queue: */
10780 +                       requeue_task(p, rq->active);
10781 +               }
10782 +               goto out_unlock;
10783 +       }
10784 +       if (vx_need_resched(p, --p->time_slice, cpu)) {
10785 +               dequeue_task(p, rq->active);
10786 +               set_tsk_need_resched(p);
10787 +               p->prio = effective_prio(p);
10788 +               p->time_slice = task_timeslice(p);
10789 +               p->first_time_slice = 0;
10790 +
10791 +               if (!rq->expired_timestamp)
10792 +                       rq->expired_timestamp = jiffies;
10793 +               if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
10794 +                       enqueue_task(p, rq->expired);
10795 +                       if (p->static_prio < rq->best_expired_prio)
10796 +                               rq->best_expired_prio = p->static_prio;
10797 +               } else
10798 +                       enqueue_task(p, rq->active);
10799 +       } else {
10800 +               /*
10801 +                * Prevent a too long timeslice allowing a task to monopolize
10802 +                * the CPU. We do this by splitting up the timeslice into
10803 +                * smaller pieces.
10804 +                *
10805 +                * Note: this does not mean the task's timeslices expire or
10806 +                * get lost in any way, they just might be preempted by
10807 +                * another task of equal priority. (one with higher
10808 +                * priority would have preempted this task already.) We
10809 +                * requeue this task to the end of the list on this priority
10810 +                * level, which is in essence a round-robin of tasks with
10811 +                * equal priority.
10812 +                *
10813 +                * This only applies to tasks in the interactive
10814 +                * delta range with at least TIMESLICE_GRANULARITY to requeue.
10815 +                */
10816 +               if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
10817 +                       p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
10818 +                       (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
10819 +                       (p->array == rq->active)) {
10820 +
10821 +                       requeue_task(p, rq->active);
10822 +                       set_tsk_need_resched(p);
10823 +               }
10824 +       }
10825 +out_unlock:
10826 +       spin_unlock(&rq->lock);
10827 +}
10828 +
10829 +/*
10830 + * This function gets called by the timer code, with HZ frequency.
10831 + * We call it with interrupts disabled.
10832 + *
10833 + * It also gets called by the fork code, when changing the parent's
10834 + * timeslices.
10835 + */
10836 +void scheduler_tick(void)
10837 +{
10838 +       unsigned long long now = sched_clock();
10839 +       struct task_struct *p = current;
10840 +       int cpu = smp_processor_id();
10841 +       int idle_at_tick = idle_cpu(cpu);
10842 +       struct rq *rq = cpu_rq(cpu);
10843 +
10844 +       update_cpu_clock(p, rq, now);
10845 +       vxm_sync(now, cpu);
10846 +
10847 +       if (idle_at_tick)
10848 +               vx_idle_resched(rq);
10849 +       else
10850 +               task_running_tick(rq, p, cpu);
10851 +#ifdef CONFIG_SMP
10852 +       update_load(rq);
10853 +       rq->idle_at_tick = idle_at_tick;
10854 +       trigger_load_balance(cpu);
10855 +#endif
10856 +}
10857 +
10858 +#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
10859 +
10860 +void fastcall add_preempt_count(int val)
10861 +{
10862 +       /*
10863 +        * Underflow?
10864 +        */
10865 +       if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
10866 +               return;
10867 +       preempt_count() += val;
10868 +       /*
10869 +        * Spinlock count overflowing soon?
10870 +        */
10871 +       DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
10872 +                               PREEMPT_MASK - 10);
10873 +}
10874 +EXPORT_SYMBOL(add_preempt_count);
10875 +
10876 +void fastcall sub_preempt_count(int val)
10877 +{
10878 +       /*
10879 +        * Underflow?
10880 +        */
10881 +       if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
10882 +               return;
10883 +       /*
10884 +        * Is the spinlock portion underflowing?
10885 +        */
10886 +       if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
10887 +                       !(preempt_count() & PREEMPT_MASK)))
10888 +               return;
10889 +
10890 +       preempt_count() -= val;
10891 +}
10892 +EXPORT_SYMBOL(sub_preempt_count);
10893 +
10894 +#endif
10895 +
10896 +static inline int interactive_sleep(enum sleep_type sleep_type)
10897 +{
10898 +       return (sleep_type == SLEEP_INTERACTIVE ||
10899 +               sleep_type == SLEEP_INTERRUPTED);
10900 +}
10901 +
10902 +/*
10903 + * schedule() is the main scheduler function.
10904 + */
10905 +asmlinkage void __sched schedule(void)
10906 +{
10907 +       struct task_struct *prev, *next;
10908 +       struct prio_array *array;
10909 +       struct list_head *queue;
10910 +       unsigned long long now;
10911 +       unsigned long run_time;
10912 +       int cpu, idx, new_prio;
10913 +       long *switch_count;
10914 +       struct rq *rq;
10915 +
10916 +       /*
10917 +        * Test if we are atomic.  Since do_exit() needs to call into
10918 +        * schedule() atomically, we ignore that path for now.
10919 +        * Otherwise, whine if we are scheduling when we should not be.
10920 +        */
10921 +       if (unlikely(in_atomic() && !current->exit_state)) {
10922 +               printk(KERN_ERR "BUG: scheduling while atomic: "
10923 +                       "%s/0x%08x/%d\n",
10924 +                       current->comm, preempt_count(), current->pid);
10925 +               debug_show_held_locks(current);
10926 +               if (irqs_disabled())
10927 +                       print_irqtrace_events(current);
10928 +               dump_stack();
10929 +       }
10930 +       profile_hit(SCHED_PROFILING, __builtin_return_address(0));
10931 +
10932 +need_resched:
10933 +       preempt_disable();
10934 +       prev = current;
10935 +       release_kernel_lock(prev);
10936 +need_resched_nonpreemptible:
10937 +       rq = this_rq();
10938 +
10939 +       /*
10940 +        * The idle thread is not allowed to schedule!
10941 +        * Remove this check after it has been exercised a bit.
10942 +        */
10943 +       if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
10944 +               printk(KERN_ERR "bad: scheduling from the idle thread!\n");
10945 +               dump_stack();
10946 +       }
10947 +
10948 +       schedstat_inc(rq, sched_cnt);
10949 +       now = sched_clock();
10950 +       if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
10951 +               run_time = now - prev->timestamp;
10952 +               if (unlikely((long long)(now - prev->timestamp) < 0))
10953 +                       run_time = 0;
10954 +       } else
10955 +               run_time = NS_MAX_SLEEP_AVG;
10956 +
10957 +       /*
10958 +        * Tasks charged proportionately less run_time at high sleep_avg to
10959 +        * delay them losing their interactive status
10960 +        */
10961 +       run_time /= (CURRENT_BONUS(prev) ? : 1);
10962 +
10963 +       spin_lock_irq(&rq->lock);
10964 +
10965 +       switch_count = &prev->nivcsw;
10966 +       if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
10967 +               switch_count = &prev->nvcsw;
10968 +               if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
10969 +                               unlikely(signal_pending(prev))))
10970 +                       prev->state = TASK_RUNNING;
10971 +               else {
10972 +                       if (prev->state == TASK_UNINTERRUPTIBLE) {
10973 +                               rq->nr_uninterruptible++;
10974 +                               vx_uninterruptible_inc(prev);
10975 +                       }
10976 +                       deactivate_task(prev, rq);
10977 +               }
10978 +       }
10979 +
10980 +       cpu = smp_processor_id();
10981 +       vx_set_rq_time(rq, jiffies);
10982 +try_unhold:
10983 +       vx_try_unhold(rq, cpu);
10984 +pick_next:
10985 +
10986 +       if (unlikely(!rq->nr_running)) {
10987 +               /* can we skip idle time? */
10988 +               if (vx_try_skip(rq, cpu))
10989 +                       goto try_unhold;
10990 +
10991 +               idle_balance(cpu, rq);
10992 +               if (!rq->nr_running) {
10993 +                       next = rq->idle;
10994 +                       rq->expired_timestamp = 0;
10995 +                       goto switch_tasks;
10996 +               }
10997 +       }
10998 +
10999 +       array = rq->active;
11000 +       if (unlikely(!array->nr_active)) {
11001 +               /*
11002 +                * Switch the active and expired arrays.
11003 +                */
11004 +               schedstat_inc(rq, sched_switch);
11005 +               rq->active = rq->expired;
11006 +               rq->expired = array;
11007 +               array = rq->active;
11008 +               rq->expired_timestamp = 0;
11009 +               rq->best_expired_prio = MAX_PRIO;
11010 +       }
11011 +
11012 +       idx = sched_find_first_bit(array->bitmap);
11013 +       queue = array->queue + idx;
11014 +       next = list_entry(queue->next, struct task_struct, run_list);
11015 +
11016 +       /* check before we schedule this context */
11017 +       if (!vx_schedule(next, rq, cpu))
11018 +               goto pick_next;
11019 +
11020 +       if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
11021 +               unsigned long long delta = now - next->timestamp;
11022 +               if (unlikely((long long)(now - next->timestamp) < 0))
11023 +                       delta = 0;
11024 +
11025 +               if (next->sleep_type == SLEEP_INTERACTIVE)
11026 +                       delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
11027 +
11028 +               array = next->array;
11029 +               new_prio = recalc_task_prio(next, next->timestamp + delta);
11030 +
11031 +               if (unlikely(next->prio != new_prio)) {
11032 +                       dequeue_task(next, array);
11033 +                       next->prio = new_prio;
11034 +                       enqueue_task(next, array);
11035 +               }
11036 +       }
11037 +       next->sleep_type = SLEEP_NORMAL;
11038 +switch_tasks:
11039 +       if (next == rq->idle)
11040 +               schedstat_inc(rq, sched_goidle);
11041 +       prefetch(next);
11042 +       prefetch_stack(next);
11043 +       clear_tsk_need_resched(prev);
11044 +       rcu_qsctr_inc(task_cpu(prev));
11045 +
11046 +       update_cpu_clock(prev, rq, now);
11047 +
11048 +       prev->sleep_avg -= run_time;
11049 +       if ((long)prev->sleep_avg <= 0)
11050 +               prev->sleep_avg = 0;
11051 +       prev->timestamp = prev->last_ran = now;
11052 +
11053 +       sched_info_switch(prev, next);
11054 +       if (likely(prev != next)) {
11055 +               next->timestamp = next->last_ran = now;
11056 +               rq->nr_switches++;
11057 +               rq->curr = next;
11058 +               ++*switch_count;
11059 +
11060 +               prepare_task_switch(rq, next);
11061 +               prev = context_switch(rq, prev, next);
11062 +               barrier();
11063 +               /*
11064 +                * this_rq must be evaluated again because prev may have moved
11065 +                * CPUs since it called schedule(), thus the 'rq' on its stack
11066 +                * frame will be invalid.
11067 +                */
11068 +               finish_task_switch(this_rq(), prev);
11069 +       } else
11070 +               spin_unlock_irq(&rq->lock);
11071 +
11072 +       prev = current;
11073 +       if (unlikely(reacquire_kernel_lock(prev) < 0))
11074 +               goto need_resched_nonpreemptible;
11075 +       preempt_enable_no_resched();
11076 +       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
11077 +               goto need_resched;
11078 +}
11079 +EXPORT_SYMBOL(schedule);
11080 +
11081 +#ifdef CONFIG_PREEMPT
11082 +/*
11083 + * this is the entry point to schedule() from in-kernel preemption
11084 + * off of preempt_enable.  Kernel preemptions off return from interrupt
11085 + * occur there and call schedule directly.
11086 + */
11087 +asmlinkage void __sched preempt_schedule(void)
11088 +{
11089 +       struct thread_info *ti = current_thread_info();
11090 +#ifdef CONFIG_PREEMPT_BKL
11091 +       struct task_struct *task = current;
11092 +       int saved_lock_depth;
11093 +#endif
11094 +       /*
11095 +        * If there is a non-zero preempt_count or interrupts are disabled,
11096 +        * we do not want to preempt the current task.  Just return..
11097 +        */
11098 +       if (likely(ti->preempt_count || irqs_disabled()))
11099 +               return;
11100 +
11101 +need_resched:
11102 +       add_preempt_count(PREEMPT_ACTIVE);
11103 +       /*
11104 +        * We keep the big kernel semaphore locked, but we
11105 +        * clear ->lock_depth so that schedule() doesnt
11106 +        * auto-release the semaphore:
11107 +        */
11108 +#ifdef CONFIG_PREEMPT_BKL
11109 +       saved_lock_depth = task->lock_depth;
11110 +       task->lock_depth = -1;
11111 +#endif
11112 +       schedule();
11113 +#ifdef CONFIG_PREEMPT_BKL
11114 +       task->lock_depth = saved_lock_depth;
11115 +#endif
11116 +       sub_preempt_count(PREEMPT_ACTIVE);
11117 +
11118 +       /* we could miss a preemption opportunity between schedule and now */
11119 +       barrier();
11120 +       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
11121 +               goto need_resched;
11122 +}
11123 +EXPORT_SYMBOL(preempt_schedule);
11124 +
11125 +/*
11126 + * this is the entry point to schedule() from kernel preemption
11127 + * off of irq context.
11128 + * Note, that this is called and return with irqs disabled. This will
11129 + * protect us against recursive calling from irq.
11130 + */
11131 +asmlinkage void __sched preempt_schedule_irq(void)
11132 +{
11133 +       struct thread_info *ti = current_thread_info();
11134 +#ifdef CONFIG_PREEMPT_BKL
11135 +       struct task_struct *task = current;
11136 +       int saved_lock_depth;
11137 +#endif
11138 +       /* Catch callers which need to be fixed */
11139 +       BUG_ON(ti->preempt_count || !irqs_disabled());
11140 +
11141 +need_resched:
11142 +       add_preempt_count(PREEMPT_ACTIVE);
11143 +       /*
11144 +        * We keep the big kernel semaphore locked, but we
11145 +        * clear ->lock_depth so that schedule() doesnt
11146 +        * auto-release the semaphore:
11147 +        */
11148 +#ifdef CONFIG_PREEMPT_BKL
11149 +       saved_lock_depth = task->lock_depth;
11150 +       task->lock_depth = -1;
11151 +#endif
11152 +       local_irq_enable();
11153 +       schedule();
11154 +       local_irq_disable();
11155 +#ifdef CONFIG_PREEMPT_BKL
11156 +       task->lock_depth = saved_lock_depth;
11157 +#endif
11158 +       sub_preempt_count(PREEMPT_ACTIVE);
11159 +
11160 +       /* we could miss a preemption opportunity between schedule and now */
11161 +       barrier();
11162 +       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
11163 +               goto need_resched;
11164 +}
11165 +
11166 +#endif /* CONFIG_PREEMPT */
11167 +
11168 +int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
11169 +                         void *key)
11170 +{
11171 +       return try_to_wake_up(curr->private, mode, sync);
11172 +}
11173 +EXPORT_SYMBOL(default_wake_function);
11174 +
11175 +/*
11176 + * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
11177 + * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
11178 + * number) then we wake all the non-exclusive tasks and one exclusive task.
11179 + *
11180 + * There are circumstances in which we can try to wake a task which has already
11181 + * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
11182 + * zero in this (rare) case, and we handle it by continuing to scan the queue.
11183 + */
11184 +static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
11185 +                            int nr_exclusive, int sync, void *key)
11186 +{
11187 +       struct list_head *tmp, *next;
11188 +
11189 +       list_for_each_safe(tmp, next, &q->task_list) {
11190 +               wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
11191 +               unsigned flags = curr->flags;
11192 +
11193 +               if (curr->func(curr, mode, sync, key) &&
11194 +                               (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
11195 +                       break;
11196 +       }
11197 +}
11198 +
11199 +/**
11200 + * __wake_up - wake up threads blocked on a waitqueue.
11201 + * @q: the waitqueue
11202 + * @mode: which threads
11203 + * @nr_exclusive: how many wake-one or wake-many threads to wake up
11204 + * @key: is directly passed to the wakeup function
11205 + */
11206 +void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
11207 +                       int nr_exclusive, void *key)
11208 +{
11209 +       unsigned long flags;
11210 +
11211 +       spin_lock_irqsave(&q->lock, flags);
11212 +       __wake_up_common(q, mode, nr_exclusive, 0, key);
11213 +       spin_unlock_irqrestore(&q->lock, flags);
11214 +}
11215 +EXPORT_SYMBOL(__wake_up);
11216 +
11217 +/*
11218 + * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
11219 + */
11220 +void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
11221 +{
11222 +       __wake_up_common(q, mode, 1, 0, NULL);
11223 +}
11224 +
11225 +/**
11226 + * __wake_up_sync - wake up threads blocked on a waitqueue.
11227 + * @q: the waitqueue
11228 + * @mode: which threads
11229 + * @nr_exclusive: how many wake-one or wake-many threads to wake up
11230 + *
11231 + * The sync wakeup differs that the waker knows that it will schedule
11232 + * away soon, so while the target thread will be woken up, it will not
11233 + * be migrated to another CPU - ie. the two threads are 'synchronized'
11234 + * with each other. This can prevent needless bouncing between CPUs.
11235 + *
11236 + * On UP it can prevent extra preemption.
11237 + */
11238 +void fastcall
11239 +__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
11240 +{
11241 +       unsigned long flags;
11242 +       int sync = 1;
11243 +
11244 +       if (unlikely(!q))
11245 +               return;
11246 +
11247 +       if (unlikely(!nr_exclusive))
11248 +               sync = 0;
11249 +
11250 +       spin_lock_irqsave(&q->lock, flags);
11251 +       __wake_up_common(q, mode, nr_exclusive, sync, NULL);
11252 +       spin_unlock_irqrestore(&q->lock, flags);
11253 +}
11254 +EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
11255 +
11256 +void fastcall complete(struct completion *x)
11257 +{
11258 +       unsigned long flags;
11259 +
11260 +       spin_lock_irqsave(&x->wait.lock, flags);
11261 +       x->done++;
11262 +       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
11263 +                        1, 0, NULL);
11264 +       spin_unlock_irqrestore(&x->wait.lock, flags);
11265 +}
11266 +EXPORT_SYMBOL(complete);
11267 +
11268 +void fastcall complete_all(struct completion *x)
11269 +{
11270 +       unsigned long flags;
11271 +
11272 +       spin_lock_irqsave(&x->wait.lock, flags);
11273 +       x->done += UINT_MAX/2;
11274 +       __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
11275 +                        0, 0, NULL);
11276 +       spin_unlock_irqrestore(&x->wait.lock, flags);
11277 +}
11278 +EXPORT_SYMBOL(complete_all);
11279 +
11280 +void fastcall __sched wait_for_completion(struct completion *x)
11281 +{
11282 +       might_sleep();
11283 +
11284 +       spin_lock_irq(&x->wait.lock);
11285 +       if (!x->done) {
11286 +               DECLARE_WAITQUEUE(wait, current);
11287 +
11288 +               wait.flags |= WQ_FLAG_EXCLUSIVE;
11289 +               __add_wait_queue_tail(&x->wait, &wait);
11290 +               do {
11291 +                       __set_current_state(TASK_UNINTERRUPTIBLE);
11292 +                       spin_unlock_irq(&x->wait.lock);
11293 +                       schedule();
11294 +                       spin_lock_irq(&x->wait.lock);
11295 +               } while (!x->done);
11296 +               __remove_wait_queue(&x->wait, &wait);
11297 +       }
11298 +       x->done--;
11299 +       spin_unlock_irq(&x->wait.lock);
11300 +}
11301 +EXPORT_SYMBOL(wait_for_completion);
11302 +
11303 +unsigned long fastcall __sched
11304 +wait_for_completion_timeout(struct completion *x, unsigned long timeout)
11305 +{
11306 +       might_sleep();
11307 +
11308 +       spin_lock_irq(&x->wait.lock);
11309 +       if (!x->done) {
11310 +               DECLARE_WAITQUEUE(wait, current);
11311 +
11312 +               wait.flags |= WQ_FLAG_EXCLUSIVE;
11313 +               __add_wait_queue_tail(&x->wait, &wait);
11314 +               do {
11315 +                       __set_current_state(TASK_UNINTERRUPTIBLE);
11316 +                       spin_unlock_irq(&x->wait.lock);
11317 +                       timeout = schedule_timeout(timeout);
11318 +                       spin_lock_irq(&x->wait.lock);
11319 +                       if (!timeout) {
11320 +                               __remove_wait_queue(&x->wait, &wait);
11321 +                               goto out;
11322 +                       }
11323 +               } while (!x->done);
11324 +               __remove_wait_queue(&x->wait, &wait);
11325 +       }
11326 +       x->done--;
11327 +out:
11328 +       spin_unlock_irq(&x->wait.lock);
11329 +       return timeout;
11330 +}
11331 +EXPORT_SYMBOL(wait_for_completion_timeout);
11332 +
11333 +int fastcall __sched wait_for_completion_interruptible(struct completion *x)
11334 +{
11335 +       int ret = 0;
11336 +
11337 +       might_sleep();
11338 +
11339 +       spin_lock_irq(&x->wait.lock);
11340 +       if (!x->done) {
11341 +               DECLARE_WAITQUEUE(wait, current);
11342 +
11343 +               wait.flags |= WQ_FLAG_EXCLUSIVE;
11344 +               __add_wait_queue_tail(&x->wait, &wait);
11345 +               do {
11346 +                       if (signal_pending(current)) {
11347 +                               ret = -ERESTARTSYS;
11348 +                               __remove_wait_queue(&x->wait, &wait);
11349 +                               goto out;
11350 +                       }
11351 +                       __set_current_state(TASK_INTERRUPTIBLE);
11352 +                       spin_unlock_irq(&x->wait.lock);
11353 +                       schedule();
11354 +                       spin_lock_irq(&x->wait.lock);
11355 +               } while (!x->done);
11356 +               __remove_wait_queue(&x->wait, &wait);
11357 +       }
11358 +       x->done--;
11359 +out:
11360 +       spin_unlock_irq(&x->wait.lock);
11361 +
11362 +       return ret;
11363 +}
11364 +EXPORT_SYMBOL(wait_for_completion_interruptible);
11365 +
11366 +unsigned long fastcall __sched
11367 +wait_for_completion_interruptible_timeout(struct completion *x,
11368 +                                         unsigned long timeout)
11369 +{
11370 +       might_sleep();
11371 +
11372 +       spin_lock_irq(&x->wait.lock);
11373 +       if (!x->done) {
11374 +               DECLARE_WAITQUEUE(wait, current);
11375 +
11376 +               wait.flags |= WQ_FLAG_EXCLUSIVE;
11377 +               __add_wait_queue_tail(&x->wait, &wait);
11378 +               do {
11379 +                       if (signal_pending(current)) {
11380 +                               timeout = -ERESTARTSYS;
11381 +                               __remove_wait_queue(&x->wait, &wait);
11382 +                               goto out;
11383 +                       }
11384 +                       __set_current_state(TASK_INTERRUPTIBLE);
11385 +                       spin_unlock_irq(&x->wait.lock);
11386 +                       timeout = schedule_timeout(timeout);
11387 +                       spin_lock_irq(&x->wait.lock);
11388 +                       if (!timeout) {
11389 +                               __remove_wait_queue(&x->wait, &wait);
11390 +                               goto out;
11391 +                       }
11392 +               } while (!x->done);
11393 +               __remove_wait_queue(&x->wait, &wait);
11394 +       }
11395 +       x->done--;
11396 +out:
11397 +       spin_unlock_irq(&x->wait.lock);
11398 +       return timeout;
11399 +}
11400 +EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
11401 +
11402 +
11403 +#define        SLEEP_ON_VAR                                    \
11404 +       unsigned long flags;                            \
11405 +       wait_queue_t wait;                              \
11406 +       init_waitqueue_entry(&wait, current);
11407 +
11408 +#define SLEEP_ON_HEAD                                  \
11409 +       spin_lock_irqsave(&q->lock,flags);              \
11410 +       __add_wait_queue(q, &wait);                     \
11411 +       spin_unlock(&q->lock);
11412 +
11413 +#define        SLEEP_ON_TAIL                                   \
11414 +       spin_lock_irq(&q->lock);                        \
11415 +       __remove_wait_queue(q, &wait);                  \
11416 +       spin_unlock_irqrestore(&q->lock, flags);
11417 +
11418 +void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
11419 +{
11420 +       SLEEP_ON_VAR
11421 +
11422 +       current->state = TASK_INTERRUPTIBLE;
11423 +
11424 +       SLEEP_ON_HEAD
11425 +       schedule();
11426 +       SLEEP_ON_TAIL
11427 +}
11428 +EXPORT_SYMBOL(interruptible_sleep_on);
11429 +
11430 +long fastcall __sched
11431 +interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
11432 +{
11433 +       SLEEP_ON_VAR
11434 +
11435 +       current->state = TASK_INTERRUPTIBLE;
11436 +
11437 +       SLEEP_ON_HEAD
11438 +       timeout = schedule_timeout(timeout);
11439 +       SLEEP_ON_TAIL
11440 +
11441 +       return timeout;
11442 +}
11443 +EXPORT_SYMBOL(interruptible_sleep_on_timeout);
11444 +
11445 +void fastcall __sched sleep_on(wait_queue_head_t *q)
11446 +{
11447 +       SLEEP_ON_VAR
11448 +
11449 +       current->state = TASK_UNINTERRUPTIBLE;
11450 +
11451 +       SLEEP_ON_HEAD
11452 +       schedule();
11453 +       SLEEP_ON_TAIL
11454 +}
11455 +EXPORT_SYMBOL(sleep_on);
11456 +
11457 +long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
11458 +{
11459 +       SLEEP_ON_VAR
11460 +
11461 +       current->state = TASK_UNINTERRUPTIBLE;
11462 +
11463 +       SLEEP_ON_HEAD
11464 +       timeout = schedule_timeout(timeout);
11465 +       SLEEP_ON_TAIL
11466 +
11467 +       return timeout;
11468 +}
11469 +
11470 +EXPORT_SYMBOL(sleep_on_timeout);
11471 +
11472 +#ifdef CONFIG_RT_MUTEXES
11473 +
11474 +/*
11475 + * rt_mutex_setprio - set the current priority of a task
11476 + * @p: task
11477 + * @prio: prio value (kernel-internal form)
11478 + *
11479 + * This function changes the 'effective' priority of a task. It does
11480 + * not touch ->normal_prio like __setscheduler().
11481 + *
11482 + * Used by the rt_mutex code to implement priority inheritance logic.
11483 + */
11484 +void rt_mutex_setprio(struct task_struct *p, int prio)
11485 +{
11486 +       struct prio_array *array;
11487 +       unsigned long flags;
11488 +       struct rq *rq;
11489 +       int oldprio;
11490 +
11491 +       BUG_ON(prio < 0 || prio > MAX_PRIO);
11492 +
11493 +       rq = task_rq_lock(p, &flags);
11494 +
11495 +       oldprio = p->prio;
11496 +       array = p->array;
11497 +       if (array)
11498 +               dequeue_task(p, array);
11499 +       p->prio = prio;
11500 +
11501 +       if (array) {
11502 +               /*
11503 +                * If changing to an RT priority then queue it
11504 +                * in the active array!
11505 +                */
11506 +               if (rt_task(p))
11507 +                       array = rq->active;
11508 +               enqueue_task(p, array);
11509 +               /*
11510 +                * Reschedule if we are currently running on this runqueue and
11511 +                * our priority decreased, or if we are not currently running on
11512 +                * this runqueue and our priority is higher than the current's
11513 +                */
11514 +               if (task_running(rq, p)) {
11515 +                       if (p->prio > oldprio)
11516 +                               resched_task(rq->curr);
11517 +               } else if (TASK_PREEMPTS_CURR(p, rq))
11518 +                       resched_task(rq->curr);
11519 +       }
11520 +       task_rq_unlock(rq, &flags);
11521 +}
11522 +
11523 +#endif
11524 +
11525 +void set_user_nice(struct task_struct *p, long nice)
11526 +{
11527 +       struct prio_array *array;
11528 +       int old_prio, delta;
11529 +       unsigned long flags;
11530 +       struct rq *rq;
11531 +
11532 +       if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
11533 +               return;
11534 +       /*
11535 +        * We have to be careful, if called from sys_setpriority(),
11536 +        * the task might be in the middle of scheduling on another CPU.
11537 +        */
11538 +       rq = task_rq_lock(p, &flags);
11539 +       /*
11540 +        * The RT priorities are set via sched_setscheduler(), but we still
11541 +        * allow the 'normal' nice value to be set - but as expected
11542 +        * it wont have any effect on scheduling until the task is
11543 +        * not SCHED_NORMAL/SCHED_BATCH:
11544 +        */
11545 +       if (has_rt_policy(p)) {
11546 +               p->static_prio = NICE_TO_PRIO(nice);
11547 +               goto out_unlock;
11548 +       }
11549 +       array = p->array;
11550 +       if (array) {
11551 +               dequeue_task(p, array);
11552 +               dec_raw_weighted_load(rq, p);
11553 +       }
11554 +
11555 +       p->static_prio = NICE_TO_PRIO(nice);
11556 +       set_load_weight(p);
11557 +       old_prio = p->prio;
11558 +       p->prio = effective_prio(p);
11559 +       delta = p->prio - old_prio;
11560 +
11561 +       if (array) {
11562 +               enqueue_task(p, array);
11563 +               inc_raw_weighted_load(rq, p);
11564 +               /*
11565 +                * If the task increased its priority or is running and
11566 +                * lowered its priority, then reschedule its CPU:
11567 +                */
11568 +               if (delta < 0 || (delta > 0 && task_running(rq, p)))
11569 +                       resched_task(rq->curr);
11570 +       }
11571 +out_unlock:
11572 +       task_rq_unlock(rq, &flags);
11573 +}
11574 +EXPORT_SYMBOL(set_user_nice);
11575 +
11576 +/*
11577 + * can_nice - check if a task can reduce its nice value
11578 + * @p: task
11579 + * @nice: nice value
11580 + */
11581 +int can_nice(const struct task_struct *p, const int nice)
11582 +{
11583 +       /* convert nice value [19,-20] to rlimit style value [1,40] */
11584 +       int nice_rlim = 20 - nice;
11585 +
11586 +       return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
11587 +               capable(CAP_SYS_NICE));
11588 +}
11589 +
11590 +#ifdef __ARCH_WANT_SYS_NICE
11591 +
11592 +/*
11593 + * sys_nice - change the priority of the current process.
11594 + * @increment: priority increment
11595 + *
11596 + * sys_setpriority is a more generic, but much slower function that
11597 + * does similar things.
11598 + */
11599 +asmlinkage long sys_nice(int increment)
11600 +{
11601 +       long nice, retval;
11602 +
11603 +       /*
11604 +        * Setpriority might change our priority at the same moment.
11605 +        * We don't have to worry. Conceptually one call occurs first
11606 +        * and we have a single winner.
11607 +        */
11608 +       if (increment < -40)
11609 +               increment = -40;
11610 +       if (increment > 40)
11611 +               increment = 40;
11612 +
11613 +       nice = PRIO_TO_NICE(current->static_prio) + increment;
11614 +       if (nice < -20)
11615 +               nice = -20;
11616 +       if (nice > 19)
11617 +               nice = 19;
11618 +
11619 +       if (increment < 0 && !can_nice(current, nice))
11620 +               return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
11621 +
11622 +       retval = security_task_setnice(current, nice);
11623 +       if (retval)
11624 +               return retval;
11625 +
11626 +       set_user_nice(current, nice);
11627 +       return 0;
11628 +}
11629 +
11630 +#endif
11631 +
11632 +/**
11633 + * task_prio - return the priority value of a given task.
11634 + * @p: the task in question.
11635 + *
11636 + * This is the priority value as seen by users in /proc.
11637 + * RT tasks are offset by -200. Normal tasks are centered
11638 + * around 0, value goes from -16 to +15.
11639 + */
11640 +int task_prio(const struct task_struct *p)
11641 +{
11642 +       return p->prio - MAX_RT_PRIO;
11643 +}
11644 +
11645 +/**
11646 + * task_nice - return the nice value of a given task.
11647 + * @p: the task in question.
11648 + */
11649 +int task_nice(const struct task_struct *p)
11650 +{
11651 +       return TASK_NICE(p);
11652 +}
11653 +EXPORT_SYMBOL_GPL(task_nice);
11654 +
11655 +/**
11656 + * idle_cpu - is a given cpu idle currently?
11657 + * @cpu: the processor in question.
11658 + */
11659 +int idle_cpu(int cpu)
11660 +{
11661 +       return cpu_curr(cpu) == cpu_rq(cpu)->idle;
11662 +}
11663 +
11664 +/**
11665 + * idle_task - return the idle task for a given cpu.
11666 + * @cpu: the processor in question.
11667 + */
11668 +struct task_struct *idle_task(int cpu)
11669 +{
11670 +       return cpu_rq(cpu)->idle;
11671 +}
11672 +
11673 +/**
11674 + * find_process_by_pid - find a process with a matching PID value.
11675 + * @pid: the pid in question.
11676 + */
11677 +static inline struct task_struct *find_process_by_pid(pid_t pid)
11678 +{
11679 +       return pid ? find_task_by_pid(pid) : current;
11680 +}
11681 +
11682 +/* Actually do priority change: must hold rq lock. */
11683 +static void __setscheduler(struct task_struct *p, int policy, int prio)
11684 +{
11685 +       BUG_ON(p->array);
11686 +
11687 +       p->policy = policy;
11688 +       p->rt_priority = prio;
11689 +       p->normal_prio = normal_prio(p);
11690 +       /* we are holding p->pi_lock already */
11691 +       p->prio = rt_mutex_getprio(p);
11692 +       /*
11693 +        * SCHED_BATCH tasks are treated as perpetual CPU hogs:
11694 +        */
11695 +       if (policy == SCHED_BATCH)
11696 +               p->sleep_avg = 0;
11697 +       set_load_weight(p);
11698 +}
11699 +
11700 +/**
11701 + * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
11702 + * @p: the task in question.
11703 + * @policy: new policy.
11704 + * @param: structure containing the new RT priority.
11705 + *
11706 + * NOTE that the task may be already dead.
11707 + */
11708 +int sched_setscheduler(struct task_struct *p, int policy,
11709 +                      struct sched_param *param)
11710 +{
11711 +       int retval, oldprio, oldpolicy = -1;
11712 +       struct prio_array *array;
11713 +       unsigned long flags;
11714 +       struct rq *rq;
11715 +
11716 +       /* may grab non-irq protected spin_locks */
11717 +       BUG_ON(in_interrupt());
11718 +recheck:
11719 +       /* double check policy once rq lock held */
11720 +       if (policy < 0)
11721 +               policy = oldpolicy = p->policy;
11722 +       else if (policy != SCHED_FIFO && policy != SCHED_RR &&
11723 +                       policy != SCHED_NORMAL && policy != SCHED_BATCH)
11724 +               return -EINVAL;
11725 +       /*
11726 +        * Valid priorities for SCHED_FIFO and SCHED_RR are
11727 +        * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
11728 +        * SCHED_BATCH is 0.
11729 +        */
11730 +       if (param->sched_priority < 0 ||
11731 +           (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
11732 +           (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
11733 +               return -EINVAL;
11734 +       if (is_rt_policy(policy) != (param->sched_priority != 0))
11735 +               return -EINVAL;
11736 +
11737 +       /*
11738 +        * Allow unprivileged RT tasks to decrease priority:
11739 +        */
11740 +       if (!capable(CAP_SYS_NICE)) {
11741 +               if (is_rt_policy(policy)) {
11742 +                       unsigned long rlim_rtprio;
11743 +                       unsigned long flags;
11744 +
11745 +                       if (!lock_task_sighand(p, &flags))
11746 +                               return -ESRCH;
11747 +                       rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
11748 +                       unlock_task_sighand(p, &flags);
11749 +
11750 +                       /* can't set/change the rt policy */
11751 +                       if (policy != p->policy && !rlim_rtprio)
11752 +                               return -EPERM;
11753 +
11754 +                       /* can't increase priority */
11755 +                       if (param->sched_priority > p->rt_priority &&
11756 +                           param->sched_priority > rlim_rtprio)
11757 +                               return -EPERM;
11758 +               }
11759 +
11760 +               /* can't change other user's priorities */
11761 +               if ((current->euid != p->euid) &&
11762 +                   (current->euid != p->uid))
11763 +                       return -EPERM;
11764 +       }
11765 +
11766 +       retval = security_task_setscheduler(p, policy, param);
11767 +       if (retval)
11768 +               return retval;
11769 +       /*
11770 +        * make sure no PI-waiters arrive (or leave) while we are
11771 +        * changing the priority of the task:
11772 +        */
11773 +       spin_lock_irqsave(&p->pi_lock, flags);
11774 +       /*
11775 +        * To be able to change p->policy safely, the apropriate
11776 +        * runqueue lock must be held.
11777 +        */
11778 +       rq = __task_rq_lock(p);
11779 +       /* recheck policy now with rq lock held */
11780 +       if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
11781 +               policy = oldpolicy = -1;
11782 +               __task_rq_unlock(rq);
11783 +               spin_unlock_irqrestore(&p->pi_lock, flags);
11784 +               goto recheck;
11785 +       }
11786 +       array = p->array;
11787 +       if (array)
11788 +               deactivate_task(p, rq);
11789 +       oldprio = p->prio;
11790 +       __setscheduler(p, policy, param->sched_priority);
11791 +       if (array) {
11792 +               vx_activate_task(p);
11793 +               __activate_task(p, rq);
11794 +               /*
11795 +                * Reschedule if we are currently running on this runqueue and
11796 +                * our priority decreased, or if we are not currently running on
11797 +                * this runqueue and our priority is higher than the current's
11798 +                */
11799 +               if (task_running(rq, p)) {
11800 +                       if (p->prio > oldprio)
11801 +                               resched_task(rq->curr);
11802 +               } else if (TASK_PREEMPTS_CURR(p, rq))
11803 +                       resched_task(rq->curr);
11804 +       }
11805 +       __task_rq_unlock(rq);
11806 +       spin_unlock_irqrestore(&p->pi_lock, flags);
11807 +
11808 +       rt_mutex_adjust_pi(p);
11809 +
11810 +       return 0;
11811 +}
11812 +EXPORT_SYMBOL_GPL(sched_setscheduler);
11813 +
11814 +static int
11815 +do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
11816 +{
11817 +       struct sched_param lparam;
11818 +       struct task_struct *p;
11819 +       int retval;
11820 +
11821 +       if (!param || pid < 0)
11822 +               return -EINVAL;
11823 +       if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
11824 +               return -EFAULT;
11825 +
11826 +       rcu_read_lock();
11827 +       retval = -ESRCH;
11828 +       p = find_process_by_pid(pid);
11829 +       if (p != NULL)
11830 +               retval = sched_setscheduler(p, policy, &lparam);
11831 +       rcu_read_unlock();
11832 +
11833 +       return retval;
11834 +}
11835 +
11836 +/**
11837 + * sys_sched_setscheduler - set/change the scheduler policy and RT priority
11838 + * @pid: the pid in question.
11839 + * @policy: new policy.
11840 + * @param: structure containing the new RT priority.
11841 + */
11842 +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
11843 +                                      struct sched_param __user *param)
11844 +{
11845 +       /* negative values for policy are not valid */
11846 +       if (policy < 0)
11847 +               return -EINVAL;
11848 +
11849 +       return do_sched_setscheduler(pid, policy, param);
11850 +}
11851 +
11852 +/**
11853 + * sys_sched_setparam - set/change the RT priority of a thread
11854 + * @pid: the pid in question.
11855 + * @param: structure containing the new RT priority.
11856 + */
11857 +asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
11858 +{
11859 +       return do_sched_setscheduler(pid, -1, param);
11860 +}
11861 +
11862 +/**
11863 + * sys_sched_getscheduler - get the policy (scheduling class) of a thread
11864 + * @pid: the pid in question.
11865 + */
11866 +asmlinkage long sys_sched_getscheduler(pid_t pid)
11867 +{
11868 +       struct task_struct *p;
11869 +       int retval = -EINVAL;
11870 +
11871 +       if (pid < 0)
11872 +               goto out_nounlock;
11873 +
11874 +       retval = -ESRCH;
11875 +       read_lock(&tasklist_lock);
11876 +       p = find_process_by_pid(pid);
11877 +       if (p) {
11878 +               retval = security_task_getscheduler(p);
11879 +               if (!retval)
11880 +                       retval = p->policy;
11881 +       }
11882 +       read_unlock(&tasklist_lock);
11883 +
11884 +out_nounlock:
11885 +       return retval;
11886 +}
11887 +
11888 +/**
11889 + * sys_sched_getparam - get the RT priority of a thread
11890 + * @pid: the pid in question.
11891 + * @param: structure containing the RT priority.
11892 + */
11893 +asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
11894 +{
11895 +       struct sched_param lp;
11896 +       struct task_struct *p;
11897 +       int retval = -EINVAL;
11898 +
11899 +       if (!param || pid < 0)
11900 +               goto out_nounlock;
11901 +
11902 +       read_lock(&tasklist_lock);
11903 +       p = find_process_by_pid(pid);
11904 +       retval = -ESRCH;
11905 +       if (!p)
11906 +               goto out_unlock;
11907 +
11908 +       retval = security_task_getscheduler(p);
11909 +       if (retval)
11910 +               goto out_unlock;
11911 +
11912 +       lp.sched_priority = p->rt_priority;
11913 +       read_unlock(&tasklist_lock);
11914 +
11915 +       /*
11916 +        * This one might sleep, we cannot do it with a spinlock held ...
11917 +        */
11918 +       retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
11919 +
11920 +out_nounlock:
11921 +       return retval;
11922 +
11923 +out_unlock:
11924 +       read_unlock(&tasklist_lock);
11925 +       return retval;
11926 +}
11927 +
11928 +long sched_setaffinity(pid_t pid, cpumask_t new_mask)
11929 +{
11930 +       cpumask_t cpus_allowed;
11931 +       struct task_struct *p;
11932 +       int retval;
11933 +
11934 +       mutex_lock(&sched_hotcpu_mutex);
11935 +       read_lock(&tasklist_lock);
11936 +
11937 +       p = find_process_by_pid(pid);
11938 +       if (!p) {
11939 +               read_unlock(&tasklist_lock);
11940 +               mutex_unlock(&sched_hotcpu_mutex);
11941 +               return -ESRCH;
11942 +       }
11943 +
11944 +       /*
11945 +        * It is not safe to call set_cpus_allowed with the
11946 +        * tasklist_lock held.  We will bump the task_struct's
11947 +        * usage count and then drop tasklist_lock.
11948 +        */
11949 +       get_task_struct(p);
11950 +       read_unlock(&tasklist_lock);
11951 +
11952 +       retval = -EPERM;
11953 +       if ((current->euid != p->euid) && (current->euid != p->uid) &&
11954 +                       !capable(CAP_SYS_NICE))
11955 +               goto out_unlock;
11956 +
11957 +       retval = security_task_setscheduler(p, 0, NULL);
11958 +       if (retval)
11959 +               goto out_unlock;
11960 +
11961 +       cpus_allowed = cpuset_cpus_allowed(p);
11962 +       cpus_and(new_mask, new_mask, cpus_allowed);
11963 +       retval = set_cpus_allowed(p, new_mask);
11964 +
11965 +out_unlock:
11966 +       put_task_struct(p);
11967 +       mutex_unlock(&sched_hotcpu_mutex);
11968 +       return retval;
11969 +}
11970 +
11971 +static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
11972 +                            cpumask_t *new_mask)
11973 +{
11974 +       if (len < sizeof(cpumask_t)) {
11975 +               memset(new_mask, 0, sizeof(cpumask_t));
11976 +       } else if (len > sizeof(cpumask_t)) {
11977 +               len = sizeof(cpumask_t);
11978 +       }
11979 +       return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
11980 +}
11981 +
11982 +/**
11983 + * sys_sched_setaffinity - set the cpu affinity of a process
11984 + * @pid: pid of the process
11985 + * @len: length in bytes of the bitmask pointed to by user_mask_ptr
11986 + * @user_mask_ptr: user-space pointer to the new cpu mask
11987 + */
11988 +asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
11989 +                                     unsigned long __user *user_mask_ptr)
11990 +{
11991 +       cpumask_t new_mask;
11992 +       int retval;
11993 +
11994 +       retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
11995 +       if (retval)
11996 +               return retval;
11997 +
11998 +       return sched_setaffinity(pid, new_mask);
11999 +}
12000 +
12001 +/*
12002 + * Represents all cpu's present in the system
12003 + * In systems capable of hotplug, this map could dynamically grow
12004 + * as new cpu's are detected in the system via any platform specific
12005 + * method, such as ACPI for e.g.
12006 + */
12007 +
12008 +cpumask_t cpu_present_map __read_mostly;
12009 +EXPORT_SYMBOL(cpu_present_map);
12010 +
12011 +#ifndef CONFIG_SMP
12012 +cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
12013 +EXPORT_SYMBOL(cpu_online_map);
12014 +
12015 +cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
12016 +EXPORT_SYMBOL(cpu_possible_map);
12017 +#endif
12018 +
12019 +long sched_getaffinity(pid_t pid, cpumask_t *mask)
12020 +{
12021 +       struct task_struct *p;
12022 +       int retval;
12023 +
12024 +       mutex_lock(&sched_hotcpu_mutex);
12025 +       read_lock(&tasklist_lock);
12026 +
12027 +       retval = -ESRCH;
12028 +       p = find_process_by_pid(pid);
12029 +       if (!p)
12030 +               goto out_unlock;
12031 +
12032 +       retval = security_task_getscheduler(p);
12033 +       if (retval)
12034 +               goto out_unlock;
12035 +
12036 +       cpus_and(*mask, p->cpus_allowed, cpu_online_map);
12037 +
12038 +out_unlock:
12039 +       read_unlock(&tasklist_lock);
12040 +       mutex_unlock(&sched_hotcpu_mutex);
12041 +       if (retval)
12042 +               return retval;
12043 +
12044 +       return 0;
12045 +}
12046 +
12047 +/**
12048 + * sys_sched_getaffinity - get the cpu affinity of a process
12049 + * @pid: pid of the process
12050 + * @len: length in bytes of the bitmask pointed to by user_mask_ptr
12051 + * @user_mask_ptr: user-space pointer to hold the current cpu mask
12052 + */
12053 +asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
12054 +                                     unsigned long __user *user_mask_ptr)
12055 +{
12056 +       int ret;
12057 +       cpumask_t mask;
12058 +
12059 +       if (len < sizeof(cpumask_t))
12060 +               return -EINVAL;
12061 +
12062 +       ret = sched_getaffinity(pid, &mask);
12063 +       if (ret < 0)
12064 +               return ret;
12065 +
12066 +       if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
12067 +               return -EFAULT;
12068 +
12069 +       return sizeof(cpumask_t);
12070 +}
12071 +
12072 +/**
12073 + * sys_sched_yield - yield the current processor to other threads.
12074 + *
12075 + * This function yields the current CPU by moving the calling thread
12076 + * to the expired array. If there are no other threads running on this
12077 + * CPU then this function will return.
12078 + */
12079 +asmlinkage long sys_sched_yield(void)
12080 +{
12081 +       struct rq *rq = this_rq_lock();
12082 +       struct prio_array *array = current->array, *target = rq->expired;
12083 +
12084 +       schedstat_inc(rq, yld_cnt);
12085 +       /*
12086 +        * We implement yielding by moving the task into the expired
12087 +        * queue.
12088 +        *
12089 +        * (special rule: RT tasks will just roundrobin in the active
12090 +        *  array.)
12091 +        */
12092 +       if (rt_task(current))
12093 +               target = rq->active;
12094 +
12095 +       if (array->nr_active == 1) {
12096 +               schedstat_inc(rq, yld_act_empty);
12097 +               if (!rq->expired->nr_active)
12098 +                       schedstat_inc(rq, yld_both_empty);
12099 +       } else if (!rq->expired->nr_active)
12100 +               schedstat_inc(rq, yld_exp_empty);
12101 +
12102 +       if (array != target) {
12103 +               dequeue_task(current, array);
12104 +               enqueue_task(current, target);
12105 +       } else
12106 +               /*
12107 +                * requeue_task is cheaper so perform that if possible.
12108 +                */
12109 +               requeue_task(current, array);
12110 +
12111 +       /*
12112 +        * Since we are going to call schedule() anyway, there's
12113 +        * no need to preempt or enable interrupts:
12114 +        */
12115 +       __release(rq->lock);
12116 +       spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
12117 +       _raw_spin_unlock(&rq->lock);
12118 +       preempt_enable_no_resched();
12119 +
12120 +       schedule();
12121 +
12122 +       return 0;
12123 +}
12124 +
12125 +static void __cond_resched(void)
12126 +{
12127 +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
12128 +       __might_sleep(__FILE__, __LINE__);
12129 +#endif
12130 +       /*
12131 +        * The BKS might be reacquired before we have dropped
12132 +        * PREEMPT_ACTIVE, which could trigger a second
12133 +        * cond_resched() call.
12134 +        */
12135 +       do {
12136 +               add_preempt_count(PREEMPT_ACTIVE);
12137 +               schedule();
12138 +               sub_preempt_count(PREEMPT_ACTIVE);
12139 +       } while (need_resched());
12140 +}
12141 +
12142 +int __sched cond_resched(void)
12143 +{
12144 +       if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
12145 +                                       system_state == SYSTEM_RUNNING) {
12146 +               __cond_resched();
12147 +               return 1;
12148 +       }
12149 +       return 0;
12150 +}
12151 +EXPORT_SYMBOL(cond_resched);
12152 +
12153 +/*
12154 + * cond_resched_lock() - if a reschedule is pending, drop the given lock,
12155 + * call schedule, and on return reacquire the lock.
12156 + *
12157 + * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
12158 + * operations here to prevent schedule() from being called twice (once via
12159 + * spin_unlock(), once by hand).
12160 + */
12161 +int cond_resched_lock(spinlock_t *lock)
12162 +{
12163 +       int ret = 0;
12164 +
12165 +       if (need_lockbreak(lock)) {
12166 +               spin_unlock(lock);
12167 +               cpu_relax();
12168 +               ret = 1;
12169 +               spin_lock(lock);
12170 +       }
12171 +       if (need_resched() && system_state == SYSTEM_RUNNING) {
12172 +               spin_release(&lock->dep_map, 1, _THIS_IP_);
12173 +               _raw_spin_unlock(lock);
12174 +               preempt_enable_no_resched();
12175 +               __cond_resched();
12176 +               ret = 1;
12177 +               spin_lock(lock);
12178 +       }
12179 +       return ret;
12180 +}
12181 +EXPORT_SYMBOL(cond_resched_lock);
12182 +
12183 +int __sched cond_resched_softirq(void)
12184 +{
12185 +       BUG_ON(!in_softirq());
12186 +
12187 +       if (need_resched() && system_state == SYSTEM_RUNNING) {
12188 +               local_bh_enable();
12189 +               __cond_resched();
12190 +               local_bh_disable();
12191 +               return 1;
12192 +       }
12193 +       return 0;
12194 +}
12195 +EXPORT_SYMBOL(cond_resched_softirq);
12196 +
12197 +/**
12198 + * yield - yield the current processor to other threads.
12199 + *
12200 + * This is a shortcut for kernel-space yielding - it marks the
12201 + * thread runnable and calls sys_sched_yield().
12202 + */
12203 +void __sched yield(void)
12204 +{
12205 +       set_current_state(TASK_RUNNING);
12206 +       sys_sched_yield();
12207 +}
12208 +EXPORT_SYMBOL(yield);
12209 +
12210 +/*
12211 + * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
12212 + * that process accounting knows that this is a task in IO wait state.
12213 + *
12214 + * But don't do that if it is a deliberate, throttling IO wait (this task
12215 + * has set its backing_dev_info: the queue against which it should throttle)
12216 + */
12217 +void __sched io_schedule(void)
12218 +{
12219 +       struct rq *rq = &__raw_get_cpu_var(runqueues);
12220 +
12221 +       delayacct_blkio_start();
12222 +       atomic_inc(&rq->nr_iowait);
12223 +       schedule();
12224 +       atomic_dec(&rq->nr_iowait);
12225 +       delayacct_blkio_end();
12226 +}
12227 +EXPORT_SYMBOL(io_schedule);
12228 +
12229 +long __sched io_schedule_timeout(long timeout)
12230 +{
12231 +       struct rq *rq = &__raw_get_cpu_var(runqueues);
12232 +       long ret;
12233 +
12234 +       delayacct_blkio_start();
12235 +       atomic_inc(&rq->nr_iowait);
12236 +       ret = schedule_timeout(timeout);
12237 +       atomic_dec(&rq->nr_iowait);
12238 +       delayacct_blkio_end();
12239 +       return ret;
12240 +}
12241 +
12242 +/**
12243 + * sys_sched_get_priority_max - return maximum RT priority.
12244 + * @policy: scheduling class.
12245 + *
12246 + * this syscall returns the maximum rt_priority that can be used
12247 + * by a given scheduling class.
12248 + */
12249 +asmlinkage long sys_sched_get_priority_max(int policy)
12250 +{
12251 +       int ret = -EINVAL;
12252 +
12253 +       switch (policy) {
12254 +       case SCHED_FIFO:
12255 +       case SCHED_RR:
12256 +               ret = MAX_USER_RT_PRIO-1;
12257 +               break;
12258 +       case SCHED_NORMAL:
12259 +       case SCHED_BATCH:
12260 +               ret = 0;
12261 +               break;
12262 +       }
12263 +       return ret;
12264 +}
12265 +
12266 +/**
12267 + * sys_sched_get_priority_min - return minimum RT priority.
12268 + * @policy: scheduling class.
12269 + *
12270 + * this syscall returns the minimum rt_priority that can be used
12271 + * by a given scheduling class.
12272 + */
12273 +asmlinkage long sys_sched_get_priority_min(int policy)
12274 +{
12275 +       int ret = -EINVAL;
12276 +
12277 +       switch (policy) {
12278 +       case SCHED_FIFO:
12279 +       case SCHED_RR:
12280 +               ret = 1;
12281 +               break;
12282 +       case SCHED_NORMAL:
12283 +       case SCHED_BATCH:
12284 +               ret = 0;
12285 +       }
12286 +       return ret;
12287 +}
12288 +
12289 +/**
12290 + * sys_sched_rr_get_interval - return the default timeslice of a process.
12291 + * @pid: pid of the process.
12292 + * @interval: userspace pointer to the timeslice value.
12293 + *
12294 + * this syscall writes the default timeslice value of a given process
12295 + * into the user-space timespec buffer. A value of '0' means infinity.
12296 + */
12297 +asmlinkage
12298 +long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
12299 +{
12300 +       struct task_struct *p;
12301 +       int retval = -EINVAL;
12302 +       struct timespec t;
12303 +
12304 +       if (pid < 0)
12305 +               goto out_nounlock;
12306 +
12307 +       retval = -ESRCH;
12308 +       read_lock(&tasklist_lock);
12309 +       p = find_process_by_pid(pid);
12310 +       if (!p)
12311 +               goto out_unlock;
12312 +
12313 +       retval = security_task_getscheduler(p);
12314 +       if (retval)
12315 +               goto out_unlock;
12316 +
12317 +       jiffies_to_timespec(p->policy == SCHED_FIFO ?
12318 +                               0 : task_timeslice(p), &t);
12319 +       read_unlock(&tasklist_lock);
12320 +       retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
12321 +out_nounlock:
12322 +       return retval;
12323 +out_unlock:
12324 +       read_unlock(&tasklist_lock);
12325 +       return retval;
12326 +}
12327 +
12328 +static const char stat_nam[] = "RSDTtZX";
12329 +
12330 +static void show_task(struct task_struct *p)
12331 +{
12332 +       unsigned long free = 0;
12333 +       unsigned state;
12334 +
12335 +       state = p->state ? __ffs(p->state) + 1 : 0;
12336 +       printk("%-13.13s %c", p->comm,
12337 +               state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
12338 +#if (BITS_PER_LONG == 32)
12339 +       if (state == TASK_RUNNING)
12340 +               printk(" running ");
12341 +       else
12342 +               printk(" %08lX ", thread_saved_pc(p));
12343 +#else
12344 +       if (state == TASK_RUNNING)
12345 +               printk("  running task   ");
12346 +       else
12347 +               printk(" %016lx ", thread_saved_pc(p));
12348 +#endif
12349 +#ifdef CONFIG_DEBUG_STACK_USAGE
12350 +       {
12351 +               unsigned long *n = end_of_stack(p);
12352 +               while (!*n)
12353 +                       n++;
12354 +               free = (unsigned long)n - (unsigned long)end_of_stack(p);
12355 +       }
12356 +#endif
12357 +       printk("%5lu %5d %6d", free, p->pid, p->parent->pid);
12358 +       if (!p->mm)
12359 +               printk(" (L-TLB)\n");
12360 +       else
12361 +               printk(" (NOTLB)\n");
12362 +
12363 +       if (state != TASK_RUNNING)
12364 +               show_stack(p, NULL);
12365 +}
12366 +
12367 +void show_state_filter(unsigned long state_filter)
12368 +{
12369 +       struct task_struct *g, *p;
12370 +
12371 +#if (BITS_PER_LONG == 32)
12372 +       printk("\n"
12373 +              "                         free                        sibling\n");
12374 +       printk("  task             PC    stack   pid father child younger older\n");
12375 +#else
12376 +       printk("\n"
12377 +              "                                 free                        sibling\n");
12378 +       printk("  task                 PC        stack   pid father child younger older\n");
12379 +#endif
12380 +       read_lock(&tasklist_lock);
12381 +       do_each_thread(g, p) {
12382 +               /*
12383 +                * reset the NMI-timeout, listing all files on a slow
12384 +                * console might take a lot of time:
12385 +                */
12386 +               touch_nmi_watchdog();
12387 +               if (!state_filter || (p->state & state_filter))
12388 +                       show_task(p);
12389 +       } while_each_thread(g, p);
12390 +
12391 +       touch_all_softlockup_watchdogs();
12392 +
12393 +       read_unlock(&tasklist_lock);
12394 +       /*
12395 +        * Only show locks if all tasks are dumped:
12396 +        */
12397 +       if (state_filter == -1)
12398 +               debug_show_all_locks();
12399 +}
12400 +
12401 +/**
12402 + * init_idle - set up an idle thread for a given CPU
12403 + * @idle: task in question
12404 + * @cpu: cpu the idle task belongs to
12405 + *
12406 + * NOTE: this function does not set the idle thread's NEED_RESCHED
12407 + * flag, to make booting more robust.
12408 + */
12409 +void __cpuinit init_idle(struct task_struct *idle, int cpu)
12410 +{
12411 +       struct rq *rq = cpu_rq(cpu);
12412 +       unsigned long flags;
12413 +
12414 +       idle->timestamp = sched_clock();
12415 +       idle->sleep_avg = 0;
12416 +       idle->array = NULL;
12417 +       idle->prio = idle->normal_prio = MAX_PRIO;
12418 +       idle->state = TASK_RUNNING;
12419 +       idle->cpus_allowed = cpumask_of_cpu(cpu);
12420 +       set_task_cpu(idle, cpu);
12421 +
12422 +       spin_lock_irqsave(&rq->lock, flags);
12423 +       rq->curr = rq->idle = idle;
12424 +#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
12425 +       idle->oncpu = 1;
12426 +#endif
12427 +       spin_unlock_irqrestore(&rq->lock, flags);
12428 +
12429 +       /* Set the preempt count _outside_ the spinlocks! */
12430 +#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
12431 +       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
12432 +#else
12433 +       task_thread_info(idle)->preempt_count = 0;
12434 +#endif
12435 +}
12436 +
12437 +/*
12438 + * In a system that switches off the HZ timer nohz_cpu_mask
12439 + * indicates which cpus entered this state. This is used
12440 + * in the rcu update to wait only for active cpus. For system
12441 + * which do not switch off the HZ timer nohz_cpu_mask should
12442 + * always be CPU_MASK_NONE.
12443 + */
12444 +cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
12445 +
12446 +#ifdef CONFIG_SMP
12447 +/*
12448 + * This is how migration works:
12449 + *
12450 + * 1) we queue a struct migration_req structure in the source CPU's
12451 + *    runqueue and wake up that CPU's migration thread.
12452 + * 2) we down() the locked semaphore => thread blocks.
12453 + * 3) migration thread wakes up (implicitly it forces the migrated
12454 + *    thread off the CPU)
12455 + * 4) it gets the migration request and checks whether the migrated
12456 + *    task is still in the wrong runqueue.
12457 + * 5) if it's in the wrong runqueue then the migration thread removes
12458 + *    it and puts it into the right queue.
12459 + * 6) migration thread up()s the semaphore.
12460 + * 7) we wake up and the migration is done.
12461 + */
12462 +
12463 +/*
12464 + * Change a given task's CPU affinity. Migrate the thread to a
12465 + * proper CPU and schedule it away if the CPU it's executing on
12466 + * is removed from the allowed bitmask.
12467 + *
12468 + * NOTE: the caller must have a valid reference to the task, the
12469 + * task must not exit() & deallocate itself prematurely.  The
12470 + * call is not atomic; no spinlocks may be held.
12471 + */
12472 +int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
12473 +{
12474 +       struct migration_req req;
12475 +       unsigned long flags;
12476 +       struct rq *rq;
12477 +       int ret = 0;
12478 +
12479 +       rq = task_rq_lock(p, &flags);
12480 +       if (!cpus_intersects(new_mask, cpu_online_map)) {
12481 +               ret = -EINVAL;
12482 +               goto out;
12483 +       }
12484 +
12485 +       p->cpus_allowed = new_mask;
12486 +       /* Can the task run on the task's current CPU? If so, we're done */
12487 +       if (cpu_isset(task_cpu(p), new_mask))
12488 +               goto out;
12489 +
12490 +       if (migrate_task(p, any_online_cpu(new_mask), &req)) {
12491 +               /* Need help from migration thread: drop lock and wait. */
12492 +               task_rq_unlock(rq, &flags);
12493 +               wake_up_process(rq->migration_thread);
12494 +               wait_for_completion(&req.done);
12495 +               tlb_migrate_finish(p->mm);
12496 +               return 0;
12497 +       }
12498 +out:
12499 +       task_rq_unlock(rq, &flags);
12500 +
12501 +       return ret;
12502 +}
12503 +EXPORT_SYMBOL_GPL(set_cpus_allowed);
12504 +
12505 +/*
12506 + * Move (not current) task off this cpu, onto dest cpu.  We're doing
12507 + * this because either it can't run here any more (set_cpus_allowed()
12508 + * away from this CPU, or CPU going down), or because we're
12509 + * attempting to rebalance this task on exec (sched_exec).
12510 + *
12511 + * So we race with normal scheduler movements, but that's OK, as long
12512 + * as the task is no longer on this CPU.
12513 + *
12514 + * Returns non-zero if task was successfully migrated.
12515 + */
12516 +static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
12517 +{
12518 +       struct rq *rq_dest, *rq_src;
12519 +       int ret = 0;
12520 +
12521 +       if (unlikely(cpu_is_offline(dest_cpu)))
12522 +               return ret;
12523 +
12524 +       rq_src = cpu_rq(src_cpu);
12525 +       rq_dest = cpu_rq(dest_cpu);
12526 +
12527 +       double_rq_lock(rq_src, rq_dest);
12528 +       /* Already moved. */
12529 +       if (task_cpu(p) != src_cpu)
12530 +               goto out;
12531 +       /* Affinity changed (again). */
12532 +       if (!cpu_isset(dest_cpu, p->cpus_allowed))
12533 +               goto out;
12534 +
12535 +       set_task_cpu(p, dest_cpu);
12536 +       if (p->array) {
12537 +               /*
12538 +                * Sync timestamp with rq_dest's before activating.
12539 +                * The same thing could be achieved by doing this step
12540 +                * afterwards, and pretending it was a local activate.
12541 +                * This way is cleaner and logically correct.
12542 +                */
12543 +               p->timestamp = p->timestamp - rq_src->most_recent_timestamp
12544 +                               + rq_dest->most_recent_timestamp;
12545 +               deactivate_task(p, rq_src);
12546 +               vx_activate_task(p);
12547 +               __activate_task(p, rq_dest);
12548 +               if (TASK_PREEMPTS_CURR(p, rq_dest))
12549 +                       resched_task(rq_dest->curr);
12550 +       }
12551 +       ret = 1;
12552 +out:
12553 +       double_rq_unlock(rq_src, rq_dest);
12554 +       return ret;
12555 +}
12556 +
12557 +/*
12558 + * migration_thread - this is a highprio system thread that performs
12559 + * thread migration by bumping thread off CPU then 'pushing' onto
12560 + * another runqueue.
12561 + */
12562 +static int migration_thread(void *data)
12563 +{
12564 +       int cpu = (long)data;
12565 +       struct rq *rq;
12566 +
12567 +       rq = cpu_rq(cpu);
12568 +       BUG_ON(rq->migration_thread != current);
12569 +
12570 +       set_current_state(TASK_INTERRUPTIBLE);
12571 +       while (!kthread_should_stop()) {
12572 +               struct migration_req *req;
12573 +               struct list_head *head;
12574 +
12575 +               try_to_freeze();
12576 +
12577 +               spin_lock_irq(&rq->lock);
12578 +
12579 +               if (cpu_is_offline(cpu)) {
12580 +                       spin_unlock_irq(&rq->lock);
12581 +                       goto wait_to_die;
12582 +               }
12583 +
12584 +               if (rq->active_balance) {
12585 +                       active_load_balance(rq, cpu);
12586 +                       rq->active_balance = 0;
12587 +               }
12588 +
12589 +               head = &rq->migration_queue;
12590 +
12591 +               if (list_empty(head)) {
12592 +                       spin_unlock_irq(&rq->lock);
12593 +                       schedule();
12594 +                       set_current_state(TASK_INTERRUPTIBLE);
12595 +                       continue;
12596 +               }
12597 +               req = list_entry(head->next, struct migration_req, list);
12598 +               list_del_init(head->next);
12599 +
12600 +               spin_unlock(&rq->lock);
12601 +               __migrate_task(req->task, cpu, req->dest_cpu);
12602 +               local_irq_enable();
12603 +
12604 +               complete(&req->done);
12605 +       }
12606 +       __set_current_state(TASK_RUNNING);
12607 +       return 0;
12608 +
12609 +wait_to_die:
12610 +       /* Wait for kthread_stop */
12611 +       set_current_state(TASK_INTERRUPTIBLE);
12612 +       while (!kthread_should_stop()) {
12613 +               schedule();
12614 +               set_current_state(TASK_INTERRUPTIBLE);
12615 +       }
12616 +       __set_current_state(TASK_RUNNING);
12617 +       return 0;
12618 +}
12619 +
12620 +#ifdef CONFIG_HOTPLUG_CPU
12621 +/*
12622 + * Figure out where task on dead CPU should go, use force if necessary.
12623 + * NOTE: interrupts should be disabled by the caller
12624 + */
12625 +static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
12626 +{
12627 +       unsigned long flags;
12628 +       cpumask_t mask;
12629 +       struct rq *rq;
12630 +       int dest_cpu;
12631 +
12632 +restart:
12633 +       /* On same node? */
12634 +       mask = node_to_cpumask(cpu_to_node(dead_cpu));
12635 +       cpus_and(mask, mask, p->cpus_allowed);
12636 +       dest_cpu = any_online_cpu(mask);
12637 +
12638 +       /* On any allowed CPU? */
12639 +       if (dest_cpu == NR_CPUS)
12640 +               dest_cpu = any_online_cpu(p->cpus_allowed);
12641 +
12642 +       /* No more Mr. Nice Guy. */
12643 +       if (dest_cpu == NR_CPUS) {
12644 +               rq = task_rq_lock(p, &flags);
12645 +               cpus_setall(p->cpus_allowed);
12646 +               dest_cpu = any_online_cpu(p->cpus_allowed);
12647 +               task_rq_unlock(rq, &flags);
12648 +
12649 +               /*
12650 +                * Don't tell them about moving exiting tasks or
12651 +                * kernel threads (both mm NULL), since they never
12652 +                * leave kernel.
12653 +                */
12654 +               if (p->mm && printk_ratelimit())
12655 +                       printk(KERN_INFO "process %d (%s) no "
12656 +                              "longer affine to cpu%d\n",
12657 +                              p->pid, p->comm, dead_cpu);
12658 +       }
12659 +       if (!__migrate_task(p, dead_cpu, dest_cpu))
12660 +               goto restart;
12661 +}
12662 +
12663 +/*
12664 + * While a dead CPU has no uninterruptible tasks queued at this point,
12665 + * it might still have a nonzero ->nr_uninterruptible counter, because
12666 + * for performance reasons the counter is not strictly tracking tasks to
12667 + * their home CPUs. So we just add the counter to another CPU's counter,
12668 + * to keep the global sum constant after CPU-down:
12669 + */
12670 +static void migrate_nr_uninterruptible(struct rq *rq_src)
12671 +{
12672 +       struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
12673 +       unsigned long flags;
12674 +
12675 +       local_irq_save(flags);
12676 +       double_rq_lock(rq_src, rq_dest);
12677 +       rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
12678 +       rq_src->nr_uninterruptible = 0;
12679 +       double_rq_unlock(rq_src, rq_dest);
12680 +       local_irq_restore(flags);
12681 +}
12682 +
12683 +/* Run through task list and migrate tasks from the dead cpu. */
12684 +static void migrate_live_tasks(int src_cpu)
12685 +{
12686 +       struct task_struct *p, *t;
12687 +
12688 +       write_lock_irq(&tasklist_lock);
12689 +
12690 +       do_each_thread(t, p) {
12691 +               if (p == current)
12692 +                       continue;
12693 +
12694 +               if (task_cpu(p) == src_cpu)
12695 +                       move_task_off_dead_cpu(src_cpu, p);
12696 +       } while_each_thread(t, p);
12697 +
12698 +       write_unlock_irq(&tasklist_lock);
12699 +}
12700 +
12701 +/* Schedules idle task to be the next runnable task on current CPU.
12702 + * It does so by boosting its priority to highest possible and adding it to
12703 + * the _front_ of the runqueue. Used by CPU offline code.
12704 + */
12705 +void sched_idle_next(void)
12706 +{
12707 +       int this_cpu = smp_processor_id();
12708 +       struct rq *rq = cpu_rq(this_cpu);
12709 +       struct task_struct *p = rq->idle;
12710 +       unsigned long flags;
12711 +
12712 +       /* cpu has to be offline */
12713 +       BUG_ON(cpu_online(this_cpu));
12714 +
12715 +       /*
12716 +        * Strictly not necessary since rest of the CPUs are stopped by now
12717 +        * and interrupts disabled on the current cpu.
12718 +        */
12719 +       spin_lock_irqsave(&rq->lock, flags);
12720 +
12721 +       __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
12722 +
12723 +       /* Add idle task to the _front_ of its priority queue: */
12724 +       __activate_idle_task(p, rq);
12725 +
12726 +       spin_unlock_irqrestore(&rq->lock, flags);
12727 +}
12728 +
12729 +/*
12730 + * Ensures that the idle task is using init_mm right before its cpu goes
12731 + * offline.
12732 + */
12733 +void idle_task_exit(void)
12734 +{
12735 +       struct mm_struct *mm = current->active_mm;
12736 +
12737 +       BUG_ON(cpu_online(smp_processor_id()));
12738 +
12739 +       if (mm != &init_mm)
12740 +               switch_mm(mm, &init_mm, current);
12741 +       mmdrop(mm);
12742 +}
12743 +
12744 +/* called under rq->lock with disabled interrupts */
12745 +static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
12746 +{
12747 +       struct rq *rq = cpu_rq(dead_cpu);
12748 +
12749 +       /* Must be exiting, otherwise would be on tasklist. */
12750 +       BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
12751 +
12752 +       /* Cannot have done final schedule yet: would have vanished. */
12753 +       BUG_ON(p->state == TASK_DEAD);
12754 +
12755 +       get_task_struct(p);
12756 +
12757 +       /*
12758 +        * Drop lock around migration; if someone else moves it,
12759 +        * that's OK.  No task can be added to this CPU, so iteration is
12760 +        * fine.
12761 +        * NOTE: interrupts should be left disabled  --dev@
12762 +        */
12763 +       spin_unlock(&rq->lock);
12764 +       move_task_off_dead_cpu(dead_cpu, p);
12765 +       spin_lock(&rq->lock);
12766 +
12767 +       put_task_struct(p);
12768 +}
12769 +
12770 +/* release_task() removes task from tasklist, so we won't find dead tasks. */
12771 +static void migrate_dead_tasks(unsigned int dead_cpu)
12772 +{
12773 +       struct rq *rq = cpu_rq(dead_cpu);
12774 +       unsigned int arr, i;
12775 +
12776 +       for (arr = 0; arr < 2; arr++) {
12777 +               for (i = 0; i < MAX_PRIO; i++) {
12778 +                       struct list_head *list = &rq->arrays[arr].queue[i];
12779 +
12780 +                       while (!list_empty(list))
12781 +                               migrate_dead(dead_cpu, list_entry(list->next,
12782 +                                            struct task_struct, run_list));
12783 +               }
12784 +       }
12785 +}
12786 +#endif /* CONFIG_HOTPLUG_CPU */
12787 +
12788 +/*
12789 + * migration_call - callback that gets triggered when a CPU is added.
12790 + * Here we can start up the necessary migration thread for the new CPU.
12791 + */
12792 +static int __cpuinit
12793 +migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
12794 +{
12795 +       struct task_struct *p;
12796 +       int cpu = (long)hcpu;
12797 +       unsigned long flags;
12798 +       struct rq *rq;
12799 +
12800 +       switch (action) {
12801 +       case CPU_LOCK_ACQUIRE:
12802 +               mutex_lock(&sched_hotcpu_mutex);
12803 +               break;
12804 +
12805 +       case CPU_UP_PREPARE:
12806 +       case CPU_UP_PREPARE_FROZEN:
12807 +               p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
12808 +               if (IS_ERR(p))
12809 +                       return NOTIFY_BAD;
12810 +               p->flags |= PF_NOFREEZE;
12811 +               kthread_bind(p, cpu);
12812 +               /* Must be high prio: stop_machine expects to yield to it. */
12813 +               rq = task_rq_lock(p, &flags);
12814 +               __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
12815 +               task_rq_unlock(rq, &flags);
12816 +               cpu_rq(cpu)->migration_thread = p;
12817 +               break;
12818 +
12819 +       case CPU_ONLINE:
12820 +       case CPU_ONLINE_FROZEN:
12821 +               /* Strictly unnecessary, as first user will wake it. */
12822 +               wake_up_process(cpu_rq(cpu)->migration_thread);
12823 +               break;
12824 +
12825 +#ifdef CONFIG_HOTPLUG_CPU
12826 +       case CPU_UP_CANCELED:
12827 +       case CPU_UP_CANCELED_FROZEN:
12828 +               if (!cpu_rq(cpu)->migration_thread)
12829 +                       break;
12830 +               /* Unbind it from offline cpu so it can run.  Fall thru. */
12831 +               kthread_bind(cpu_rq(cpu)->migration_thread,
12832 +                            any_online_cpu(cpu_online_map));
12833 +               kthread_stop(cpu_rq(cpu)->migration_thread);
12834 +               cpu_rq(cpu)->migration_thread = NULL;
12835 +               break;
12836 +
12837 +       case CPU_DEAD:
12838 +       case CPU_DEAD_FROZEN:
12839 +               migrate_live_tasks(cpu);
12840 +               rq = cpu_rq(cpu);
12841 +               kthread_stop(rq->migration_thread);
12842 +               rq->migration_thread = NULL;
12843 +               /* Idle task back to normal (off runqueue, low prio) */
12844 +               rq = task_rq_lock(rq->idle, &flags);
12845 +               deactivate_task(rq->idle, rq);
12846 +               rq->idle->static_prio = MAX_PRIO;
12847 +               __setscheduler(rq->idle, SCHED_NORMAL, 0);
12848 +               migrate_dead_tasks(cpu);
12849 +               task_rq_unlock(rq, &flags);
12850 +               migrate_nr_uninterruptible(rq);
12851 +               BUG_ON(rq->nr_running != 0);
12852 +
12853 +               /* No need to migrate the tasks: it was best-effort if
12854 +                * they didn't take sched_hotcpu_mutex.  Just wake up
12855 +                * the requestors. */
12856 +               spin_lock_irq(&rq->lock);
12857 +               while (!list_empty(&rq->migration_queue)) {
12858 +                       struct migration_req *req;
12859 +
12860 +                       req = list_entry(rq->migration_queue.next,
12861 +                                        struct migration_req, list);
12862 +                       list_del_init(&req->list);
12863 +                       complete(&req->done);
12864 +               }
12865 +               spin_unlock_irq(&rq->lock);
12866 +               break;
12867 +#endif
12868 +       case CPU_LOCK_RELEASE:
12869 +               mutex_unlock(&sched_hotcpu_mutex);
12870 +               break;
12871 +       }
12872 +       return NOTIFY_OK;
12873 +}
12874 +
12875 +/* Register at highest priority so that task migration (migrate_all_tasks)
12876 + * happens before everything else.
12877 + */
12878 +static struct notifier_block __cpuinitdata migration_notifier = {
12879 +       .notifier_call = migration_call,
12880 +       .priority = 10
12881 +};
12882 +
12883 +int __init migration_init(void)
12884 +{
12885 +       void *cpu = (void *)(long)smp_processor_id();
12886 +       int err;
12887 +
12888 +       /* Start one for the boot CPU: */
12889 +       err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
12890 +       BUG_ON(err == NOTIFY_BAD);
12891 +       migration_call(&migration_notifier, CPU_ONLINE, cpu);
12892 +       register_cpu_notifier(&migration_notifier);
12893 +
12894 +       return 0;
12895 +}
12896 +#endif
12897 +
12898 +#ifdef CONFIG_SMP
12899 +
12900 +/* Number of possible processor ids */
12901 +int nr_cpu_ids __read_mostly = NR_CPUS;
12902 +EXPORT_SYMBOL(nr_cpu_ids);
12903 +
12904 +#undef SCHED_DOMAIN_DEBUG
12905 +#ifdef SCHED_DOMAIN_DEBUG
12906 +static void sched_domain_debug(struct sched_domain *sd, int cpu)
12907 +{
12908 +       int level = 0;
12909 +
12910 +       if (!sd) {
12911 +               printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
12912 +               return;
12913 +       }
12914 +
12915 +       printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
12916 +
12917 +       do {
12918 +               int i;
12919 +               char str[NR_CPUS];
12920 +               struct sched_group *group = sd->groups;
12921 +               cpumask_t groupmask;
12922 +
12923 +               cpumask_scnprintf(str, NR_CPUS, sd->span);
12924 +               cpus_clear(groupmask);
12925 +
12926 +               printk(KERN_DEBUG);
12927 +               for (i = 0; i < level + 1; i++)
12928 +                       printk(" ");
12929 +               printk("domain %d: ", level);
12930 +
12931 +               if (!(sd->flags & SD_LOAD_BALANCE)) {
12932 +                       printk("does not load-balance\n");
12933 +                       if (sd->parent)
12934 +                               printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
12935 +                                               " has parent");
12936 +                       break;
12937 +               }
12938 +
12939 +               printk("span %s\n", str);
12940 +
12941 +               if (!cpu_isset(cpu, sd->span))
12942 +                       printk(KERN_ERR "ERROR: domain->span does not contain "
12943 +                                       "CPU%d\n", cpu);
12944 +               if (!cpu_isset(cpu, group->cpumask))
12945 +                       printk(KERN_ERR "ERROR: domain->groups does not contain"
12946 +                                       " CPU%d\n", cpu);
12947 +
12948 +               printk(KERN_DEBUG);
12949 +               for (i = 0; i < level + 2; i++)
12950 +                       printk(" ");
12951 +               printk("groups:");
12952 +               do {
12953 +                       if (!group) {
12954 +                               printk("\n");
12955 +                               printk(KERN_ERR "ERROR: group is NULL\n");
12956 +                               break;
12957 +                       }
12958 +
12959 +                       if (!group->__cpu_power) {
12960 +                               printk("\n");
12961 +                               printk(KERN_ERR "ERROR: domain->cpu_power not "
12962 +                                               "set\n");
12963 +                       }
12964 +
12965 +                       if (!cpus_weight(group->cpumask)) {
12966 +                               printk("\n");
12967 +                               printk(KERN_ERR "ERROR: empty group\n");
12968 +                       }
12969 +
12970 +                       if (cpus_intersects(groupmask, group->cpumask)) {
12971 +                               printk("\n");
12972 +                               printk(KERN_ERR "ERROR: repeated CPUs\n");
12973 +                       }
12974 +
12975 +                       cpus_or(groupmask, groupmask, group->cpumask);
12976 +
12977 +                       cpumask_scnprintf(str, NR_CPUS, group->cpumask);
12978 +                       printk(" %s", str);
12979 +
12980 +                       group = group->next;
12981 +               } while (group != sd->groups);
12982 +               printk("\n");
12983 +
12984 +               if (!cpus_equal(sd->span, groupmask))
12985 +                       printk(KERN_ERR "ERROR: groups don't span "
12986 +                                       "domain->span\n");
12987 +
12988 +               level++;
12989 +               sd = sd->parent;
12990 +               if (!sd)
12991 +                       continue;
12992 +
12993 +               if (!cpus_subset(groupmask, sd->span))
12994 +                       printk(KERN_ERR "ERROR: parent span is not a superset "
12995 +                               "of domain->span\n");
12996 +
12997 +       } while (sd);
12998 +}
12999 +#else
13000 +# define sched_domain_debug(sd, cpu) do { } while (0)
13001 +#endif
13002 +
13003 +static int sd_degenerate(struct sched_domain *sd)
13004 +{
13005 +       if (cpus_weight(sd->span) == 1)
13006 +               return 1;
13007 +
13008 +       /* Following flags need at least 2 groups */
13009 +       if (sd->flags & (SD_LOAD_BALANCE |
13010 +                        SD_BALANCE_NEWIDLE |
13011 +                        SD_BALANCE_FORK |
13012 +                        SD_BALANCE_EXEC |
13013 +                        SD_SHARE_CPUPOWER |
13014 +                        SD_SHARE_PKG_RESOURCES)) {
13015 +               if (sd->groups != sd->groups->next)
13016 +                       return 0;
13017 +       }
13018 +
13019 +       /* Following flags don't use groups */
13020 +       if (sd->flags & (SD_WAKE_IDLE |
13021 +                        SD_WAKE_AFFINE |
13022 +                        SD_WAKE_BALANCE))
13023 +               return 0;
13024 +
13025 +       return 1;
13026 +}
13027 +
13028 +static int
13029 +sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
13030 +{
13031 +       unsigned long cflags = sd->flags, pflags = parent->flags;
13032 +
13033 +       if (sd_degenerate(parent))
13034 +               return 1;
13035 +
13036 +       if (!cpus_equal(sd->span, parent->span))
13037 +               return 0;
13038 +
13039 +       /* Does parent contain flags not in child? */
13040 +       /* WAKE_BALANCE is a subset of WAKE_AFFINE */
13041 +       if (cflags & SD_WAKE_AFFINE)
13042 +               pflags &= ~SD_WAKE_BALANCE;
13043 +       /* Flags needing groups don't count if only 1 group in parent */
13044 +       if (parent->groups == parent->groups->next) {
13045 +               pflags &= ~(SD_LOAD_BALANCE |
13046 +                               SD_BALANCE_NEWIDLE |
13047 +                               SD_BALANCE_FORK |
13048 +                               SD_BALANCE_EXEC |
13049 +                               SD_SHARE_CPUPOWER |
13050 +                               SD_SHARE_PKG_RESOURCES);
13051 +       }
13052 +       if (~cflags & pflags)
13053 +               return 0;
13054 +
13055 +       return 1;
13056 +}
13057 +
13058 +/*
13059 + * Attach the domain 'sd' to 'cpu' as its base domain.  Callers must
13060 + * hold the hotplug lock.
13061 + */
13062 +static void cpu_attach_domain(struct sched_domain *sd, int cpu)
13063 +{
13064 +       struct rq *rq = cpu_rq(cpu);
13065 +       struct sched_domain *tmp;
13066 +
13067 +       /* Remove the sched domains which do not contribute to scheduling. */
13068 +       for (tmp = sd; tmp; tmp = tmp->parent) {
13069 +               struct sched_domain *parent = tmp->parent;
13070 +               if (!parent)
13071 +                       break;
13072 +               if (sd_parent_degenerate(tmp, parent)) {
13073 +                       tmp->parent = parent->parent;
13074 +                       if (parent->parent)
13075 +                               parent->parent->child = tmp;
13076 +               }
13077 +       }
13078 +
13079 +       if (sd && sd_degenerate(sd)) {
13080 +               sd = sd->parent;
13081 +               if (sd)
13082 +                       sd->child = NULL;
13083 +       }
13084 +
13085 +       sched_domain_debug(sd, cpu);
13086 +
13087 +       rcu_assign_pointer(rq->sd, sd);
13088 +}
13089 +
13090 +/* cpus with isolated domains */
13091 +static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
13092 +
13093 +/* Setup the mask of cpus configured for isolated domains */
13094 +static int __init isolated_cpu_setup(char *str)
13095 +{
13096 +       int ints[NR_CPUS], i;
13097 +
13098 +       str = get_options(str, ARRAY_SIZE(ints), ints);
13099 +       cpus_clear(cpu_isolated_map);
13100 +       for (i = 1; i <= ints[0]; i++)
13101 +               if (ints[i] < NR_CPUS)
13102 +                       cpu_set(ints[i], cpu_isolated_map);
13103 +       return 1;
13104 +}
13105 +
13106 +__setup ("isolcpus=", isolated_cpu_setup);
13107 +
13108 +/*
13109 + * init_sched_build_groups takes the cpumask we wish to span, and a pointer
13110 + * to a function which identifies what group(along with sched group) a CPU
13111 + * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS
13112 + * (due to the fact that we keep track of groups covered with a cpumask_t).
13113 + *
13114 + * init_sched_build_groups will build a circular linked list of the groups
13115 + * covered by the given span, and will set each group's ->cpumask correctly,
13116 + * and ->cpu_power to 0.
13117 + */
13118 +static void
13119 +init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
13120 +                       int (*group_fn)(int cpu, const cpumask_t *cpu_map,
13121 +                                       struct sched_group **sg))
13122 +{
13123 +       struct sched_group *first = NULL, *last = NULL;
13124 +       cpumask_t covered = CPU_MASK_NONE;
13125 +       int i;
13126 +
13127 +       for_each_cpu_mask(i, span) {
13128 +               struct sched_group *sg;
13129 +               int group = group_fn(i, cpu_map, &sg);
13130 +               int j;
13131 +
13132 +               if (cpu_isset(i, covered))
13133 +                       continue;
13134 +
13135 +               sg->cpumask = CPU_MASK_NONE;
13136 +               sg->__cpu_power = 0;
13137 +
13138 +               for_each_cpu_mask(j, span) {
13139 +                       if (group_fn(j, cpu_map, NULL) != group)
13140 +                               continue;
13141 +
13142 +                       cpu_set(j, covered);
13143 +                       cpu_set(j, sg->cpumask);
13144 +               }
13145 +               if (!first)
13146 +                       first = sg;
13147 +               if (last)
13148 +                       last->next = sg;
13149 +               last = sg;
13150 +       }
13151 +       last->next = first;
13152 +}
13153 +
13154 +#define SD_NODES_PER_DOMAIN 16
13155 +
13156 +/*
13157 + * Self-tuning task migration cost measurement between source and target CPUs.
13158 + *
13159 + * This is done by measuring the cost of manipulating buffers of varying
13160 + * sizes. For a given buffer-size here are the steps that are taken:
13161 + *
13162 + * 1) the source CPU reads+dirties a shared buffer
13163 + * 2) the target CPU reads+dirties the same shared buffer
13164 + *
13165 + * We measure how long they take, in the following 4 scenarios:
13166 + *
13167 + *  - source: CPU1, target: CPU2 | cost1
13168 + *  - source: CPU2, target: CPU1 | cost2
13169 + *  - source: CPU1, target: CPU1 | cost3
13170 + *  - source: CPU2, target: CPU2 | cost4
13171 + *
13172 + * We then calculate the cost3+cost4-cost1-cost2 difference - this is
13173 + * the cost of migration.
13174 + *
13175 + * We then start off from a small buffer-size and iterate up to larger
13176 + * buffer sizes, in 5% steps - measuring each buffer-size separately, and
13177 + * doing a maximum search for the cost. (The maximum cost for a migration
13178 + * normally occurs when the working set size is around the effective cache
13179 + * size.)
13180 + */
13181 +#define SEARCH_SCOPE           2
13182 +#define MIN_CACHE_SIZE         (64*1024U)
13183 +#define DEFAULT_CACHE_SIZE     (5*1024*1024U)
13184 +#define ITERATIONS             1
13185 +#define SIZE_THRESH            130
13186 +#define COST_THRESH            130
13187 +
13188 +/*
13189 + * The migration cost is a function of 'domain distance'. Domain
13190 + * distance is the number of steps a CPU has to iterate down its
13191 + * domain tree to share a domain with the other CPU. The farther
13192 + * two CPUs are from each other, the larger the distance gets.
13193 + *
13194 + * Note that we use the distance only to cache measurement results,
13195 + * the distance value is not used numerically otherwise. When two
13196 + * CPUs have the same distance it is assumed that the migration
13197 + * cost is the same. (this is a simplification but quite practical)
13198 + */
13199 +#define MAX_DOMAIN_DISTANCE 32
13200 +
13201 +static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
13202 +               { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
13203 +/*
13204 + * Architectures may override the migration cost and thus avoid
13205 + * boot-time calibration. Unit is nanoseconds. Mostly useful for
13206 + * virtualized hardware:
13207 + */
13208 +#ifdef CONFIG_DEFAULT_MIGRATION_COST
13209 +                       CONFIG_DEFAULT_MIGRATION_COST
13210 +#else
13211 +                       -1LL
13212 +#endif
13213 +};
13214 +
13215 +/*
13216 + * Allow override of migration cost - in units of microseconds.
13217 + * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
13218 + * of 1 msec, level-2 cost of 2 msecs and level3 cost of 3 msecs:
13219 + */
13220 +static int __init migration_cost_setup(char *str)
13221 +{
13222 +       int ints[MAX_DOMAIN_DISTANCE+1], i;
13223 +
13224 +       str = get_options(str, ARRAY_SIZE(ints), ints);
13225 +
13226 +       printk("#ints: %d\n", ints[0]);
13227 +       for (i = 1; i <= ints[0]; i++) {
13228 +               migration_cost[i-1] = (unsigned long long)ints[i]*1000;
13229 +               printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
13230 +       }
13231 +       return 1;
13232 +}
13233 +
13234 +__setup ("migration_cost=", migration_cost_setup);
13235 +
13236 +/*
13237 + * Global multiplier (divisor) for migration-cutoff values,
13238 + * in percentiles. E.g. use a value of 150 to get 1.5 times
13239 + * longer cache-hot cutoff times.
13240 + *
13241 + * (We scale it from 100 to 128 to make long long handling easier.)
13242 + */
13243 +
13244 +#define MIGRATION_FACTOR_SCALE 128
13245 +
13246 +static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
13247 +
13248 +static int __init setup_migration_factor(char *str)
13249 +{
13250 +       get_option(&str, &migration_factor);
13251 +       migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
13252 +       return 1;
13253 +}
13254 +
13255 +__setup("migration_factor=", setup_migration_factor);
13256 +
13257 +/*
13258 + * Estimated distance of two CPUs, measured via the number of domains
13259 + * we have to pass for the two CPUs to be in the same span:
13260 + */
13261 +static unsigned long domain_distance(int cpu1, int cpu2)
13262 +{
13263 +       unsigned long distance = 0;
13264 +       struct sched_domain *sd;
13265 +
13266 +       for_each_domain(cpu1, sd) {
13267 +               WARN_ON(!cpu_isset(cpu1, sd->span));
13268 +               if (cpu_isset(cpu2, sd->span))
13269 +                       return distance;
13270 +               distance++;
13271 +       }
13272 +       if (distance >= MAX_DOMAIN_DISTANCE) {
13273 +               WARN_ON(1);
13274 +               distance = MAX_DOMAIN_DISTANCE-1;
13275 +       }
13276 +
13277 +       return distance;
13278 +}
13279 +
13280 +static unsigned int migration_debug;
13281 +
13282 +static int __init setup_migration_debug(char *str)
13283 +{
13284 +       get_option(&str, &migration_debug);
13285 +       return 1;
13286 +}
13287 +
13288 +__setup("migration_debug=", setup_migration_debug);
13289 +
13290 +/*
13291 + * Maximum cache-size that the scheduler should try to measure.
13292 + * Architectures with larger caches should tune this up during
13293 + * bootup. Gets used in the domain-setup code (i.e. during SMP
13294 + * bootup).
13295 + */
13296 +unsigned int max_cache_size;
13297 +
13298 +static int __init setup_max_cache_size(char *str)
13299 +{
13300 +       get_option(&str, &max_cache_size);
13301 +       return 1;
13302 +}
13303 +
13304 +__setup("max_cache_size=", setup_max_cache_size);
13305 +
13306 +/*
13307 + * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
13308 + * is the operation that is timed, so we try to generate unpredictable
13309 + * cachemisses that still end up filling the L2 cache:
13310 + */
13311 +static void touch_cache(void *__cache, unsigned long __size)
13312 +{
13313 +       unsigned long size = __size / sizeof(long);
13314 +       unsigned long chunk1 = size / 3;
13315 +       unsigned long chunk2 = 2 * size / 3;
13316 +       unsigned long *cache = __cache;
13317 +       int i;
13318 +
13319 +       for (i = 0; i < size/6; i += 8) {
13320 +               switch (i % 6) {
13321 +                       case 0: cache[i]++;
13322 +                       case 1: cache[size-1-i]++;
13323 +                       case 2: cache[chunk1-i]++;
13324 +                       case 3: cache[chunk1+i]++;
13325 +                       case 4: cache[chunk2-i]++;
13326 +                       case 5: cache[chunk2+i]++;
13327 +               }
13328 +       }
13329 +}
13330 +
13331 +/*
13332 + * Measure the cache-cost of one task migration. Returns in units of nsec.
13333 + */
13334 +static unsigned long long
13335 +measure_one(void *cache, unsigned long size, int source, int target)
13336 +{
13337 +       cpumask_t mask, saved_mask;
13338 +       unsigned long long t0, t1, t2, t3, cost;
13339 +
13340 +       saved_mask = current->cpus_allowed;
13341 +
13342 +       /*
13343 +        * Flush source caches to RAM and invalidate them:
13344 +        */
13345 +       sched_cacheflush();
13346 +
13347 +       /*
13348 +        * Migrate to the source CPU:
13349 +        */
13350 +       mask = cpumask_of_cpu(source);
13351 +       set_cpus_allowed(current, mask);
13352 +       WARN_ON(smp_processor_id() != source);
13353 +
13354 +       /*
13355 +        * Dirty the working set:
13356 +        */
13357 +       t0 = sched_clock();
13358 +       touch_cache(cache, size);
13359 +       t1 = sched_clock();
13360 +
13361 +       /*
13362 +        * Migrate to the target CPU, dirty the L2 cache and access
13363 +        * the shared buffer. (which represents the working set
13364 +        * of a migrated task.)
13365 +        */
13366 +       mask = cpumask_of_cpu(target);
13367 +       set_cpus_allowed(current, mask);
13368 +       WARN_ON(smp_processor_id() != target);
13369 +
13370 +       t2 = sched_clock();
13371 +       touch_cache(cache, size);
13372 +       t3 = sched_clock();
13373 +
13374 +       cost = t1-t0 + t3-t2;
13375 +
13376 +       if (migration_debug >= 2)
13377 +               printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
13378 +                       source, target, t1-t0, t1-t0, t3-t2, cost);
13379 +       /*
13380 +        * Flush target caches to RAM and invalidate them:
13381 +        */
13382 +       sched_cacheflush();
13383 +
13384 +       set_cpus_allowed(current, saved_mask);
13385 +
13386 +       return cost;
13387 +}
13388 +
13389 +/*
13390 + * Measure a series of task migrations and return the average
13391 + * result. Since this code runs early during bootup the system
13392 + * is 'undisturbed' and the average latency makes sense.
13393 + *
13394 + * The algorithm in essence auto-detects the relevant cache-size,
13395 + * so it will properly detect different cachesizes for different
13396 + * cache-hierarchies, depending on how the CPUs are connected.
13397 + *
13398 + * Architectures can prime the upper limit of the search range via
13399 + * max_cache_size, otherwise the search range defaults to 20MB...64K.
13400 + */
13401 +static unsigned long long
13402 +measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
13403 +{
13404 +       unsigned long long cost1, cost2;
13405 +       int i;
13406 +
13407 +       /*
13408 +        * Measure the migration cost of 'size' bytes, over an
13409 +        * average of 10 runs:
13410 +        *
13411 +        * (We perturb the cache size by a small (0..4k)
13412 +        *  value to compensate size/alignment related artifacts.
13413 +        *  We also subtract the cost of the operation done on
13414 +        *  the same CPU.)
13415 +        */
13416 +       cost1 = 0;
13417 +
13418 +       /*
13419 +        * dry run, to make sure we start off cache-cold on cpu1,
13420 +        * and to get any vmalloc pagefaults in advance:
13421 +        */
13422 +       measure_one(cache, size, cpu1, cpu2);
13423 +       for (i = 0; i < ITERATIONS; i++)
13424 +               cost1 += measure_one(cache, size - i * 1024, cpu1, cpu2);
13425 +
13426 +       measure_one(cache, size, cpu2, cpu1);
13427 +       for (i = 0; i < ITERATIONS; i++)
13428 +               cost1 += measure_one(cache, size - i * 1024, cpu2, cpu1);
13429 +
13430 +       /*
13431 +        * (We measure the non-migrating [cached] cost on both
13432 +        *  cpu1 and cpu2, to handle CPUs with different speeds)
13433 +        */
13434 +       cost2 = 0;
13435 +
13436 +       measure_one(cache, size, cpu1, cpu1);
13437 +       for (i = 0; i < ITERATIONS; i++)
13438 +               cost2 += measure_one(cache, size - i * 1024, cpu1, cpu1);
13439 +
13440 +       measure_one(cache, size, cpu2, cpu2);
13441 +       for (i = 0; i < ITERATIONS; i++)
13442 +               cost2 += measure_one(cache, size - i * 1024, cpu2, cpu2);
13443 +
13444 +       /*
13445 +        * Get the per-iteration migration cost:
13446 +        */
13447 +       do_div(cost1, 2 * ITERATIONS);
13448 +       do_div(cost2, 2 * ITERATIONS);
13449 +
13450 +       return cost1 - cost2;
13451 +}
13452 +
13453 +static unsigned long long measure_migration_cost(int cpu1, int cpu2)
13454 +{
13455 +       unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
13456 +       unsigned int max_size, size, size_found = 0;
13457 +       long long cost = 0, prev_cost;
13458 +       void *cache;
13459 +
13460 +       /*
13461 +        * Search from max_cache_size*5 down to 64K - the real relevant
13462 +        * cachesize has to lie somewhere in between.
13463 +        */
13464 +       if (max_cache_size) {
13465 +               max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
13466 +               size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
13467 +       } else {
13468 +               /*
13469 +                * Since we have no estimation about the relevant
13470 +                * search range
13471 +                */
13472 +               max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
13473 +               size = MIN_CACHE_SIZE;
13474 +       }
13475 +
13476 +       if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
13477 +               printk("cpu %d and %d not both online!\n", cpu1, cpu2);
13478 +               return 0;
13479 +       }
13480 +
13481 +       /*
13482 +        * Allocate the working set:
13483 +        */
13484 +       cache = vmalloc(max_size);
13485 +       if (!cache) {
13486 +               printk("could not vmalloc %d bytes for cache!\n", 2 * max_size);
13487 +               return 1000000; /* return 1 msec on very small boxen */
13488 +       }
13489 +
13490 +       while (size <= max_size) {
13491 +               prev_cost = cost;
13492 +               cost = measure_cost(cpu1, cpu2, cache, size);
13493 +
13494 +               /*
13495 +                * Update the max:
13496 +                */
13497 +               if (cost > 0) {
13498 +                       if (max_cost < cost) {
13499 +                               max_cost = cost;
13500 +                               size_found = size;
13501 +                       }
13502 +               }
13503 +               /*
13504 +                * Calculate average fluctuation, we use this to prevent
13505 +                * noise from triggering an early break out of the loop:
13506 +                */
13507 +               fluct = abs(cost - prev_cost);
13508 +               avg_fluct = (avg_fluct + fluct)/2;
13509 +
13510 +               if (migration_debug)
13511 +                       printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): "
13512 +                               "(%8Ld %8Ld)\n",
13513 +                               cpu1, cpu2, size,
13514 +                               (long)cost / 1000000,
13515 +                               ((long)cost / 100000) % 10,
13516 +                               (long)max_cost / 1000000,
13517 +                               ((long)max_cost / 100000) % 10,
13518 +                               domain_distance(cpu1, cpu2),
13519 +                               cost, avg_fluct);
13520 +
13521 +               /*
13522 +                * If we iterated at least 20% past the previous maximum,
13523 +                * and the cost has dropped by more than 20% already,
13524 +                * (taking fluctuations into account) then we assume to
13525 +                * have found the maximum and break out of the loop early:
13526 +                */
13527 +               if (size_found && (size*100 > size_found*SIZE_THRESH))
13528 +                       if (cost+avg_fluct <= 0 ||
13529 +                               max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
13530 +
13531 +                               if (migration_debug)
13532 +                                       printk("-> found max.\n");
13533 +                               break;
13534 +                       }
13535 +               /*
13536 +                * Increase the cachesize in 10% steps:
13537 +                */
13538 +               size = size * 10 / 9;
13539 +       }
13540 +
13541 +       if (migration_debug)
13542 +               printk("[%d][%d] working set size found: %d, cost: %Ld\n",
13543 +                       cpu1, cpu2, size_found, max_cost);
13544 +
13545 +       vfree(cache);
13546 +
13547 +       /*
13548 +        * A task is considered 'cache cold' if at least 2 times
13549 +        * the worst-case cost of migration has passed.
13550 +        *
13551 +        * (this limit is only listened to if the load-balancing
13552 +        * situation is 'nice' - if there is a large imbalance we
13553 +        * ignore it for the sake of CPU utilization and
13554 +        * processing fairness.)
13555 +        */
13556 +       return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
13557 +}
13558 +
13559 +static void calibrate_migration_costs(const cpumask_t *cpu_map)
13560 +{
13561 +       int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
13562 +       unsigned long j0, j1, distance, max_distance = 0;
13563 +       struct sched_domain *sd;
13564 +
13565 +       j0 = jiffies;
13566 +
13567 +       /*
13568 +        * First pass - calculate the cacheflush times:
13569 +        */
13570 +       for_each_cpu_mask(cpu1, *cpu_map) {
13571 +               for_each_cpu_mask(cpu2, *cpu_map) {
13572 +                       if (cpu1 == cpu2)
13573 +                               continue;
13574 +                       distance = domain_distance(cpu1, cpu2);
13575 +                       max_distance = max(max_distance, distance);
13576 +                       /*
13577 +                        * No result cached yet?
13578 +                        */
13579 +                       if (migration_cost[distance] == -1LL)
13580 +                               migration_cost[distance] =
13581 +                                       measure_migration_cost(cpu1, cpu2);
13582 +               }
13583 +       }
13584 +       /*
13585 +        * Second pass - update the sched domain hierarchy with
13586 +        * the new cache-hot-time estimations:
13587 +        */
13588 +       for_each_cpu_mask(cpu, *cpu_map) {
13589 +               distance = 0;
13590 +               for_each_domain(cpu, sd) {
13591 +                       sd->cache_hot_time = migration_cost[distance];
13592 +                       distance++;
13593 +               }
13594 +       }
13595 +       /*
13596 +        * Print the matrix:
13597 +        */
13598 +       if (migration_debug)
13599 +               printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
13600 +                       max_cache_size,
13601 +#ifdef CONFIG_X86
13602 +                       cpu_khz/1000
13603 +#else
13604 +                       -1
13605 +#endif
13606 +               );
13607 +       if (system_state == SYSTEM_BOOTING && num_online_cpus() > 1) {
13608 +               printk("migration_cost=");
13609 +               for (distance = 0; distance <= max_distance; distance++) {
13610 +                       if (distance)
13611 +                               printk(",");
13612 +                       printk("%ld", (long)migration_cost[distance] / 1000);
13613 +               }
13614 +               printk("\n");
13615 +       }
13616 +       j1 = jiffies;
13617 +       if (migration_debug)
13618 +               printk("migration: %ld seconds\n", (j1-j0) / HZ);
13619 +
13620 +       /*
13621 +        * Move back to the original CPU. NUMA-Q gets confused
13622 +        * if we migrate to another quad during bootup.
13623 +        */
13624 +       if (raw_smp_processor_id() != orig_cpu) {
13625 +               cpumask_t mask = cpumask_of_cpu(orig_cpu),
13626 +                       saved_mask = current->cpus_allowed;
13627 +
13628 +               set_cpus_allowed(current, mask);
13629 +               set_cpus_allowed(current, saved_mask);
13630 +       }
13631 +}
13632 +
13633 +#ifdef CONFIG_NUMA
13634 +
13635 +/**
13636 + * find_next_best_node - find the next node to include in a sched_domain
13637 + * @node: node whose sched_domain we're building
13638 + * @used_nodes: nodes already in the sched_domain
13639 + *
13640 + * Find the next node to include in a given scheduling domain.  Simply
13641 + * finds the closest node not already in the @used_nodes map.
13642 + *
13643 + * Should use nodemask_t.
13644 + */
13645 +static int find_next_best_node(int node, unsigned long *used_nodes)
13646 +{
13647 +       int i, n, val, min_val, best_node = 0;
13648 +
13649 +       min_val = INT_MAX;
13650 +
13651 +       for (i = 0; i < MAX_NUMNODES; i++) {
13652 +               /* Start at @node */
13653 +               n = (node + i) % MAX_NUMNODES;
13654 +
13655 +               if (!nr_cpus_node(n))
13656 +                       continue;
13657 +
13658 +               /* Skip already used nodes */
13659 +               if (test_bit(n, used_nodes))
13660 +                       continue;
13661 +
13662 +               /* Simple min distance search */
13663 +               val = node_distance(node, n);
13664 +
13665 +               if (val < min_val) {
13666 +                       min_val = val;
13667 +                       best_node = n;
13668 +               }
13669 +       }
13670 +
13671 +       set_bit(best_node, used_nodes);
13672 +       return best_node;
13673 +}
13674 +
13675 +/**
13676 + * sched_domain_node_span - get a cpumask for a node's sched_domain
13677 + * @node: node whose cpumask we're constructing
13678 + * @size: number of nodes to include in this span
13679 + *
13680 + * Given a node, construct a good cpumask for its sched_domain to span.  It
13681 + * should be one that prevents unnecessary balancing, but also spreads tasks
13682 + * out optimally.
13683 + */
13684 +static cpumask_t sched_domain_node_span(int node)
13685 +{
13686 +       DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
13687 +       cpumask_t span, nodemask;
13688 +       int i;
13689 +
13690 +       cpus_clear(span);
13691 +       bitmap_zero(used_nodes, MAX_NUMNODES);
13692 +
13693 +       nodemask = node_to_cpumask(node);
13694 +       cpus_or(span, span, nodemask);
13695 +       set_bit(node, used_nodes);
13696 +
13697 +       for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
13698 +               int next_node = find_next_best_node(node, used_nodes);
13699 +
13700 +               nodemask = node_to_cpumask(next_node);
13701 +               cpus_or(span, span, nodemask);
13702 +       }
13703 +
13704 +       return span;
13705 +}
13706 +#endif
13707 +
13708 +int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
13709 +
13710 +/*
13711 + * SMT sched-domains:
13712 + */
13713 +#ifdef CONFIG_SCHED_SMT
13714 +static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
13715 +static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
13716 +
13717 +static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
13718 +                           struct sched_group **sg)
13719 +{
13720 +       if (sg)
13721 +               *sg = &per_cpu(sched_group_cpus, cpu);
13722 +       return cpu;
13723 +}
13724 +#endif
13725 +
13726 +/*
13727 + * multi-core sched-domains:
13728 + */
13729 +#ifdef CONFIG_SCHED_MC
13730 +static DEFINE_PER_CPU(struct sched_domain, core_domains);
13731 +static DEFINE_PER_CPU(struct sched_group, sched_group_core);
13732 +#endif
13733 +
13734 +#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
13735 +static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
13736 +                            struct sched_group **sg)
13737 +{
13738 +       int group;
13739 +       cpumask_t mask = cpu_sibling_map[cpu];
13740 +       cpus_and(mask, mask, *cpu_map);
13741 +       group = first_cpu(mask);
13742 +       if (sg)
13743 +               *sg = &per_cpu(sched_group_core, group);
13744 +       return group;
13745 +}
13746 +#elif defined(CONFIG_SCHED_MC)
13747 +static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
13748 +                            struct sched_group **sg)
13749 +{
13750 +       if (sg)
13751 +               *sg = &per_cpu(sched_group_core, cpu);
13752 +       return cpu;
13753 +}
13754 +#endif
13755 +
13756 +static DEFINE_PER_CPU(struct sched_domain, phys_domains);
13757 +static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
13758 +
13759 +static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
13760 +                            struct sched_group **sg)
13761 +{
13762 +       int group;
13763 +#ifdef CONFIG_SCHED_MC
13764 +       cpumask_t mask = cpu_coregroup_map(cpu);
13765 +       cpus_and(mask, mask, *cpu_map);
13766 +       group = first_cpu(mask);
13767 +#elif defined(CONFIG_SCHED_SMT)
13768 +       cpumask_t mask = cpu_sibling_map[cpu];
13769 +       cpus_and(mask, mask, *cpu_map);
13770 +       group = first_cpu(mask);
13771 +#else
13772 +       group = cpu;
13773 +#endif
13774 +       if (sg)
13775 +               *sg = &per_cpu(sched_group_phys, group);
13776 +       return group;
13777 +}
13778 +
13779 +#ifdef CONFIG_NUMA
13780 +/*
13781 + * The init_sched_build_groups can't handle what we want to do with node
13782 + * groups, so roll our own. Now each node has its own list of groups which
13783 + * gets dynamically allocated.
13784 + */
13785 +static DEFINE_PER_CPU(struct sched_domain, node_domains);
13786 +static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
13787 +
13788 +static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
13789 +static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
13790 +
13791 +static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
13792 +                                struct sched_group **sg)
13793 +{
13794 +       cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
13795 +       int group;
13796 +
13797 +       cpus_and(nodemask, nodemask, *cpu_map);
13798 +       group = first_cpu(nodemask);
13799 +
13800 +       if (sg)
13801 +               *sg = &per_cpu(sched_group_allnodes, group);
13802 +       return group;
13803 +}
13804 +
13805 +static void init_numa_sched_groups_power(struct sched_group *group_head)
13806 +{
13807 +       struct sched_group *sg = group_head;
13808 +       int j;
13809 +
13810 +       if (!sg)
13811 +               return;
13812 +next_sg:
13813 +       for_each_cpu_mask(j, sg->cpumask) {
13814 +               struct sched_domain *sd;
13815 +
13816 +               sd = &per_cpu(phys_domains, j);
13817 +               if (j != first_cpu(sd->groups->cpumask)) {
13818 +                       /*
13819 +                        * Only add "power" once for each
13820 +                        * physical package.
13821 +                        */
13822 +                       continue;
13823 +               }
13824 +
13825 +               sg_inc_cpu_power(sg, sd->groups->__cpu_power);
13826 +       }
13827 +       sg = sg->next;
13828 +       if (sg != group_head)
13829 +               goto next_sg;
13830 +}
13831 +#endif
13832 +
13833 +#ifdef CONFIG_NUMA
13834 +/* Free memory allocated for various sched_group structures */
13835 +static void free_sched_groups(const cpumask_t *cpu_map)
13836 +{
13837 +       int cpu, i;
13838 +
13839 +       for_each_cpu_mask(cpu, *cpu_map) {
13840 +               struct sched_group **sched_group_nodes
13841 +                       = sched_group_nodes_bycpu[cpu];
13842 +
13843 +               if (!sched_group_nodes)
13844 +                       continue;
13845 +
13846 +               for (i = 0; i < MAX_NUMNODES; i++) {
13847 +                       cpumask_t nodemask = node_to_cpumask(i);
13848 +                       struct sched_group *oldsg, *sg = sched_group_nodes[i];
13849 +
13850 +                       cpus_and(nodemask, nodemask, *cpu_map);
13851 +                       if (cpus_empty(nodemask))
13852 +                               continue;
13853 +
13854 +                       if (sg == NULL)
13855 +                               continue;
13856 +                       sg = sg->next;
13857 +next_sg:
13858 +                       oldsg = sg;
13859 +                       sg = sg->next;
13860 +                       kfree(oldsg);
13861 +                       if (oldsg != sched_group_nodes[i])
13862 +                               goto next_sg;
13863 +               }
13864 +               kfree(sched_group_nodes);
13865 +               sched_group_nodes_bycpu[cpu] = NULL;
13866 +       }
13867 +}
13868 +#else
13869 +static void free_sched_groups(const cpumask_t *cpu_map)
13870 +{
13871 +}
13872 +#endif
13873 +
13874 +/*
13875 + * Initialize sched groups cpu_power.
13876 + *
13877 + * cpu_power indicates the capacity of sched group, which is used while
13878 + * distributing the load between different sched groups in a sched domain.
13879 + * Typically cpu_power for all the groups in a sched domain will be same unless
13880 + * there are asymmetries in the topology. If there are asymmetries, group
13881 + * having more cpu_power will pickup more load compared to the group having
13882 + * less cpu_power.
13883 + *
13884 + * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
13885 + * the maximum number of tasks a group can handle in the presence of other idle
13886 + * or lightly loaded groups in the same sched domain.
13887 + */
13888 +static void init_sched_groups_power(int cpu, struct sched_domain *sd)
13889 +{
13890 +       struct sched_domain *child;
13891 +       struct sched_group *group;
13892 +
13893 +       WARN_ON(!sd || !sd->groups);
13894 +
13895 +       if (cpu != first_cpu(sd->groups->cpumask))
13896 +               return;
13897 +
13898 +       child = sd->child;
13899 +
13900 +       sd->groups->__cpu_power = 0;
13901 +
13902 +       /*
13903 +        * For perf policy, if the groups in child domain share resources
13904 +        * (for example cores sharing some portions of the cache hierarchy
13905 +        * or SMT), then set this domain groups cpu_power such that each group
13906 +        * can handle only one task, when there are other idle groups in the
13907 +        * same sched domain.
13908 +        */
13909 +       if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
13910 +                      (child->flags &
13911 +                       (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
13912 +               sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
13913 +               return;
13914 +       }
13915 +
13916 +       /*
13917 +        * add cpu_power of each child group to this groups cpu_power
13918 +        */
13919 +       group = child->groups;
13920 +       do {
13921 +               sg_inc_cpu_power(sd->groups, group->__cpu_power);
13922 +               group = group->next;
13923 +       } while (group != child->groups);
13924 +}
13925 +
13926 +/*
13927 + * Build sched domains for a given set of cpus and attach the sched domains
13928 + * to the individual cpus
13929 + */
13930 +static int build_sched_domains(const cpumask_t *cpu_map)
13931 +{
13932 +       int i;
13933 +       struct sched_domain *sd;
13934 +#ifdef CONFIG_NUMA
13935 +       struct sched_group **sched_group_nodes = NULL;
13936 +       int sd_allnodes = 0;
13937 +
13938 +       /*
13939 +        * Allocate the per-node list of sched groups
13940 +        */
13941 +       sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
13942 +                                          GFP_KERNEL);
13943 +       if (!sched_group_nodes) {
13944 +               printk(KERN_WARNING "Can not alloc sched group node list\n");
13945 +               return -ENOMEM;
13946 +       }
13947 +       sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
13948 +#endif
13949 +
13950 +       /*
13951 +        * Set up domains for cpus specified by the cpu_map.
13952 +        */
13953 +       for_each_cpu_mask(i, *cpu_map) {
13954 +               struct sched_domain *sd = NULL, *p;
13955 +               cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
13956 +
13957 +               cpus_and(nodemask, nodemask, *cpu_map);
13958 +
13959 +#ifdef CONFIG_NUMA
13960 +               if (cpus_weight(*cpu_map)
13961 +                               > SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
13962 +                       sd = &per_cpu(allnodes_domains, i);
13963 +                       *sd = SD_ALLNODES_INIT;
13964 +                       sd->span = *cpu_map;
13965 +                       cpu_to_allnodes_group(i, cpu_map, &sd->groups);
13966 +                       p = sd;
13967 +                       sd_allnodes = 1;
13968 +               } else
13969 +                       p = NULL;
13970 +
13971 +               sd = &per_cpu(node_domains, i);
13972 +               *sd = SD_NODE_INIT;
13973 +               sd->span = sched_domain_node_span(cpu_to_node(i));
13974 +               sd->parent = p;
13975 +               if (p)
13976 +                       p->child = sd;
13977 +               cpus_and(sd->span, sd->span, *cpu_map);
13978 +#endif
13979 +
13980 +               p = sd;
13981 +               sd = &per_cpu(phys_domains, i);
13982 +               *sd = SD_CPU_INIT;
13983 +               sd->span = nodemask;
13984 +               sd->parent = p;
13985 +               if (p)
13986 +                       p->child = sd;
13987 +               cpu_to_phys_group(i, cpu_map, &sd->groups);
13988 +
13989 +#ifdef CONFIG_SCHED_MC
13990 +               p = sd;
13991 +               sd = &per_cpu(core_domains, i);
13992 +               *sd = SD_MC_INIT;
13993 +               sd->span = cpu_coregroup_map(i);
13994 +               cpus_and(sd->span, sd->span, *cpu_map);
13995 +               sd->parent = p;
13996 +               p->child = sd;
13997 +               cpu_to_core_group(i, cpu_map, &sd->groups);
13998 +#endif
13999 +
14000 +#ifdef CONFIG_SCHED_SMT
14001 +               p = sd;
14002 +               sd = &per_cpu(cpu_domains, i);
14003 +               *sd = SD_SIBLING_INIT;
14004 +               sd->span = cpu_sibling_map[i];
14005 +               cpus_and(sd->span, sd->span, *cpu_map);
14006 +               sd->parent = p;
14007 +               p->child = sd;
14008 +               cpu_to_cpu_group(i, cpu_map, &sd->groups);
14009 +#endif
14010 +       }
14011 +
14012 +#ifdef CONFIG_SCHED_SMT
14013 +       /* Set up CPU (sibling) groups */
14014 +       for_each_cpu_mask(i, *cpu_map) {
14015 +               cpumask_t this_sibling_map = cpu_sibling_map[i];
14016 +               cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
14017 +               if (i != first_cpu(this_sibling_map))
14018 +                       continue;
14019 +
14020 +               init_sched_build_groups(this_sibling_map, cpu_map, &cpu_to_cpu_group);
14021 +       }
14022 +#endif
14023 +
14024 +#ifdef CONFIG_SCHED_MC
14025 +       /* Set up multi-core groups */
14026 +       for_each_cpu_mask(i, *cpu_map) {
14027 +               cpumask_t this_core_map = cpu_coregroup_map(i);
14028 +               cpus_and(this_core_map, this_core_map, *cpu_map);
14029 +               if (i != first_cpu(this_core_map))
14030 +                       continue;
14031 +               init_sched_build_groups(this_core_map, cpu_map, &cpu_to_core_group);
14032 +       }
14033 +#endif
14034 +
14035 +
14036 +       /* Set up physical groups */
14037 +       for (i = 0; i < MAX_NUMNODES; i++) {
14038 +               cpumask_t nodemask = node_to_cpumask(i);
14039 +
14040 +               cpus_and(nodemask, nodemask, *cpu_map);
14041 +               if (cpus_empty(nodemask))
14042 +                       continue;
14043 +
14044 +               init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
14045 +       }
14046 +
14047 +#ifdef CONFIG_NUMA
14048 +       /* Set up node groups */
14049 +       if (sd_allnodes)
14050 +               init_sched_build_groups(*cpu_map, cpu_map, &cpu_to_allnodes_group);
14051 +
14052 +       for (i = 0; i < MAX_NUMNODES; i++) {
14053 +               /* Set up node groups */
14054 +               struct sched_group *sg, *prev;
14055 +               cpumask_t nodemask = node_to_cpumask(i);
14056 +               cpumask_t domainspan;
14057 +               cpumask_t covered = CPU_MASK_NONE;
14058 +               int j;
14059 +
14060 +               cpus_and(nodemask, nodemask, *cpu_map);
14061 +               if (cpus_empty(nodemask)) {
14062 +                       sched_group_nodes[i] = NULL;
14063 +                       continue;
14064 +               }
14065 +
14066 +               domainspan = sched_domain_node_span(i);
14067 +               cpus_and(domainspan, domainspan, *cpu_map);
14068 +
14069 +               sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
14070 +               if (!sg) {
14071 +                       printk(KERN_WARNING "Can not alloc domain group for "
14072 +                               "node %d\n", i);
14073 +                       goto error;
14074 +               }
14075 +               sched_group_nodes[i] = sg;
14076 +               for_each_cpu_mask(j, nodemask) {
14077 +                       struct sched_domain *sd;
14078 +                       sd = &per_cpu(node_domains, j);
14079 +                       sd->groups = sg;
14080 +               }
14081 +               sg->__cpu_power = 0;
14082 +               sg->cpumask = nodemask;
14083 +               sg->next = sg;
14084 +               cpus_or(covered, covered, nodemask);
14085 +               prev = sg;
14086 +
14087 +               for (j = 0; j < MAX_NUMNODES; j++) {
14088 +                       cpumask_t tmp, notcovered;
14089 +                       int n = (i + j) % MAX_NUMNODES;
14090 +
14091 +                       cpus_complement(notcovered, covered);
14092 +                       cpus_and(tmp, notcovered, *cpu_map);
14093 +                       cpus_and(tmp, tmp, domainspan);
14094 +                       if (cpus_empty(tmp))
14095 +                               break;
14096 +
14097 +                       nodemask = node_to_cpumask(n);
14098 +                       cpus_and(tmp, tmp, nodemask);
14099 +                       if (cpus_empty(tmp))
14100 +                               continue;
14101 +
14102 +                       sg = kmalloc_node(sizeof(struct sched_group),
14103 +                                         GFP_KERNEL, i);
14104 +                       if (!sg) {
14105 +                               printk(KERN_WARNING
14106 +                               "Can not alloc domain group for node %d\n", j);
14107 +                               goto error;
14108 +                       }
14109 +                       sg->__cpu_power = 0;
14110 +                       sg->cpumask = tmp;
14111 +                       sg->next = prev->next;
14112 +                       cpus_or(covered, covered, tmp);
14113 +                       prev->next = sg;
14114 +                       prev = sg;
14115 +               }
14116 +       }
14117 +#endif
14118 +
14119 +       /* Calculate CPU power for physical packages and nodes */
14120 +#ifdef CONFIG_SCHED_SMT
14121 +       for_each_cpu_mask(i, *cpu_map) {
14122 +               sd = &per_cpu(cpu_domains, i);
14123 +               init_sched_groups_power(i, sd);
14124 +       }
14125 +#endif
14126 +#ifdef CONFIG_SCHED_MC
14127 +       for_each_cpu_mask(i, *cpu_map) {
14128 +               sd = &per_cpu(core_domains, i);
14129 +               init_sched_groups_power(i, sd);
14130 +       }
14131 +#endif
14132 +
14133 +       for_each_cpu_mask(i, *cpu_map) {
14134 +               sd = &per_cpu(phys_domains, i);
14135 +               init_sched_groups_power(i, sd);
14136 +       }
14137 +
14138 +#ifdef CONFIG_NUMA
14139 +       for (i = 0; i < MAX_NUMNODES; i++)
14140 +               init_numa_sched_groups_power(sched_group_nodes[i]);
14141 +
14142 +       if (sd_allnodes) {
14143 +               struct sched_group *sg;
14144 +
14145 +               cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
14146 +               init_numa_sched_groups_power(sg);
14147 +       }
14148 +#endif
14149 +
14150 +       /* Attach the domains */
14151 +       for_each_cpu_mask(i, *cpu_map) {
14152 +               struct sched_domain *sd;
14153 +#ifdef CONFIG_SCHED_SMT
14154 +               sd = &per_cpu(cpu_domains, i);
14155 +#elif defined(CONFIG_SCHED_MC)
14156 +               sd = &per_cpu(core_domains, i);
14157 +#else
14158 +               sd = &per_cpu(phys_domains, i);
14159 +#endif
14160 +               cpu_attach_domain(sd, i);
14161 +       }
14162 +       /*
14163 +        * Tune cache-hot values:
14164 +        */
14165 +       calibrate_migration_costs(cpu_map);
14166 +
14167 +       return 0;
14168 +
14169 +#ifdef CONFIG_NUMA
14170 +error:
14171 +       free_sched_groups(cpu_map);
14172 +       return -ENOMEM;
14173 +#endif
14174 +}
14175 +/*
14176 + * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
14177 + */
14178 +static int arch_init_sched_domains(const cpumask_t *cpu_map)
14179 +{
14180 +       cpumask_t cpu_default_map;
14181 +       int err;
14182 +
14183 +       /*
14184 +        * Setup mask for cpus without special case scheduling requirements.
14185 +        * For now this just excludes isolated cpus, but could be used to
14186 +        * exclude other special cases in the future.
14187 +        */
14188 +       cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
14189 +
14190 +       err = build_sched_domains(&cpu_default_map);
14191 +
14192 +       return err;
14193 +}
14194 +
14195 +static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
14196 +{
14197 +       free_sched_groups(cpu_map);
14198 +}
14199 +
14200 +/*
14201 + * Detach sched domains from a group of cpus specified in cpu_map
14202 + * These cpus will now be attached to the NULL domain
14203 + */
14204 +static void detach_destroy_domains(const cpumask_t *cpu_map)
14205 +{
14206 +       int i;
14207 +
14208 +       for_each_cpu_mask(i, *cpu_map)
14209 +               cpu_attach_domain(NULL, i);
14210 +       synchronize_sched();
14211 +       arch_destroy_sched_domains(cpu_map);
14212 +}
14213 +
14214 +/*
14215 + * Partition sched domains as specified by the cpumasks below.
14216 + * This attaches all cpus from the cpumasks to the NULL domain,
14217 + * waits for a RCU quiescent period, recalculates sched
14218 + * domain information and then attaches them back to the
14219 + * correct sched domains
14220 + * Call with hotplug lock held
14221 + */
14222 +int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
14223 +{
14224 +       cpumask_t change_map;
14225 +       int err = 0;
14226 +
14227 +       cpus_and(*partition1, *partition1, cpu_online_map);
14228 +       cpus_and(*partition2, *partition2, cpu_online_map);
14229 +       cpus_or(change_map, *partition1, *partition2);
14230 +
14231 +       /* Detach sched domains from all of the affected cpus */
14232 +       detach_destroy_domains(&change_map);
14233 +       if (!cpus_empty(*partition1))
14234 +               err = build_sched_domains(partition1);
14235 +       if (!err && !cpus_empty(*partition2))
14236 +               err = build_sched_domains(partition2);
14237 +
14238 +       return err;
14239 +}
14240 +
14241 +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
14242 +int arch_reinit_sched_domains(void)
14243 +{
14244 +       int err;
14245 +
14246 +       mutex_lock(&sched_hotcpu_mutex);
14247 +       detach_destroy_domains(&cpu_online_map);
14248 +       err = arch_init_sched_domains(&cpu_online_map);
14249 +       mutex_unlock(&sched_hotcpu_mutex);
14250 +
14251 +       return err;
14252 +}
14253 +
14254 +static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
14255 +{
14256 +       int ret;
14257 +
14258 +       if (buf[0] != '0' && buf[0] != '1')
14259 +               return -EINVAL;
14260 +
14261 +       if (smt)
14262 +               sched_smt_power_savings = (buf[0] == '1');
14263 +       else
14264 +               sched_mc_power_savings = (buf[0] == '1');
14265 +
14266 +       ret = arch_reinit_sched_domains();
14267 +
14268 +       return ret ? ret : count;
14269 +}
14270 +
14271 +int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
14272 +{
14273 +       int err = 0;
14274 +
14275 +#ifdef CONFIG_SCHED_SMT
14276 +       if (smt_capable())
14277 +               err = sysfs_create_file(&cls->kset.kobj,
14278 +                                       &attr_sched_smt_power_savings.attr);
14279 +#endif
14280 +#ifdef CONFIG_SCHED_MC
14281 +       if (!err && mc_capable())
14282 +               err = sysfs_create_file(&cls->kset.kobj,
14283 +                                       &attr_sched_mc_power_savings.attr);
14284 +#endif
14285 +       return err;
14286 +}
14287 +#endif
14288 +
14289 +#ifdef CONFIG_SCHED_MC
14290 +static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
14291 +{
14292 +       return sprintf(page, "%u\n", sched_mc_power_savings);
14293 +}
14294 +static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
14295 +                                           const char *buf, size_t count)
14296 +{
14297 +       return sched_power_savings_store(buf, count, 0);
14298 +}
14299 +SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
14300 +           sched_mc_power_savings_store);
14301 +#endif
14302 +
14303 +#ifdef CONFIG_SCHED_SMT
14304 +static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
14305 +{
14306 +       return sprintf(page, "%u\n", sched_smt_power_savings);
14307 +}
14308 +static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
14309 +                                            const char *buf, size_t count)
14310 +{
14311 +       return sched_power_savings_store(buf, count, 1);
14312 +}
14313 +SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
14314 +           sched_smt_power_savings_store);
14315 +#endif
14316 +
14317 +/*
14318 + * Force a reinitialization of the sched domains hierarchy.  The domains
14319 + * and groups cannot be updated in place without racing with the balancing
14320 + * code, so we temporarily attach all running cpus to the NULL domain
14321 + * which will prevent rebalancing while the sched domains are recalculated.
14322 + */
14323 +static int update_sched_domains(struct notifier_block *nfb,
14324 +                               unsigned long action, void *hcpu)
14325 +{
14326 +       switch (action) {
14327 +       case CPU_UP_PREPARE:
14328 +       case CPU_UP_PREPARE_FROZEN:
14329 +       case CPU_DOWN_PREPARE:
14330 +       case CPU_DOWN_PREPARE_FROZEN:
14331 +               detach_destroy_domains(&cpu_online_map);
14332 +               return NOTIFY_OK;
14333 +
14334 +       case CPU_UP_CANCELED:
14335 +       case CPU_UP_CANCELED_FROZEN:
14336 +       case CPU_DOWN_FAILED:
14337 +       case CPU_DOWN_FAILED_FROZEN:
14338 +       case CPU_ONLINE:
14339 +       case CPU_ONLINE_FROZEN:
14340 +       case CPU_DEAD:
14341 +       case CPU_DEAD_FROZEN:
14342 +               /*
14343 +                * Fall through and re-initialise the domains.
14344 +                */
14345 +               break;
14346 +       default:
14347 +               return NOTIFY_DONE;
14348 +       }
14349 +
14350 +       /* The hotplug lock is already held by cpu_up/cpu_down */
14351 +       arch_init_sched_domains(&cpu_online_map);
14352 +
14353 +       return NOTIFY_OK;
14354 +}
14355 +
14356 +void __init sched_init_smp(void)
14357 +{
14358 +       cpumask_t non_isolated_cpus;
14359 +
14360 +       mutex_lock(&sched_hotcpu_mutex);
14361 +       arch_init_sched_domains(&cpu_online_map);
14362 +       cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
14363 +       if (cpus_empty(non_isolated_cpus))
14364 +               cpu_set(smp_processor_id(), non_isolated_cpus);
14365 +       mutex_unlock(&sched_hotcpu_mutex);
14366 +       /* XXX: Theoretical race here - CPU may be hotplugged now */
14367 +       hotcpu_notifier(update_sched_domains, 0);
14368 +
14369 +       /* Move init over to a non-isolated CPU */
14370 +       if (set_cpus_allowed(current, non_isolated_cpus) < 0)
14371 +               BUG();
14372 +}
14373 +#else
14374 +void __init sched_init_smp(void)
14375 +{
14376 +}
14377 +#endif /* CONFIG_SMP */
14378 +
14379 +int in_sched_functions(unsigned long addr)
14380 +{
14381 +       /* Linker adds these: start and end of __sched functions */
14382 +       extern char __sched_text_start[], __sched_text_end[];
14383 +
14384 +       return in_lock_functions(addr) ||
14385 +               (addr >= (unsigned long)__sched_text_start
14386 +               && addr < (unsigned long)__sched_text_end);
14387 +}
14388 +
14389 +void __init sched_init(void)
14390 +{
14391 +       int i, j, k;
14392 +       int highest_cpu = 0;
14393 +
14394 +       for_each_possible_cpu(i) {
14395 +               struct prio_array *array;
14396 +               struct rq *rq;
14397 +
14398 +               rq = cpu_rq(i);
14399 +               spin_lock_init(&rq->lock);
14400 +               lockdep_set_class(&rq->lock, &rq->rq_lock_key);
14401 +               rq->nr_running = 0;
14402 +               rq->active = rq->arrays;
14403 +               rq->expired = rq->arrays + 1;
14404 +               rq->best_expired_prio = MAX_PRIO;
14405 +
14406 +#ifdef CONFIG_SMP
14407 +               rq->sd = NULL;
14408 +               for (j = 1; j < 3; j++)
14409 +                       rq->cpu_load[j] = 0;
14410 +               rq->active_balance = 0;
14411 +               rq->push_cpu = 0;
14412 +               rq->cpu = i;
14413 +               rq->migration_thread = NULL;
14414 +               INIT_LIST_HEAD(&rq->migration_queue);
14415 +#endif
14416 +               atomic_set(&rq->nr_iowait, 0);
14417 +#ifdef CONFIG_VSERVER_HARDCPU
14418 +               INIT_LIST_HEAD(&rq->hold_queue);
14419 +               rq->nr_onhold = 0;
14420 +#endif
14421 +               for (j = 0; j < 2; j++) {
14422 +                       array = rq->arrays + j;
14423 +                       for (k = 0; k < MAX_PRIO; k++) {
14424 +                               INIT_LIST_HEAD(array->queue + k);
14425 +                               __clear_bit(k, array->bitmap);
14426 +                       }
14427 +                       // delimiter for bitsearch
14428 +                       __set_bit(MAX_PRIO, array->bitmap);
14429 +               }
14430 +               highest_cpu = i;
14431 +       }
14432 +
14433 +       set_load_weight(&init_task);
14434 +
14435 +#ifdef CONFIG_SMP
14436 +       nr_cpu_ids = highest_cpu + 1;
14437 +       open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
14438 +#endif
14439 +
14440 +#ifdef CONFIG_RT_MUTEXES
14441 +       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
14442 +#endif
14443 +
14444 +       /*
14445 +        * The boot idle thread does lazy MMU switching as well:
14446 +        */
14447 +       atomic_inc(&init_mm.mm_count);
14448 +       enter_lazy_tlb(&init_mm, current);
14449 +
14450 +       /*
14451 +        * Make us the idle thread. Technically, schedule() should not be
14452 +        * called from this thread, however somewhere below it might be,
14453 +        * but because we are the idle thread, we just pick up running again
14454 +        * when this runqueue becomes "idle".
14455 +        */
14456 +       init_idle(current, smp_processor_id());
14457 +}
14458 +
14459 +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
14460 +void __might_sleep(char *file, int line)
14461 +{
14462 +#ifdef in_atomic
14463 +       static unsigned long prev_jiffy;        /* ratelimiting */
14464 +
14465 +       if ((in_atomic() || irqs_disabled()) &&
14466 +           system_state == SYSTEM_RUNNING && !oops_in_progress) {
14467 +               if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
14468 +                       return;
14469 +               prev_jiffy = jiffies;
14470 +               printk(KERN_ERR "BUG: sleeping function called from invalid"
14471 +                               " context at %s:%d\n", file, line);
14472 +               printk("in_atomic():%d, irqs_disabled():%d\n",
14473 +                       in_atomic(), irqs_disabled());
14474 +               debug_show_held_locks(current);
14475 +               if (irqs_disabled())
14476 +                       print_irqtrace_events(current);
14477 +               dump_stack();
14478 +       }
14479 +#endif
14480 +}
14481 +EXPORT_SYMBOL(__might_sleep);
14482 +#endif
14483 +
14484 +#ifdef CONFIG_MAGIC_SYSRQ
14485 +void normalize_rt_tasks(void)
14486 +{
14487 +       struct prio_array *array;
14488 +       struct task_struct *g, *p;
14489 +       unsigned long flags;
14490 +       struct rq *rq;
14491 +
14492 +       read_lock_irq(&tasklist_lock);
14493 +
14494 +       do_each_thread(g, p) {
14495 +               if (!rt_task(p))
14496 +                       continue;
14497 +
14498 +               spin_lock_irqsave(&p->pi_lock, flags);
14499 +               rq = __task_rq_lock(p);
14500 +
14501 +               array = p->array;
14502 +               if (array)
14503 +                       deactivate_task(p, task_rq(p));
14504 +               __setscheduler(p, SCHED_NORMAL, 0);
14505 +               if (array) {
14506 +                       vx_activate_task(p);
14507 +                       __activate_task(p, task_rq(p));
14508 +                       resched_task(rq->curr);
14509 +               }
14510 +
14511 +               __task_rq_unlock(rq);
14512 +               spin_unlock_irqrestore(&p->pi_lock, flags);
14513 +       } while_each_thread(g, p);
14514 +
14515 +       read_unlock_irq(&tasklist_lock);
14516 +}
14517 +
14518 +#endif /* CONFIG_MAGIC_SYSRQ */
14519 +
14520 +#ifdef CONFIG_IA64
14521 +/*
14522 + * These functions are only useful for the IA64 MCA handling.
14523 + *
14524 + * They can only be called when the whole system has been
14525 + * stopped - every CPU needs to be quiescent, and no scheduling
14526 + * activity can take place. Using them for anything else would
14527 + * be a serious bug, and as a result, they aren't even visible
14528 + * under any other configuration.
14529 + */
14530 +
14531 +/**
14532 + * curr_task - return the current task for a given cpu.
14533 + * @cpu: the processor in question.
14534 + *
14535 + * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
14536 + */
14537 +struct task_struct *curr_task(int cpu)
14538 +{
14539 +       return cpu_curr(cpu);
14540 +}
14541 +
14542 +/**
14543 + * set_curr_task - set the current task for a given cpu.
14544 + * @cpu: the processor in question.
14545 + * @p: the task pointer to set.
14546 + *
14547 + * Description: This function must only be used when non-maskable interrupts
14548 + * are serviced on a separate stack.  It allows the architecture to switch the
14549 + * notion of the current task on a cpu in a non-blocking manner.  This function
14550 + * must be called with all CPU's synchronized, and interrupts disabled, the
14551 + * and caller must save the original value of the current task (see
14552 + * curr_task() above) and restore that value before reenabling interrupts and
14553 + * re-starting the system.
14554 + *
14555 + * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
14556 + */
14557 +void set_curr_task(int cpu, struct task_struct *p)
14558 +{
14559 +       cpu_curr(cpu) = p;
14560 +}
14561 +
14562 +#endif
14563 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/kernel/timer.S linux-2.6.22-590/kernel/timer.S
14564 --- linux-2.6.22-580/kernel/timer.S     1969-12-31 19:00:00.000000000 -0500
14565 +++ linux-2.6.22-590/kernel/timer.S     2009-02-18 09:57:23.000000000 -0500
14566 @@ -0,0 +1,32311 @@
14567 +       .file   "timer.c"
14568 +       .section        .debug_abbrev,"",@progbits
14569 +.Ldebug_abbrev0:
14570 +       .section        .debug_info,"",@progbits
14571 +.Ldebug_info0:
14572 +       .section        .debug_line,"",@progbits
14573 +.Ldebug_line0:
14574 +       .text
14575 +.Ltext0:
14576 +.globl __round_jiffies
14577 +       .type   __round_jiffies, @function
14578 +__round_jiffies:
14579 +.LFB883:
14580 +       .file 1 "kernel/timer.c"
14581 +       .loc 1 138 0
14582 +.LVL0:
14583 +       pushl   %edi
14584 +.LCFI0:
14585 +       pushl   %esi
14586 +.LCFI1:
14587 +       .loc 1 150 0
14588 +       leal    (%edx,%edx,2), %esi
14589 +       .loc 1 152 0
14590 +       movl    $250, %edx
14591 +.LVL1:
14592 +       .loc 1 150 0
14593 +       leal    (%eax,%esi), %ecx
14594 +.LVL2:
14595 +       .loc 1 152 0
14596 +       movl    %edx, %edi
14597 +       .loc 1 138 0
14598 +       pushl   %ebx
14599 +.LCFI2:
14600 +       .loc 1 152 0
14601 +       xorl    %edx, %edx
14602 +       .loc 1 138 0
14603 +       movl    %eax, %ebx
14604 +       .loc 1 152 0
14605 +       movl    %ecx, %eax
14606 +.LVL3:
14607 +       divl    %edi
14608 +       .loc 1 138 0
14609 +       subl    $4, %esp
14610 +.LCFI3:
14611 +       .loc 1 160 0
14612 +       cmpl    $61, %edx
14613 +       jg      .L2
14614 +       .loc 1 161 0
14615 +       movl    %ecx, %eax
14616 +.LVL4:
14617 +       subl    %edx, %eax
14618 +       jmp     .L4
14619 +.LVL5:
14620 +.L2:
14621 +       .loc 1 163 0
14622 +       subl    %edx, %ecx
14623 +       leal    250(%ecx), %eax
14624 +.LVL6:
14625 +.L4:
14626 +       .loc 1 166 0
14627 +       movl    %eax, %edx
14628 +.LVL7:
14629 +       .loc 1 168 0
14630 +       movl    jiffies, %eax
14631 +.LVL8:
14632 +       .loc 1 166 0
14633 +       subl    %esi, %edx
14634 +       .loc 1 168 0
14635 +       cmpl    %eax, %edx
14636 +       cmova   %edx, %ebx
14637 +       .loc 1 171 0
14638 +       popl    %edx
14639 +.LVL9:
14640 +       movl    %ebx, %eax
14641 +       popl    %ebx
14642 +.LVL10:
14643 +       popl    %esi
14644 +       popl    %edi
14645 +       ret
14646 +.LFE883:
14647 +       .size   __round_jiffies, .-__round_jiffies
14648 +.globl __round_jiffies_relative
14649 +       .type   __round_jiffies_relative, @function
14650 +__round_jiffies_relative:
14651 +.LFB884:
14652 +       .loc 1 195 0
14653 +.LVL11:
14654 +       .loc 1 202 0
14655 +       movl    jiffies, %ecx
14656 +       addl    %ecx, %eax
14657 +.LVL12:
14658 +       call    __round_jiffies
14659 +.LVL13:
14660 +       movl    jiffies, %edx
14661 +       subl    %edx, %eax
14662 +       .loc 1 203 0
14663 +       ret
14664 +.LFE884:
14665 +       .size   __round_jiffies_relative, .-__round_jiffies_relative
14666 +.globl round_jiffies
14667 +       .type   round_jiffies, @function
14668 +round_jiffies:
14669 +.LFB885:
14670 +       .loc 1 222 0
14671 +.LVL14:
14672 +.LBB179:
14673 +       .loc 1 223 0
14674 +#APP
14675 +       movl %fs:per_cpu__cpu_number,%edx
14676 +.LVL15:
14677 +#NO_APP
14678 +.LBE179:
14679 +       jmp     __round_jiffies
14680 +.LVL16:
14681 +.LFE885:
14682 +       .size   round_jiffies, .-round_jiffies
14683 +.globl round_jiffies_relative
14684 +       .type   round_jiffies_relative, @function
14685 +round_jiffies_relative:
14686 +.LFB886:
14687 +       .loc 1 243 0
14688 +.LVL17:
14689 +.LBB180:
14690 +       .loc 1 244 0
14691 +#APP
14692 +       movl %fs:per_cpu__cpu_number,%edx
14693 +.LVL18:
14694 +#NO_APP
14695 +.LBE180:
14696 +       jmp     __round_jiffies_relative
14697 +.LVL19:
14698 +.LFE886:
14699 +       .size   round_jiffies_relative, .-round_jiffies_relative
14700 +       .type   internal_add_timer, @function
14701 +internal_add_timer:
14702 +.LFB888:
14703 +       .loc 1 258 0
14704 +.LVL20:
14705 +       pushl   %esi
14706 +.LCFI4:
14707 +       pushl   %ebx
14708 +.LCFI5:
14709 +       .loc 1 258 0
14710 +       movl    %eax, %ebx
14711 +       .loc 1 259 0
14712 +       movl    8(%edx), %eax
14713 +.LVL21:
14714 +       .loc 1 260 0
14715 +       movl    8(%ebx), %esi
14716 +       movl    %eax, %ecx
14717 +.LVL22:
14718 +       subl    %esi, %ecx
14719 +       .loc 1 263 0
14720 +       cmpl    $255, %ecx
14721 +       jbe     .L28
14722 +       .loc 1 266 0
14723 +       cmpl    $16383, %ecx
14724 +       ja      .L19
14725 +.LBB181:
14726 +       .loc 1 268 0
14727 +       shrl    $5, %eax
14728 +       andl    $504, %eax
14729 +       leal    2060(%eax,%ebx), %ecx
14730 +.LVL23:
14731 +       jmp     .L18
14732 +.LVL24:
14733 +.L19:
14734 +.LBE181:
14735 +       .loc 1 269 0
14736 +       cmpl    $1048575, %ecx
14737 +       ja      .L21
14738 +.LBB182:
14739 +       .loc 1 271 0
14740 +       shrl    $11, %eax
14741 +       andl    $504, %eax
14742 +       leal    2572(%eax,%ebx), %ecx
14743 +.LVL25:
14744 +       jmp     .L18
14745 +.LVL26:
14746 +.L21:
14747 +.LBE182:
14748 +       .loc 1 272 0
14749 +       cmpl    $67108863, %ecx
14750 +       ja      .L23
14751 +.LBB183:
14752 +       .loc 1 274 0
14753 +       shrl    $17, %eax
14754 +       andl    $504, %eax
14755 +       leal    3084(%eax,%ebx), %ecx
14756 +.LVL27:
14757 +       jmp     .L18
14758 +.LVL28:
14759 +.L23:
14760 +.LBE183:
14761 +       .loc 1 275 0
14762 +       testl   %ecx, %ecx
14763 +       jns     .L25
14764 +       .loc 1 280 0
14765 +       movl    %esi, %eax
14766 +.LVL29:
14767 +.L28:
14768 +.LVL30:
14769 +       andl    $255, %eax
14770 +       leal    12(%ebx,%eax,8), %ecx
14771 +.LVL31:
14772 +       jmp     .L18
14773 +.LVL32:
14774 +.L25:
14775 +.LBB184:
14776 +       .loc 1 291 0
14777 +       shrl    $26, %eax
14778 +       leal    3596(%ebx,%eax,8), %ecx
14779 +.LVL33:
14780 +.L18:
14781 +.LBE184:
14782 +.LBB185:
14783 +.LBB186:
14784 +       .file 2 "include/linux/list.h"
14785 +       .loc 2 86 0
14786 +       movl    4(%ecx), %eax
14787 +.LVL34:
14788 +.LBB187:
14789 +.LBB188:
14790 +       .loc 2 48 0
14791 +       movl    %ecx, (%edx)
14792 +       .loc 2 47 0
14793 +       movl    %edx, 4(%ecx)
14794 +       .loc 2 50 0
14795 +       movl    %edx, (%eax)
14796 +.LBE188:
14797 +.LBE187:
14798 +.LBE186:
14799 +.LBE185:
14800 +       .loc 1 297 0
14801 +       popl    %ebx
14802 +.LVL35:
14803 +.LBB189:
14804 +.LBB190:
14805 +.LBB191:
14806 +.LBB192:
14807 +       .loc 2 49 0
14808 +       movl    %eax, 4(%edx)
14809 +.LBE192:
14810 +.LBE191:
14811 +.LBE190:
14812 +.LBE189:
14813 +       .loc 1 297 0
14814 +       popl    %esi
14815 +       ret
14816 +.LFE888:
14817 +       .size   internal_add_timer, .-internal_add_timer
14818 +.globl init_timer
14819 +       .type   init_timer, @function
14820 +init_timer:
14821 +.LFB889:
14822 +       .loc 1 319 0
14823 +.LVL36:
14824 +       .loc 1 320 0
14825 +       movl    $0, (%eax)
14826 +.LBB193:
14827 +       .loc 1 321 0
14828 +       movl    $per_cpu__tvec_bases, %edx
14829 +.LBB194:
14830 +#APP
14831 +       movl %fs:per_cpu__this_cpu_off,%ecx
14832 +.LVL37:
14833 +#NO_APP
14834 +.LBE194:
14835 +.LBE193:
14836 +       movl    (%edx,%ecx), %edx
14837 +       movl    %edx, 20(%eax)
14838 +       .loc 1 327 0
14839 +       ret
14840 +.LFE889:
14841 +       .size   init_timer, .-init_timer
14842 +.globl init_timer_deferrable
14843 +       .type   init_timer_deferrable, @function
14844 +init_timer_deferrable:
14845 +.LFB890:
14846 +       .loc 1 331 0
14847 +.LVL38:
14848 +       pushl   %ebx
14849 +.LCFI6:
14850 +       .loc 1 331 0
14851 +       movl    %eax, %ebx
14852 +       .loc 1 332 0
14853 +       call    init_timer
14854 +.LVL39:
14855 +.LBB197:
14856 +.LBB198:
14857 +       .loc 1 106 0
14858 +       orl     $1, 20(%ebx)
14859 +.LBE198:
14860 +.LBE197:
14861 +       .loc 1 334 0
14862 +       popl    %ebx
14863 +.LVL40:
14864 +       ret
14865 +.LFE890:
14866 +       .size   init_timer_deferrable, .-init_timer_deferrable
14867 +       .section        .rodata.str1.1,"aMS",@progbits,1
14868 +.LC0:
14869 +       .string "kernel/timer.c"
14870 +       .text
14871 +       .type   cascade, @function
14872 +cascade:
14873 +.LFB899:
14874 +       .loc 1 581 0
14875 +.LVL41:
14876 +       pushl   %edi
14877 +.LCFI7:
14878 +       movl    %eax, %edi
14879 +       pushl   %esi
14880 +.LCFI8:
14881 +       movl    %ecx, %esi
14882 +       pushl   %ebx
14883 +.LCFI9:
14884 +       subl    $8, %esp
14885 +.LCFI10:
14886 +       .loc 1 581 0
14887 +       leal    (%edx,%ecx,8), %eax
14888 +.LVL42:
14889 +.LBB199:
14890 +.LBB200:
14891 +.LBB201:
14892 +.LBB202:
14893 +       .loc 2 218 0
14894 +       movl    (%eax), %edx
14895 +.LVL43:
14896 +       .loc 2 219 0
14897 +       movl    %esp, 4(%edx)
14898 +       .loc 2 218 0
14899 +       movl    %edx, (%esp)
14900 +       .loc 2 220 0
14901 +       movl    4(%eax), %edx
14902 +       .loc 2 221 0
14903 +       movl    %esp, (%edx)
14904 +.LBE202:
14905 +.LBE201:
14906 +.LBB203:
14907 +.LBB204:
14908 +       .loc 2 32 0
14909 +       movl    %eax, (%eax)
14910 +.LBE204:
14911 +.LBE203:
14912 +.LBB205:
14913 +.LBB206:
14914 +       .loc 2 220 0
14915 +       movl    %edx, 4(%esp)
14916 +.LBE206:
14917 +.LBE205:
14918 +.LBE200:
14919 +.LBE199:
14920 +.LBB207:
14921 +       .loc 1 592 0
14922 +       movl    (%esp), %edx
14923 +.LVL44:
14924 +.LBE207:
14925 +.LBB208:
14926 +.LBB209:
14927 +.LBB210:
14928 +.LBB211:
14929 +       .loc 2 33 0
14930 +       movl    %eax, 4(%eax)
14931 +.LBE211:
14932 +.LBE210:
14933 +.LBE209:
14934 +.LBE208:
14935 +.LBB212:
14936 +       .loc 1 592 0
14937 +       movl    (%edx), %ebx
14938 +.LVL45:
14939 +       jmp     .L34
14940 +.L35:
14941 +.LBE212:
14942 +       .loc 1 593 0
14943 +       movl    20(%edx), %eax
14944 +.LVL46:
14945 +       andl    $-2, %eax
14946 +       cmpl    %edi, %eax
14947 +       je      .L36
14948 +#APP
14949 +       1:      ud2
14950 +.pushsection __bug_table,"a"
14951 +2:     .long 1b, .LC0
14952 +       .word 593, 0
14953 +       .org 2b+12
14954 +.popsection
14955 +#NO_APP
14956 +.L38:
14957 +       jmp     .L38
14958 +.L36:
14959 +       .loc 1 594 0
14960 +       movl    %edi, %eax
14961 +       call    internal_add_timer
14962 +.LVL47:
14963 +.LBB213:
14964 +       .loc 1 592 0
14965 +       movl    %ebx, %edx
14966 +.LVL48:
14967 +       movl    (%ebx), %ebx
14968 +.LVL49:
14969 +.L34:
14970 +.LBE213:
14971 +       cmpl    %esp, %edx
14972 +       jne     .L35
14973 +.LVL50:
14974 +       .loc 1 598 0
14975 +       popl    %ecx
14976 +.LVL51:
14977 +       movl    %esi, %eax
14978 +.LVL52:
14979 +       popl    %ebx
14980 +.LVL53:
14981 +       popl    %ebx
14982 +       popl    %esi
14983 +.LVL54:
14984 +       popl    %edi
14985 +.LVL55:
14986 +       ret
14987 +.LFE899:
14988 +       .size   cascade, .-cascade
14989 +       .section        .rodata.str1.1
14990 +.LC1:
14991 +       .string "WARNING: at %s:%d %s()\n"
14992 +       .section        .init.text,"ax",@progbits
14993 +       .type   timer_cpu_notify, @function
14994 +timer_cpu_notify:
14995 +.LFB923:
14996 +       .loc 1 1336 0
14997 +.LVL56:
14998 +       pushl   %ebp
14999 +.LCFI11:
15000 +       pushl   %edi
15001 +.LCFI12:
15002 +       pushl   %esi
15003 +.LCFI13:
15004 +       movl    %ecx, %esi
15005 +       pushl   %ebx
15006 +.LCFI14:
15007 +       subl    $16, %esp
15008 +.LCFI15:
15009 +       .loc 1 1338 0
15010 +       cmpl    $3, %edx
15011 +       je      .L43
15012 +.LVL57:
15013 +       cmpl    $19, %edx
15014 +       movl    $1, %eax
15015 +.LVL58:
15016 +       jne     .L44
15017 +.LVL59:
15018 +.L43:
15019 +.LBB240:
15020 +.LBB241:
15021 +       .loc 1 1238 0
15022 +       cmpb    $0, tvec_base_done.19028(%esi)
15023 +       jne     .L45
15024 +.LBB242:
15025 +       .loc 1 1241 0
15026 +       cmpb    $0, boot_done.19029
15027 +       je      .L47
15028 +.L48:
15029 +.LBB243:
15030 +.LBB244:
15031 +.LBB245:
15032 +       .file 3 "include/linux/slab_def.h"
15033 +       .loc 3 49 0
15034 +       movl    malloc_sizes+100, %eax
15035 +.LVL60:
15036 +       movl    $208, %edx
15037 +.LVL61:
15038 +       call    kmem_cache_alloc
15039 +       movl    %eax, %edi
15040 +.LBE245:
15041 +.LBE244:
15042 +.LBE243:
15043 +       .loc 1 1247 0
15044 +       movl    $32770, %eax
15045 +       testl   %edi, %edi
15046 +       je      .L44
15047 +.LVL62:
15048 +       .loc 1 1251 0
15049 +       movl    %edi, %eax
15050 +.LVL63:
15051 +       .loc 1 1245 0
15052 +       movl    %edi, %ebx
15053 +.LVL64:
15054 +       .loc 1 1251 0
15055 +       andl    $1, %eax
15056 +       je      .L51
15057 +.LBB246:
15058 +       .loc 1 1252 0
15059 +       movl    $__func__.19031, 12(%esp)
15060 +       movl    $1252, 8(%esp)
15061 +       movl    $.LC0, 4(%esp)
15062 +       movl    $.LC1, (%esp)
15063 +       call    printk
15064 +       call    dump_stack
15065 +.LBE246:
15066 +       .loc 1 1253 0
15067 +       movl    %edi, %eax
15068 +       call    kfree
15069 +       movl    $32770, %eax
15070 +       jmp     .L44
15071 +.LVL65:
15072 +.L51:
15073 +.LBB247:
15074 +.LBB248:
15075 +.LBB249:
15076 +       .file 4 "include/asm/string.h"
15077 +       .loc 4 447 0
15078 +       movl    $1056, %ecx
15079 +.LVL66:
15080 +#APP
15081 +       rep ; stosl
15082 +.LVL67:
15083 +#NO_APP
15084 +.LBE249:
15085 +.LBE248:
15086 +.LBE247:
15087 +.LBE242:
15088 +.LBE241:
15089 +       .loc 1 1257 0
15090 +       movl    __per_cpu_offset(,%esi,4), %edx
15091 +.LBB250:
15092 +.LBB251:
15093 +.LBB252:
15094 +       movl    $per_cpu__tvec_bases, %eax
15095 +.LBE252:
15096 +.LBE251:
15097 +.LBE250:
15098 +       movl    %ebx, (%eax,%edx)
15099 +       jmp     .L53
15100 +.LVL68:
15101 +.L47:
15102 +.LBB253:
15103 +.LBB254:
15104 +       .loc 1 1265 0
15105 +       movb    $1, boot_done.19029
15106 +       movl    $boot_tvec_bases, %ebx
15107 +.LVL69:
15108 +.L53:
15109 +       .loc 1 1268 0
15110 +       movb    $1, tvec_base_done.19028(%esi)
15111 +       jmp     .L54
15112 +.LVL70:
15113 +.L45:
15114 +.LBE254:
15115 +.LBE253:
15116 +       .loc 1 1270 0
15117 +       movl    __per_cpu_offset(,%esi,4), %edx
15118 +.LVL71:
15119 +.LBB255:
15120 +.LBB256:
15121 +       movl    $per_cpu__tvec_bases, %eax
15122 +.LVL72:
15123 +.LBE256:
15124 +.LBE255:
15125 +       movl    (%eax,%edx), %ebx
15126 +.LVL73:
15127 +.L54:
15128 +       .loc 1 1273 0
15129 +       movl    $1, (%ebx)
15130 +       xorl    %ebp, %ebp
15131 +.LVL74:
15132 +       leal    2048(%ebx), %edx
15133 +.LVL75:
15134 +       leal    2560(%ebx), %esi
15135 +.LVL76:
15136 +       leal    3072(%ebx), %edi
15137 +.LVL77:
15138 +       leal    3584(%ebx), %ecx
15139 +.LVL78:
15140 +.L55:
15141 +       leal    12(%ecx), %eax
15142 +.LVL79:
15143 +.LBB257:
15144 +       .loc 1 1276 0
15145 +       incl    %ebp
15146 +.LBB258:
15147 +.LBB259:
15148 +       .loc 2 32 0
15149 +       movl    %eax, 1548(%edx)
15150 +.LBE259:
15151 +.LBE258:
15152 +       .loc 1 1276 0
15153 +       addl    $8, %ecx
15154 +.LBB260:
15155 +.LBB261:
15156 +       .loc 2 33 0
15157 +       movl    %eax, 1552(%edx)
15158 +.LBE261:
15159 +.LBE260:
15160 +       .loc 1 1278 0
15161 +       leal    12(%edi), %eax
15162 +.LVL80:
15163 +       .loc 1 1276 0
15164 +       addl    $8, %edi
15165 +.LBB262:
15166 +.LBB263:
15167 +       .loc 2 32 0
15168 +       movl    %eax, 1036(%edx)
15169 +       .loc 2 33 0
15170 +       movl    %eax, 1040(%edx)
15171 +.LBE263:
15172 +.LBE262:
15173 +       .loc 1 1279 0
15174 +       leal    12(%esi), %eax
15175 +.LVL81:
15176 +       .loc 1 1276 0
15177 +       addl    $8, %esi
15178 +.LBB264:
15179 +.LBB265:
15180 +       .loc 2 32 0
15181 +       movl    %eax, 524(%edx)
15182 +       .loc 2 33 0
15183 +       movl    %eax, 528(%edx)
15184 +.LBE265:
15185 +.LBE264:
15186 +       .loc 1 1280 0
15187 +       leal    12(%edx), %eax
15188 +.LVL82:
15189 +.LBB266:
15190 +.LBB267:
15191 +       .loc 2 32 0
15192 +       movl    %eax, 12(%edx)
15193 +       .loc 2 33 0
15194 +       movl    %eax, 16(%edx)
15195 +.LBE267:
15196 +.LBE266:
15197 +       .loc 1 1276 0
15198 +       addl    $8, %edx
15199 +       cmpl    $64, %ebp
15200 +       jne     .L55
15201 +       xorl    %ecx, %ecx
15202 +.LVL83:
15203 +       movl    %ebx, %edx
15204 +.LVL84:
15205 +.L57:
15206 +       leal    12(%edx), %eax
15207 +.LVL85:
15208 +       .loc 1 1282 0
15209 +       incl    %ecx
15210 +.LBB268:
15211 +.LBB269:
15212 +       .loc 2 32 0
15213 +       movl    %eax, 12(%edx)
15214 +       .loc 2 33 0
15215 +       movl    %eax, 16(%edx)
15216 +.LBE269:
15217 +.LBE268:
15218 +       .loc 1 1282 0
15219 +       addl    $8, %edx
15220 +       cmpl    $256, %ecx
15221 +       jne     .L57
15222 +       .loc 1 1285 0
15223 +       movl    jiffies, %eax
15224 +.LVL86:
15225 +       movl    %eax, 8(%ebx)
15226 +       movl    $1, %eax
15227 +.LVL87:
15228 +.L44:
15229 +.LBE257:
15230 +.LBE240:
15231 +       .loc 1 1354 0
15232 +       addl    $16, %esp
15233 +       popl    %ebx
15234 +.LVL88:
15235 +       popl    %esi
15236 +.LVL89:
15237 +       popl    %edi
15238 +.LVL90:
15239 +       popl    %ebp
15240 +.LVL91:
15241 +       ret
15242 +.LFE923:
15243 +       .size   timer_cpu_notify, .-timer_cpu_notify
15244 +.globl init_timers
15245 +       .type   init_timers, @function
15246 +init_timers:
15247 +.LFB924:
15248 +       .loc 1 1362 0
15249 +       .loc 1 1364 0
15250 +       movl    $3, %edx
15251 +       movl    $timers_nb, %eax
15252 +.LBB273:
15253 +#APP
15254 +       movl %fs:per_cpu__cpu_number,%ecx
15255 +.LVL92:
15256 +#NO_APP
15257 +.LBE273:
15258 +       call    timer_cpu_notify
15259 +.LVL93:
15260 +.LVL94:
15261 +       .loc 1 1368 0
15262 +       cmpl    $32770, %eax
15263 +       jne     .L65
15264 +#APP
15265 +       1:      ud2
15266 +.pushsection __bug_table,"a"
15267 +2:     .long 1b, .LC0
15268 +       .word 1368, 0
15269 +       .org 2b+12
15270 +.popsection
15271 +#NO_APP
15272 +.L67:
15273 +       jmp     .L67
15274 +.L65:
15275 +       .loc 1 1369 0
15276 +       movl    $timers_nb, %eax
15277 +.LVL95:
15278 +       call    register_cpu_notifier
15279 +       .loc 1 1370 0
15280 +       xorl    %ecx, %ecx
15281 +       movl    $run_timer_softirq, %edx
15282 +       movl    $1, %eax
15283 +       jmp     open_softirq
15284 +.LFE924:
15285 +       .size   init_timers, .-init_timers
15286 +       .section        .rodata.str1.1
15287 +.LC2:
15288 +       .string "<4>huh, entered %p with preempt_count %08x, exited with %08x?\n"
15289 +       .text
15290 +       .type   run_timer_softirq, @function
15291 +run_timer_softirq:
15292 +.LFB904:
15293 +       .loc 1 872 0
15294 +.LVL96:
15295 +       pushl   %edi
15296 +.LCFI16:
15297 +.LBB322:
15298 +       .loc 1 873 0
15299 +       movl    $per_cpu__tvec_bases, %eax
15300 +.LVL97:
15301 +.LBE322:
15302 +       .loc 1 872 0
15303 +       pushl   %esi
15304 +.LCFI17:
15305 +       pushl   %ebx
15306 +.LCFI18:
15307 +       subl    $24, %esp
15308 +.LCFI19:
15309 +.LBB323:
15310 +       .loc 1 873 0
15311 +.LBB324:
15312 +#APP
15313 +       movl %fs:per_cpu__this_cpu_off,%edx
15314 +.LVL98:
15315 +#NO_APP
15316 +.LBE324:
15317 +.LBE323:
15318 +       movl    (%eax,%edx), %esi
15319 +.LVL99:
15320 +       .loc 1 875 0
15321 +       call    hrtimer_run_queues
15322 +.LVL100:
15323 +       .loc 1 877 0
15324 +       movl    jiffies, %eax
15325 +       cmpl    8(%esi), %eax
15326 +       js      .L85
15327 +.LBB325:
15328 +.LBB326:
15329 +       .loc 1 613 0
15330 +       movl    %esi, %eax
15331 +       call    _spin_lock_irq
15332 +       jmp     .L72
15333 +.LVL101:
15334 +.L73:
15335 +.LBB327:
15336 +       .loc 1 622 0
15337 +       movl    %ecx, %ebx
15338 +.LVL102:
15339 +       andl    $255, %ebx
15340 +       jne     .L74
15341 +.LBE327:
15342 +.LBE326:
15343 +       shrl    $8, %ecx
15344 +       movl    %esi, %eax
15345 +       andl    $63, %ecx
15346 +       leal    2060(%esi), %edx
15347 +.LVL103:
15348 +       call    cascade
15349 +.LBB328:
15350 +.LBB329:
15351 +       testl   %eax, %eax
15352 +       jne     .L74
15353 +.LBE329:
15354 +.LBE328:
15355 +       movl    8(%esi), %ecx
15356 +       leal    2572(%esi), %edx
15357 +       movl    %esi, %eax
15358 +       shrl    $14, %ecx
15359 +       andl    $63, %ecx
15360 +       call    cascade
15361 +.LBB330:
15362 +.LBB331:
15363 +       testl   %eax, %eax
15364 +       jne     .L74
15365 +.LBE331:
15366 +.LBE330:
15367 +       movl    8(%esi), %ecx
15368 +       leal    3084(%esi), %edx
15369 +       movl    %esi, %eax
15370 +       shrl    $20, %ecx
15371 +       andl    $63, %ecx
15372 +       call    cascade
15373 +.LBB332:
15374 +.LBB333:
15375 +       testl   %eax, %eax
15376 +       jne     .L74
15377 +       .loc 1 626 0
15378 +       movl    8(%esi), %ecx
15379 +       leal    3596(%esi), %edx
15380 +       movl    %esi, %eax
15381 +       shrl    $26, %ecx
15382 +       call    cascade
15383 +.LVL104:
15384 +.L74:
15385 +       .loc 1 627 0
15386 +       incl    8(%esi)
15387 +       leal    (%esi,%ebx,8), %ecx
15388 +.LBB334:
15389 +.LBB335:
15390 +.LBB336:
15391 +.LBB337:
15392 +       .loc 2 219 0
15393 +       leal    16(%esp), %ebx
15394 +.LVL105:
15395 +       .loc 2 218 0
15396 +       movl    12(%ecx), %eax
15397 +.LBE337:
15398 +.LBE336:
15399 +.LBE335:
15400 +.LBE334:
15401 +       .loc 1 627 0
15402 +       leal    12(%ecx), %edx
15403 +.LBB338:
15404 +.LBB339:
15405 +.LBB340:
15406 +.LBB341:
15407 +       .loc 2 219 0
15408 +       movl    %ebx, 4(%eax)
15409 +       .loc 2 218 0
15410 +       movl    %eax, 16(%esp)
15411 +       .loc 2 220 0
15412 +       movl    4(%edx), %eax
15413 +       movl    %eax, 20(%esp)
15414 +       .loc 2 221 0
15415 +       movl    %ebx, (%eax)
15416 +.LBE341:
15417 +.LBE340:
15418 +.LBB342:
15419 +.LBB343:
15420 +       .loc 2 33 0
15421 +       movl    %edx, 4(%edx)
15422 +       .loc 2 32 0
15423 +       movl    %edx, 12(%ecx)
15424 +       jmp     .L79
15425 +.L80:
15426 +.LBE343:
15427 +.LBE342:
15428 +.LBE339:
15429 +.LBE338:
15430 +.LBB344:
15431 +       .loc 1 634 0
15432 +       movl    12(%ebx), %edi
15433 +       .loc 1 635 0
15434 +       movl    16(%ebx), %eax
15435 +.LVL106:
15436 +.LBB345:
15437 +.LBB346:
15438 +       .loc 1 253 0
15439 +       movl    %ebx, 4(%esi)
15440 +.LBE346:
15441 +.LBE345:
15442 +.LBB347:
15443 +.LBB348:
15444 +       .loc 1 342 0
15445 +       movl    (%ebx), %ecx
15446 +.LVL107:
15447 +       movl    4(%ebx), %edx
15448 +.LVL108:
15449 +.LBB349:
15450 +.LBB350:
15451 +       .loc 2 157 0
15452 +       movl    %edx, 4(%ecx)
15453 +       .loc 2 158 0
15454 +       movl    %ecx, (%edx)
15455 +.LBE350:
15456 +.LBE349:
15457 +       .loc 1 345 0
15458 +       movl    $2097664, 4(%ebx)
15459 +       .loc 1 344 0
15460 +       movl    $0, (%ebx)
15461 +.LBE348:
15462 +.LBE347:
15463 +.LBB351:
15464 +.LBB352:
15465 +       .file 5 "include/asm/spinlock.h"
15466 +       .loc 5 108 0
15467 +#APP
15468 +       movb $1,(%esi)
15469 +#NO_APP
15470 +.LBE352:
15471 +.LBE351:
15472 +.LBB353:
15473 +.LBB354:
15474 +.LBB355:
15475 +.LBB356:
15476 +       .file 6 "include/asm/irqflags.h"
15477 +       .loc 6 36 0
15478 +#APP
15479 +       sti
15480 +#NO_APP
15481 +.LBE356:
15482 +.LBE355:
15483 +.LBE354:
15484 +.LBE353:
15485 +.LBB357:
15486 +       .loc 1 643 0
15487 +       movl    %esp, %edx
15488 +.LVL109:
15489 +       andl    $-8192, %edx
15490 +       movl    20(%edx), %ebx
15491 +.LVL110:
15492 +       .loc 1 644 0
15493 +       call    *%edi
15494 +.LVL111:
15495 +       .loc 1 645 0
15496 +       movl    %esp, %eax
15497 +       andl    $-8192, %eax
15498 +       movl    20(%eax), %eax
15499 +       cmpl    %eax, %ebx
15500 +       je      .L81
15501 +       .loc 1 646 0
15502 +       movl    %ebx, 8(%esp)
15503 +       movl    %edi, 4(%esp)
15504 +       movl    %eax, 12(%esp)
15505 +       movl    $.LC2, (%esp)
15506 +       call    printk
15507 +       .loc 1 651 0
15508 +#APP
15509 +       1:      ud2
15510 +.pushsection __bug_table,"a"
15511 +2:     .long 1b, .LC0
15512 +       .word 651, 0
15513 +       .org 2b+12
15514 +.popsection
15515 +#NO_APP
15516 +.L83:
15517 +       jmp     .L83
15518 +.L81:
15519 +.LBE357:
15520 +       .loc 1 654 0
15521 +       movl    %esi, %eax
15522 +       call    _spin_lock_irq
15523 +.LVL112:
15524 +.L79:
15525 +.LBE344:
15526 +.LBB358:
15527 +.LBB359:
15528 +       .loc 2 300 0
15529 +       movl    16(%esp), %ebx
15530 +.LVL113:
15531 +.LBE359:
15532 +.LBE358:
15533 +       .loc 1 629 0
15534 +       leal    16(%esp), %eax
15535 +       cmpl    %eax, %ebx
15536 +       jne     .L80
15537 +.L72:
15538 +.LBE333:
15539 +       .loc 1 614 0
15540 +       movl    jiffies, %eax
15541 +       movl    8(%esi), %ecx
15542 +       cmpl    %ecx, %eax
15543 +       jns     .L73
15544 +.LBB360:
15545 +.LBB361:
15546 +       .loc 1 253 0
15547 +       movl    $0, 4(%esi)
15548 +.LBE361:
15549 +.LBE360:
15550 +.LBB362:
15551 +.LBB363:
15552 +       .loc 5 108 0
15553 +#APP
15554 +       movb $1,(%esi)
15555 +#NO_APP
15556 +.LBE363:
15557 +.LBE362:
15558 +.LBB364:
15559 +.LBB365:
15560 +.LBB366:
15561 +.LBB367:
15562 +       .loc 6 36 0
15563 +#APP
15564 +       sti
15565 +#NO_APP
15566 +.L85:
15567 +.LBE367:
15568 +.LBE366:
15569 +.LBE365:
15570 +.LBE364:
15571 +.LBE332:
15572 +.LBE325:
15573 +       .loc 1 879 0
15574 +       addl    $24, %esp
15575 +       popl    %ebx
15576 +       popl    %esi
15577 +.LVL114:
15578 +       popl    %edi
15579 +.LVL115:
15580 +       ret
15581 +.LFE904:
15582 +       .size   run_timer_softirq, .-run_timer_softirq
15583 +.globl do_sysinfo
15584 +       .type   do_sysinfo, @function
15585 +do_sysinfo:
15586 +.LFB920:
15587 +       .loc 1 1132 0
15588 +.LVL116:
15589 +       pushl   %edi
15590 +.LCFI20:
15591 +.LBB368:
15592 +.LBB369:
15593 +.LBB370:
15594 +       .loc 4 447 0
15595 +       movl    $16, %ecx
15596 +.LBE370:
15597 +.LBE369:
15598 +.LBE368:
15599 +       .loc 1 1132 0
15600 +       pushl   %ebx
15601 +.LCFI21:
15602 +       movl    %eax, %ebx
15603 +       subl    $8, %esp
15604 +.LCFI22:
15605 +.LBB371:
15606 +.LBB372:
15607 +.LBB373:
15608 +       .loc 4 447 0
15609 +       xorl    %eax, %eax
15610 +.LVL117:
15611 +       movl    %ebx, %edi
15612 +.LVL118:
15613 +#APP
15614 +       rep ; stosl
15615 +.LVL119:
15616 +.LVL120:
15617 +#NO_APP
15618 +.L87:
15619 +.LBE373:
15620 +.LBE372:
15621 +.LBE371:
15622 +.LBB374:
15623 +.LBB375:
15624 +.LBB376:
15625 +       .file 7 "include/linux/seqlock.h"
15626 +       .loc 7 88 0
15627 +       movl    xtime_lock, %edi
15628 +.LVL121:
15629 +       .loc 7 89 0
15630 +#APP
15631 +       661:
15632 +       lock; addl $0,0(%esp)
15633 +662:
15634 +.section .altinstructions,"a"
15635 +  .align 4
15636 +  .long 661b
15637 +  .long 663f
15638 +  .byte 26
15639 +  .byte 662b-661b
15640 +  .byte 664f-663f
15641 +.previous
15642 +.section .altinstr_replacement,"ax"
15643 +663:
15644 +       lfence
15645 +664:
15646 +.previous
15647 +#NO_APP
15648 +.LBE376:
15649 +.LBE375:
15650 +       .loc 1 1150 0
15651 +       movl    %esp, %eax
15652 +       call    getnstimeofday
15653 +.LVL122:
15654 +       .loc 1 1152 0
15655 +       movl    wall_to_monotonic+4, %eax
15656 +       .loc 1 1151 0
15657 +       movl    wall_to_monotonic, %edx
15658 +       .loc 1 1152 0
15659 +       addl    4(%esp), %eax
15660 +       .loc 1 1151 0
15661 +       addl    (%esp), %edx
15662 +       .loc 1 1153 0
15663 +       cmpl    $999999999, %eax
15664 +       .loc 1 1151 0
15665 +       movl    %edx, (%esp)
15666 +       .loc 1 1152 0
15667 +       movl    %eax, 4(%esp)
15668 +       .loc 1 1153 0
15669 +       jle     .L88
15670 +       .loc 1 1154 0
15671 +       subl    $1000000000, %eax
15672 +       movl    %eax, 4(%esp)
15673 +       .loc 1 1155 0
15674 +       leal    1(%edx), %eax
15675 +       movl    %eax, (%esp)
15676 +.L88:
15677 +       .loc 1 1157 0
15678 +       xorl    %eax, %eax
15679 +       cmpl    $0, 4(%esp)
15680 +       setne   %al
15681 +       addl    (%esp), %eax
15682 +       movl    %eax, (%ebx)
15683 +       .loc 1 1159 0
15684 +       movl    avenrun, %eax
15685 +       sall    $5, %eax
15686 +       movl    %eax, 4(%ebx)
15687 +       .loc 1 1160 0
15688 +       movl    avenrun+4, %eax
15689 +       sall    $5, %eax
15690 +       movl    %eax, 8(%ebx)
15691 +       .loc 1 1161 0
15692 +       movl    avenrun+8, %eax
15693 +       sall    $5, %eax
15694 +       movl    %eax, 12(%ebx)
15695 +       .loc 1 1163 0
15696 +       movl    nr_threads, %eax
15697 +       movw    %ax, 40(%ebx)
15698 +.LBE374:
15699 +.LBB377:
15700 +.LBB378:
15701 +       .loc 7 103 0
15702 +#APP
15703 +       661:
15704 +       lock; addl $0,0(%esp)
15705 +662:
15706 +.section .altinstructions,"a"
15707 +  .align 4
15708 +  .long 661b
15709 +  .long 663f
15710 +  .byte 26
15711 +  .byte 662b-661b
15712 +  .byte 664f-663f
15713 +.previous
15714 +.section .altinstr_replacement,"ax"
15715 +663:
15716 +       lfence
15717 +664:
15718 +.previous
15719 +#NO_APP
15720 +.LBE378:
15721 +.LBE377:
15722 +       .loc 1 1164 0
15723 +       movl    %edi, %eax
15724 +       xorl    xtime_lock, %edi
15725 +       andl    $1, %eax
15726 +       orl     %edi, %eax
15727 +       jne     .L87
15728 +       .loc 1 1166 0
15729 +       movl    %ebx, %eax
15730 +       call    si_meminfo
15731 +       .loc 1 1167 0
15732 +       movl    %ebx, %eax
15733 +       call    si_swapinfo
15734 +       .loc 1 1178 0
15735 +       movl    16(%ebx), %eax
15736 +       movl    32(%ebx), %ecx
15737 +       leal    (%eax,%ecx), %edx
15738 +.LVL123:
15739 +       .loc 1 1179 0
15740 +       cmpl    %eax, %edx
15741 +       jb      .L91
15742 +       cmpl    %ecx, %edx
15743 +       jb      .L91
15744 +       .loc 1 1182 0
15745 +       movl    52(%ebx), %eax
15746 +.LVL124:
15747 +       xorl    %ecx, %ecx
15748 +.LVL125:
15749 +       jmp     .L94
15750 +.LVL126:
15751 +.L95:
15752 +       .loc 1 1187 0
15753 +       leal    (%edx,%edx), %edi
15754 +.LVL127:
15755 +       .loc 1 1188 0
15756 +       cmpl    %edx, %edi
15757 +       jb      .L91
15758 +       .loc 1 1184 0
15759 +       incl    %ecx
15760 +       .loc 1 1185 0
15761 +       movl    %edi, %edx
15762 +       shrl    %eax
15763 +.LVL128:
15764 +.L94:
15765 +       .loc 1 1183 0
15766 +       cmpl    $1, %eax
15767 +       ja      .L95
15768 +       .loc 1 1200 0
15769 +       sall    %cl, 16(%ebx)
15770 +       .loc 1 1201 0
15771 +       sall    %cl, 20(%ebx)
15772 +       .loc 1 1202 0
15773 +       sall    %cl, 24(%ebx)
15774 +       .loc 1 1203 0
15775 +       sall    %cl, 28(%ebx)
15776 +       .loc 1 1204 0
15777 +       sall    %cl, 32(%ebx)
15778 +       .loc 1 1205 0
15779 +       sall    %cl, 36(%ebx)
15780 +       .loc 1 1206 0
15781 +       sall    %cl, 44(%ebx)
15782 +       .loc 1 1207 0
15783 +       sall    %cl, 48(%ebx)
15784 +       .loc 1 1199 0
15785 +       movl    $1, 52(%ebx)
15786 +.L91:
15787 +       .loc 1 1211 0
15788 +       popl    %edi
15789 +.LVL129:
15790 +       xorl    %eax, %eax
15791 +.LVL130:
15792 +       popl    %edx
15793 +.LVL131:
15794 +       popl    %ebx
15795 +.LVL132:
15796 +       popl    %edi
15797 +       ret
15798 +.LFE920:
15799 +       .size   do_sysinfo, .-do_sysinfo
15800 +.globl sys_sysinfo
15801 +       .type   sys_sysinfo, @function
15802 +sys_sysinfo:
15803 +.LFB921:
15804 +       .loc 1 1214 0
15805 +.LVL133:
15806 +       pushl   %ebx
15807 +.LCFI23:
15808 +       subl    $64, %esp
15809 +.LCFI24:
15810 +       .loc 1 1217 0
15811 +       movl    %esp, %eax
15812 +       call    do_sysinfo
15813 +       .loc 1 1219 0
15814 +       movl    $64, %ecx
15815 +       movl    72(%esp), %eax
15816 +       movl    %esp, %edx
15817 +       call    copy_to_user
15818 +       cmpl    $1, %eax
15819 +       sbbl    %eax, %eax
15820 +       .loc 1 1223 0
15821 +       addl    $64, %esp
15822 +       popl    %ebx
15823 +       .loc 1 1219 0
15824 +       notl    %eax
15825 +       andl    $-14, %eax
15826 +       .loc 1 1223 0
15827 +       ret
15828 +.LFE921:
15829 +       .size   sys_sysinfo, .-sys_sysinfo
15830 +       .type   process_timeout, @function
15831 +process_timeout:
15832 +.LFB915:
15833 +       .loc 1 1025 0
15834 +.LVL134:
15835 +       .loc 1 1026 0
15836 +       jmp     wake_up_process
15837 +.LVL135:
15838 +.LFE915:
15839 +       .size   process_timeout, .-process_timeout
15840 +.globl sys_alarm
15841 +       .type   sys_alarm, @function
15842 +sys_alarm:
15843 +.LFB908:
15844 +       .loc 1 919 0
15845 +.LVL136:
15846 +       .loc 1 919 0
15847 +       movl    4(%esp), %eax
15848 +       .loc 1 920 0
15849 +       jmp     alarm_setitimer
15850 +.LFE908:
15851 +       .size   sys_alarm, .-sys_alarm
15852 +.globl do_timer
15853 +       .type   do_timer, @function
15854 +do_timer:
15855 +.LFB907:
15856 +       .loc 1 907 0
15857 +.LVL137:
15858 +       pushl   %ebp
15859 +.LCFI25:
15860 +       .loc 1 908 0
15861 +       xorl    %edx, %edx
15862 +.LVL138:
15863 +       .loc 1 907 0
15864 +       pushl   %edi
15865 +.LCFI26:
15866 +       pushl   %esi
15867 +.LCFI27:
15868 +       pushl   %ebx
15869 +.LCFI28:
15870 +       movl    %eax, %ebx
15871 +       subl    $4, %esp
15872 +.LCFI29:
15873 +       .loc 1 908 0
15874 +       addl    %eax, jiffies_64
15875 +       adcl    %edx, jiffies_64+4
15876 +.LBB385:
15877 +.LBB386:
15878 +       .loc 1 896 0
15879 +       call    update_wall_time
15880 +.LVL139:
15881 +.LBB387:
15882 +.LBB388:
15883 +       .loc 1 856 0
15884 +       movl    count.18791, %eax
15885 +       subl    %ebx, %eax
15886 +       .loc 1 857 0
15887 +       testl   %eax, %eax
15888 +       .loc 1 856 0
15889 +       movl    %eax, count.18791
15890 +       .loc 1 857 0
15891 +       jns     .L115
15892 +.LBB389:
15893 +       .loc 1 832 0
15894 +       call    nr_active
15895 +.LBE389:
15896 +       .loc 1 862 0
15897 +       movl    count.18791, %esi
15898 +       movl    avenrun, %ebx
15899 +.LVL140:
15900 +       movl    avenrun+4, %ecx
15901 +.LVL141:
15902 +.LBB390:
15903 +.LBB391:
15904 +       .loc 1 832 0
15905 +       sall    $11, %eax
15906 +.LBE391:
15907 +.LBE390:
15908 +       .loc 1 860 0
15909 +       imull   $164, %eax, %edx
15910 +       .loc 1 861 0
15911 +       imull   $34, %eax, %ebp
15912 +       .loc 1 862 0
15913 +       imull   $11, %eax, %edi
15914 +       .loc 1 860 0
15915 +       movl    %edx, (%esp)
15916 +       movl    avenrun+8, %edx
15917 +.L113:
15918 +       .loc 1 861 0
15919 +       imull   $2014, %ecx, %eax
15920 +.LVL142:
15921 +       .loc 1 860 0
15922 +       imull   $1884, %ebx, %ebx
15923 +       .loc 1 861 0
15924 +       leal    (%eax,%ebp), %ecx
15925 +       .loc 1 862 0
15926 +       imull   $2037, %edx, %eax
15927 +       .loc 1 860 0
15928 +       addl    (%esp), %ebx
15929 +       .loc 1 861 0
15930 +       shrl    $11, %ecx
15931 +       .loc 1 862 0
15932 +       leal    (%eax,%edi), %edx
15933 +       .loc 1 860 0
15934 +       shrl    $11, %ebx
15935 +       .loc 1 862 0
15936 +       shrl    $11, %edx
15937 +       .loc 1 864 0
15938 +       addl    $1250, %esi
15939 +       js      .L113
15940 +       movl    %edx, avenrun+8
15941 +       movl    %ecx, avenrun+4
15942 +       movl    %ebx, avenrun
15943 +       movl    %esi, count.18791
15944 +.LVL143:
15945 +.L115:
15946 +.LBE388:
15947 +.LBE387:
15948 +.LBE386:
15949 +.LBE385:
15950 +       .loc 1 910 0
15951 +       popl    %ecx
15952 +       popl    %ebx
15953 +.LVL144:
15954 +       popl    %esi
15955 +       popl    %edi
15956 +       popl    %ebp
15957 +       ret
15958 +.LFE907:
15959 +       .size   do_timer, .-do_timer
15960 +.globl run_local_timers
15961 +       .type   run_local_timers, @function
15962 +run_local_timers:
15963 +.LFB905:
15964 +       .loc 1 885 0
15965 +       .loc 1 886 0
15966 +       movl    $1, %eax
15967 +       call    raise_softirq
15968 +       .loc 1 887 0
15969 +       jmp     softlockup_tick
15970 +.LFE905:
15971 +       .size   run_local_timers, .-run_local_timers
15972 +       .type   lock_timer_base, @function
15973 +lock_timer_base:
15974 +.LFB892:
15975 +       .loc 1 363 0
15976 +.LVL145:
15977 +       pushl   %ebp
15978 +.LCFI30:
15979 +       movl    %edx, %ebp
15980 +       pushl   %edi
15981 +.LCFI31:
15982 +       movl    %eax, %edi
15983 +       pushl   %esi
15984 +.LCFI32:
15985 +       pushl   %ebx
15986 +.LCFI33:
15987 +.LVL146:
15988 +.L123:
15989 +.LBB392:
15990 +       .loc 1 367 0
15991 +       movl    20(%edi), %ebx
15992 +       .loc 1 369 0
15993 +       movl    %ebx, %esi
15994 +       andl    $-2, %esi
15995 +       je      .L124
15996 +       .loc 1 370 0
15997 +       movl    %esi, %eax
15998 +       call    _spin_lock_irqsave
15999 +       movl    %eax, (%ebp)
16000 +       .loc 1 371 0
16001 +       cmpl    20(%edi), %ebx
16002 +       je      .L129
16003 +       .loc 1 374 0
16004 +       movl    %eax, %edx
16005 +       movl    %esi, %eax
16006 +       call    _spin_unlock_irqrestore
16007 +.LVL147:
16008 +.L124:
16009 +.LBB393:
16010 +.LBB394:
16011 +       .file 8 "include/asm/processor.h"
16012 +       .loc 8 497 0
16013 +#APP
16014 +       rep;nop
16015 +#NO_APP
16016 +       jmp     .L123
16017 +.LVL148:
16018 +.L129:
16019 +.LBE394:
16020 +.LBE393:
16021 +.LBE392:
16022 +       .loc 1 378 0
16023 +       popl    %ebx
16024 +.LVL149:
16025 +       movl    %esi, %eax
16026 +       popl    %esi
16027 +.LVL150:
16028 +       popl    %edi
16029 +.LVL151:
16030 +       popl    %ebp
16031 +.LVL152:
16032 +       ret
16033 +.LFE892:
16034 +       .size   lock_timer_base, .-lock_timer_base
16035 +.globl try_to_del_timer_sync
16036 +       .type   try_to_del_timer_sync, @function
16037 +try_to_del_timer_sync:
16038 +.LFB897:
16039 +       .loc 1 527 0
16040 +.LVL153:
16041 +       pushl   %esi
16042 +.LCFI34:
16043 +       .loc 1 534 0
16044 +       orl     $-1, %esi
16045 +.LVL154:
16046 +       .loc 1 527 0
16047 +       pushl   %ebx
16048 +.LCFI35:
16049 +       movl    %eax, %ebx
16050 +       subl    $4, %esp
16051 +.LCFI36:
16052 +       .loc 1 532 0
16053 +       movl    %esp, %edx
16054 +       call    lock_timer_base
16055 +.LVL155:
16056 +.LVL156:
16057 +       .loc 1 534 0
16058 +       cmpl    %ebx, 4(%eax)
16059 +.LVL157:
16060 +       .loc 1 532 0
16061 +       movl    %eax, %ecx
16062 +.LVL158:
16063 +       .loc 1 534 0
16064 +       je      .L133
16065 +       .loc 1 538 0
16066 +       xorl    %esi, %esi
16067 +       cmpl    $0, (%ebx)
16068 +       je      .L133
16069 +.LBB395:
16070 +.LBB396:
16071 +       .loc 1 342 0
16072 +       movl    (%ebx), %edx
16073 +.LVL159:
16074 +       .loc 1 345 0
16075 +       movw    $1, %si
16076 +.LVL160:
16077 +       .loc 1 342 0
16078 +       movl    4(%ebx), %eax
16079 +.LVL161:
16080 +.LBB397:
16081 +.LBB398:
16082 +       .loc 2 157 0
16083 +       movl    %eax, 4(%edx)
16084 +       .loc 2 158 0
16085 +       movl    %edx, (%eax)
16086 +.LBE398:
16087 +.LBE397:
16088 +       .loc 1 345 0
16089 +       movl    $2097664, 4(%ebx)
16090 +       .loc 1 344 0
16091 +       movl    $0, (%ebx)
16092 +.LVL162:
16093 +.L133:
16094 +.LVL163:
16095 +.LBE396:
16096 +.LBE395:
16097 +       .loc 1 543 0
16098 +       movl    (%esp), %edx
16099 +.LVL164:
16100 +       movl    %ecx, %eax
16101 +.LVL165:
16102 +       call    _spin_unlock_irqrestore
16103 +.LVL166:
16104 +       .loc 1 546 0
16105 +       movl    %esi, %eax
16106 +       popl    %ebx
16107 +.LVL167:
16108 +       popl    %ebx
16109 +       popl    %esi
16110 +.LVL168:
16111 +       ret
16112 +.LFE897:
16113 +       .size   try_to_del_timer_sync, .-try_to_del_timer_sync
16114 +.globl del_timer_sync
16115 +       .type   del_timer_sync, @function
16116 +del_timer_sync:
16117 +.LFB898:
16118 +       .loc 1 568 0
16119 +.LVL169:
16120 +       pushl   %ebx
16121 +.LCFI37:
16122 +       movl    %eax, %ebx
16123 +.LVL170:
16124 +.L139:
16125 +.LBB399:
16126 +       .loc 1 570 0
16127 +       movl    %ebx, %eax
16128 +.LVL171:
16129 +       call    try_to_del_timer_sync
16130 +.LVL172:
16131 +       .loc 1 571 0
16132 +       testl   %eax, %eax
16133 +       jns     .L143
16134 +.LBB400:
16135 +.LBB401:
16136 +       .loc 8 497 0
16137 +#APP
16138 +       rep;nop
16139 +#NO_APP
16140 +       jmp     .L139
16141 +.L143:
16142 +.LBE401:
16143 +.LBE400:
16144 +.LBE399:
16145 +       .loc 1 575 0
16146 +       popl    %ebx
16147 +.LVL173:
16148 +       ret
16149 +.LFE898:
16150 +       .size   del_timer_sync, .-del_timer_sync
16151 +.globl __mod_timer
16152 +       .type   __mod_timer, @function
16153 +__mod_timer:
16154 +.LFB893:
16155 +       .loc 1 381 0
16156 +.LVL174:
16157 +       pushl   %ebp
16158 +.LCFI38:
16159 +       movl    %edx, %ebp
16160 +       pushl   %edi
16161 +.LCFI39:
16162 +       pushl   %esi
16163 +.LCFI40:
16164 +       pushl   %ebx
16165 +.LCFI41:
16166 +       movl    %eax, %ebx
16167 +       subl    $8, %esp
16168 +.LCFI42:
16169 +       .loc 1 387 0
16170 +       cmpl    $0, 12(%eax)
16171 +       jne     .L145
16172 +.LVL175:
16173 +#APP
16174 +       1:      ud2
16175 +.pushsection __bug_table,"a"
16176 +2:     .long 1b, .LC0
16177 +       .word 387, 0
16178 +       .org 2b+12
16179 +.popsection
16180 +#NO_APP
16181 +.L147:
16182 +       jmp     .L147
16183 +.L145:
16184 +       .loc 1 389 0
16185 +       leal    4(%esp), %edx
16186 +       call    lock_timer_base
16187 +.LVL176:
16188 +       .loc 1 391 0
16189 +       cmpl    $0, (%ebx)
16190 +       .loc 1 389 0
16191 +       movl    %eax, %esi
16192 +.LVL177:
16193 +       .loc 1 391 0
16194 +       movl    $0, (%esp)
16195 +.LVL178:
16196 +       je      .L150
16197 +.LBB402:
16198 +.LBB403:
16199 +       .loc 1 342 0
16200 +       movl    4(%ebx), %eax
16201 +.LVL179:
16202 +       movl    (%ebx), %edx
16203 +.LVL180:
16204 +.LBB404:
16205 +.LBB405:
16206 +       .loc 2 157 0
16207 +       movl    %eax, 4(%edx)
16208 +       .loc 2 158 0
16209 +       movl    %edx, (%eax)
16210 +.LBE405:
16211 +.LBE404:
16212 +       .loc 1 345 0
16213 +       movl    $2097664, 4(%ebx)
16214 +       movl    $1, (%esp)
16215 +.L150:
16216 +.LBE403:
16217 +.LBE402:
16218 +.LBB406:
16219 +.LBB407:
16220 +       .loc 1 396 0
16221 +#APP
16222 +       movl %fs:per_cpu__this_cpu_off,%edx
16223 +.LVL181:
16224 +#NO_APP
16225 +.LBE407:
16226 +       movl    $per_cpu__tvec_bases, %eax
16227 +.LVL182:
16228 +.LBE406:
16229 +       movl    (%eax,%edx), %edi
16230 +.LVL183:
16231 +       .loc 1 398 0
16232 +       cmpl    %edi, %esi
16233 +       je      .L151
16234 +       .loc 1 406 0
16235 +       cmpl    %ebx, 4(%esi)
16236 +       je      .L151
16237 +.LBB408:
16238 +.LBB409:
16239 +       .loc 1 113 0
16240 +       andl    $1, 20(%ebx)
16241 +.LBE409:
16242 +.LBE408:
16243 +.LBB410:
16244 +.LBB411:
16245 +       .loc 5 108 0
16246 +#APP
16247 +       movb $1,(%esi)
16248 +#NO_APP
16249 +.LBE411:
16250 +.LBE410:
16251 +       .loc 1 411 0
16252 +       movl    %edi, %eax
16253 +.LBB412:
16254 +.LBB413:
16255 +       .loc 1 113 0
16256 +       movl    %edi, %esi
16257 +.LBE413:
16258 +.LBE412:
16259 +       .loc 1 411 0
16260 +       call    _spin_lock
16261 +.LVL184:
16262 +.LBB414:
16263 +.LBB415:
16264 +       .loc 1 113 0
16265 +       movl    20(%ebx), %eax
16266 +       andl    $1, %eax
16267 +       orl     %edi, %eax
16268 +       movl    %eax, 20(%ebx)
16269 +.LVL185:
16270 +.L151:
16271 +.LBE415:
16272 +.LBE414:
16273 +       .loc 1 416 0
16274 +       movl    %ebp, 8(%ebx)
16275 +       .loc 1 417 0
16276 +       movl    %ebx, %edx
16277 +.LVL186:
16278 +       movl    %esi, %eax
16279 +       call    internal_add_timer
16280 +       .loc 1 418 0
16281 +       movl    %esi, %eax
16282 +.LVL187:
16283 +       movl    4(%esp), %edx
16284 +       call    _spin_unlock_irqrestore
16285 +       .loc 1 421 0
16286 +       movl    (%esp), %eax
16287 +       popl    %esi
16288 +.LVL188:
16289 +       popl    %edi
16290 +.LVL189:
16291 +       popl    %ebx
16292 +.LVL190:
16293 +       popl    %esi
16294 +       popl    %edi
16295 +       popl    %ebp
16296 +.LVL191:
16297 +       ret
16298 +.LFE893:
16299 +       .size   __mod_timer, .-__mod_timer
16300 +       .section        .rodata.str1.1
16301 +.LC3:
16302 +       .string "<3>schedule_timeout: wrong timeout value %lx\n"
16303 +       .section        .sched.text,"ax",@progbits
16304 +.globl schedule_timeout
16305 +       .type   schedule_timeout, @function
16306 +schedule_timeout:
16307 +.LFB916:
16308 +       .loc 1 1056 0
16309 +.LVL192:
16310 +       pushl   %esi
16311 +.LCFI43:
16312 +       pushl   %ebx
16313 +.LCFI44:
16314 +       movl    %eax, %ebx
16315 +       subl    $32, %esp
16316 +.LCFI45:
16317 +       .loc 1 1060 0
16318 +       cmpl    $2147483647, %eax
16319 +       jne     .L156
16320 +       .loc 1 1070 0
16321 +       call    schedule
16322 +.LVL193:
16323 +       jmp     .L158
16324 +.LVL194:
16325 +.L156:
16326 +       .loc 1 1080 0
16327 +       testl   %eax, %eax
16328 +       jns     .L159
16329 +       .loc 1 1081 0
16330 +       movl    %eax, 4(%esp)
16331 +       movl    $.LC3, (%esp)
16332 +       call    printk
16333 +.LVL195:
16334 +       .loc 1 1083 0
16335 +       call    dump_stack
16336 +.LBB416:
16337 +.LBB417:
16338 +.LBB418:
16339 +       .file 9 "include/asm/current.h"
16340 +       .loc 9 12 0
16341 +#APP
16342 +       movl %fs:per_cpu__current_task,%eax
16343 +.LVL196:
16344 +#NO_APP
16345 +.LBE418:
16346 +.LBE417:
16347 +.LBE416:
16348 +       .loc 1 1084 0
16349 +       movl    $0, (%eax)
16350 +       jmp     .L158
16351 +.LVL197:
16352 +.L159:
16353 +       .loc 1 1089 0
16354 +       movl    jiffies, %esi
16355 +.LBB419:
16356 +.LBB420:
16357 +       .file 10 "include/linux/timer.h"
16358 +       .loc 10 48 0
16359 +       leal    8(%esp), %ebx
16360 +       .loc 10 46 0
16361 +       movl    $process_timeout, 20(%esp)
16362 +.LBE420:
16363 +.LBE419:
16364 +       .loc 1 1089 0
16365 +       leal    (%eax,%esi), %esi
16366 +.LVL198:
16367 +.LBB421:
16368 +.LBB422:
16369 +.LBB423:
16370 +       .loc 9 12 0
16371 +#APP
16372 +       movl %fs:per_cpu__current_task,%eax
16373 +.LVL199:
16374 +#NO_APP
16375 +.LBE423:
16376 +.LBE422:
16377 +.LBE421:
16378 +.LBB424:
16379 +.LBB425:
16380 +       .loc 10 47 0
16381 +       movl    %eax, 24(%esp)
16382 +       .loc 10 48 0
16383 +       movl    %ebx, %eax
16384 +.LVL200:
16385 +       call    init_timer
16386 +.LBE425:
16387 +.LBE424:
16388 +       .loc 1 1092 0
16389 +       movl    %esi, %edx
16390 +       movl    %ebx, %eax
16391 +       call    __mod_timer
16392 +       .loc 1 1093 0
16393 +       call    schedule
16394 +       .loc 1 1094 0
16395 +       movl    %ebx, %eax
16396 +       .loc 1 1096 0
16397 +       movl    %esi, %ebx
16398 +.LVL201:
16399 +       .loc 1 1094 0
16400 +       call    del_timer_sync
16401 +       .loc 1 1096 0
16402 +       movl    jiffies, %eax
16403 +       subl    %eax, %ebx
16404 +.LVL202:
16405 +.L158:
16406 +       .loc 1 1098 0
16407 +       xorl    %eax, %eax
16408 +.LVL203:
16409 +       testl   %ebx, %ebx
16410 +       cmovns  %ebx, %eax
16411 +       .loc 1 1100 0
16412 +       addl    $32, %esp
16413 +       popl    %ebx
16414 +.LVL204:
16415 +       popl    %esi
16416 +.LVL205:
16417 +       ret
16418 +.LFE916:
16419 +       .size   schedule_timeout, .-schedule_timeout
16420 +.globl schedule_timeout_uninterruptible
16421 +       .type   schedule_timeout_uninterruptible, @function
16422 +schedule_timeout_uninterruptible:
16423 +.LFB918:
16424 +       .loc 1 1115 0
16425 +.LVL206:
16426 +.LBB426:
16427 +.LBB427:
16428 +.LBB428:
16429 +       .loc 9 12 0
16430 +#APP
16431 +       movl %fs:per_cpu__current_task,%edx
16432 +.LVL207:
16433 +#NO_APP
16434 +.LBE428:
16435 +.LBE427:
16436 +.LBE426:
16437 +       .loc 1 1116 0
16438 +       movl    $2, (%edx)
16439 +       .loc 1 1117 0
16440 +       jmp     schedule_timeout
16441 +.LVL208:
16442 +.LFE918:
16443 +       .size   schedule_timeout_uninterruptible, .-schedule_timeout_uninterruptible
16444 +       .text
16445 +.globl msleep
16446 +       .type   msleep, @function
16447 +msleep:
16448 +.LFB925:
16449 +       .loc 1 1566 0
16450 +.LVL209:
16451 +       .loc 1 1567 0
16452 +       call    msecs_to_jiffies
16453 +.LVL210:
16454 +       incl    %eax
16455 +.LVL211:
16456 +       jmp     .L165
16457 +.L166:
16458 +       .loc 1 1570 0
16459 +       call    schedule_timeout_uninterruptible
16460 +.LVL212:
16461 +.L165:
16462 +       .loc 1 1569 0
16463 +       testl   %eax, %eax
16464 +       jne     .L166
16465 +       .loc 1 1571 0
16466 +       ret
16467 +.LFE925:
16468 +       .size   msleep, .-msleep
16469 +       .section        .sched.text
16470 +.globl schedule_timeout_interruptible
16471 +       .type   schedule_timeout_interruptible, @function
16472 +schedule_timeout_interruptible:
16473 +.LFB917:
16474 +       .loc 1 1108 0
16475 +.LVL213:
16476 +.LBB429:
16477 +.LBB430:
16478 +.LBB431:
16479 +       .loc 9 12 0
16480 +#APP
16481 +       movl %fs:per_cpu__current_task,%edx
16482 +.LVL214:
16483 +#NO_APP
16484 +.LBE431:
16485 +.LBE430:
16486 +.LBE429:
16487 +       .loc 1 1109 0
16488 +       movl    $1, (%edx)
16489 +       .loc 1 1110 0
16490 +       jmp     schedule_timeout
16491 +.LVL215:
16492 +.LFE917:
16493 +       .size   schedule_timeout_interruptible, .-schedule_timeout_interruptible
16494 +       .text
16495 +.globl msleep_interruptible
16496 +       .type   msleep_interruptible, @function
16497 +msleep_interruptible:
16498 +.LFB926:
16499 +       .loc 1 1580 0
16500 +.LVL216:
16501 +       .loc 1 1581 0
16502 +       call    msecs_to_jiffies
16503 +.LVL217:
16504 +       leal    1(%eax), %edx
16505 +.LVL218:
16506 +       jmp     .L172
16507 +.L173:
16508 +       .loc 1 1584 0
16509 +       movl    %edx, %eax
16510 +       call    schedule_timeout_interruptible
16511 +.LVL219:
16512 +       movl    %eax, %edx
16513 +.LVL220:
16514 +.L172:
16515 +       .loc 1 1583 0
16516 +       testl   %edx, %edx
16517 +       je      .L174
16518 +.LBB445:
16519 +.LBB446:
16520 +.LBB447:
16521 +       .loc 9 12 0
16522 +#APP
16523 +       movl %fs:per_cpu__current_task,%eax
16524 +.LVL221:
16525 +#NO_APP
16526 +.LBE447:
16527 +.LBE446:
16528 +.LBE445:
16529 +.LBB448:
16530 +.LBB449:
16531 +.LBB450:
16532 +.LBB451:
16533 +       .file 11 "include/linux/sched.h"
16534 +       .loc 11 1569 0
16535 +       movl    4(%eax), %eax
16536 +.LVL222:
16537 +.LBB452:
16538 +.LBB453:
16539 +.LBB454:
16540 +.LBB455:
16541 +       .file 12 "include/asm/bitops.h"
16542 +       .loc 12 246 0
16543 +       movl    8(%eax), %eax
16544 +.LBE455:
16545 +.LBE454:
16546 +.LBE453:
16547 +.LBE452:
16548 +.LBE451:
16549 +.LBE450:
16550 +.LBE449:
16551 +.LBE448:
16552 +       .loc 1 1583 0
16553 +       testb   $4, %al
16554 +       je      .L173
16555 +.L174:
16556 +       .loc 1 1585 0
16557 +       movl    %edx, %eax
16558 +       jmp     jiffies_to_msecs
16559 +.LVL223:
16560 +.LFE926:
16561 +       .size   msleep_interruptible, .-msleep_interruptible
16562 +.globl update_process_times
16563 +       .type   update_process_times, @function
16564 +update_process_times:
16565 +.LFB901:
16566 +       .loc 1 811 0
16567 +.LVL224:
16568 +       pushl   %edi
16569 +.LCFI46:
16570 +       movl    %eax, %edi
16571 +       pushl   %esi
16572 +.LCFI47:
16573 +       pushl   %ebx
16574 +.LCFI48:
16575 +.LBB460:
16576 +       .loc 1 813 0
16577 +#APP
16578 +       movl %fs:per_cpu__cpu_number,%esi
16579 +.LVL225:
16580 +#NO_APP
16581 +.LBE460:
16582 +.LBB461:
16583 +.LBB462:
16584 +.LBB463:
16585 +       .loc 9 12 0
16586 +#APP
16587 +       movl %fs:per_cpu__current_task,%ebx
16588 +.LVL226:
16589 +#NO_APP
16590 +.LBE463:
16591 +.LBE462:
16592 +.LBE461:
16593 +       .loc 1 816 0
16594 +       testl   %eax, %eax
16595 +       je      .L178
16596 +       .loc 1 817 0
16597 +       movl    $1, %edx
16598 +       movl    %ebx, %eax
16599 +.LVL227:
16600 +       call    account_user_time
16601 +       jmp     .L180
16602 +.LVL228:
16603 +.L178:
16604 +       .loc 1 819 0
16605 +       movl    $1, %ecx
16606 +       movl    $65536, %edx
16607 +       movl    %ebx, %eax
16608 +.LVL229:
16609 +       call    account_system_time
16610 +.L180:
16611 +       .loc 1 820 0
16612 +       call    run_local_timers
16613 +       .loc 1 821 0
16614 +       movl    %esi, %eax
16615 +       call    rcu_pending
16616 +       testl   %eax, %eax
16617 +       je      .L181
16618 +       .loc 1 822 0
16619 +       movl    %edi, %edx
16620 +       movl    %esi, %eax
16621 +       call    rcu_check_callbacks
16622 +.L181:
16623 +       .loc 1 823 0
16624 +       call    scheduler_tick
16625 +       .loc 1 824 0
16626 +       movl    %ebx, %eax
16627 +       .loc 1 825 0
16628 +       popl    %ebx
16629 +.LVL230:
16630 +       popl    %esi
16631 +.LVL231:
16632 +       popl    %edi
16633 +.LVL232:
16634 +       .loc 1 824 0
16635 +       jmp     run_posix_cpu_timers
16636 +.LVL233:
16637 +.LFE901:
16638 +       .size   update_process_times, .-update_process_times
16639 +.globl sys_getpid
16640 +       .type   sys_getpid, @function
16641 +sys_getpid:
16642 +.LFB909:
16643 +       .loc 1 957 0
16644 +       pushl   %ebx
16645 +.LCFI49:
16646 +       subl    $40, %esp
16647 +.LCFI50:
16648 +       .loc 1 959 0
16649 +       movl    rec_event, %ebx
16650 +       testl   %ebx, %ebx
16651 +       je      .L185
16652 +.LBB474:
16653 +       .loc 1 964 0
16654 +       movl    $666, 36(%esp)
16655 +       .loc 1 966 0
16656 +       leal    24(%esp), %eax
16657 +.LBB475:
16658 +.LBB476:
16659 +.LBB477:
16660 +       .loc 9 12 0
16661 +#APP
16662 +       movl %fs:per_cpu__current_task,%edx
16663 +.LVL234:
16664 +#NO_APP
16665 +.LBE477:
16666 +.LBE476:
16667 +.LBE475:
16668 +       .loc 1 965 0
16669 +       movl    468(%edx), %ecx
16670 +.LVL235:
16671 +       .loc 1 966 0
16672 +       movl    %eax, 8(%esp)
16673 +       .loc 1 972 0
16674 +       movl    %esp, %eax
16675 +       .loc 1 967 0
16676 +       movl    %edx, 20(%esp)
16677 +       .loc 1 972 0
16678 +       movl    $1, %edx
16679 +.LVL236:
16680 +       .loc 1 969 0
16681 +       movl    $7, 16(%esp)
16682 +       .loc 1 965 0
16683 +       andl    $4095, %ecx
16684 +       .loc 1 968 0
16685 +       movl    %ecx, 24(%esp)
16686 +       .loc 1 972 0
16687 +       call    *%ebx
16688 +.LVL237:
16689 +.L185:
16690 +.LBE474:
16691 +.LBB478:
16692 +.LBB479:
16693 +.LBB480:
16694 +       .loc 9 12 0
16695 +#APP
16696 +       movl %fs:per_cpu__current_task,%eax
16697 +.LVL238:
16698 +#NO_APP
16699 +       movl    176(%eax), %eax
16700 +.LVL239:
16701 +.LBE480:
16702 +.LBE479:
16703 +.LBE478:
16704 +       .loc 1 977 0
16705 +       addl    $40, %esp
16706 +       popl    %ebx
16707 +       ret
16708 +.LFE909:
16709 +       .size   sys_getpid, .-sys_getpid
16710 +.globl sys_getppid
16711 +       .type   sys_getppid, @function
16712 +sys_getppid:
16713 +.LFB910:
16714 +       .loc 1 986 0
16715 +.LBB485:
16716 +.LBB486:
16717 +.LBB487:
16718 +.LBB488:
16719 +       .loc 9 12 0
16720 +#APP
16721 +       movl %fs:per_cpu__current_task,%eax
16722 +.LVL240:
16723 +#NO_APP
16724 +       movl    180(%eax), %eax
16725 +.LVL241:
16726 +       movl    176(%eax), %eax
16727 +.LBE488:
16728 +.LBE487:
16729 +.LBE486:
16730 +.LBE485:
16731 +       .loc 1 996 0
16732 +       ret
16733 +.LFE910:
16734 +       .size   sys_getppid, .-sys_getppid
16735 +.globl sys_getuid
16736 +       .type   sys_getuid, @function
16737 +sys_getuid:
16738 +.LFB911:
16739 +       .loc 1 999 0
16740 +.LBB492:
16741 +.LBB493:
16742 +.LBB494:
16743 +       .loc 9 12 0
16744 +#APP
16745 +       movl %fs:per_cpu__current_task,%eax
16746 +.LVL242:
16747 +#NO_APP
16748 +       movl    340(%eax), %eax
16749 +.LVL243:
16750 +.LBE494:
16751 +.LBE493:
16752 +.LBE492:
16753 +       .loc 1 1002 0
16754 +       ret
16755 +.LFE911:
16756 +       .size   sys_getuid, .-sys_getuid
16757 +.globl sys_geteuid
16758 +       .type   sys_geteuid, @function
16759 +sys_geteuid:
16760 +.LFB912:
16761 +       .loc 1 1005 0
16762 +.LBB498:
16763 +.LBB499:
16764 +.LBB500:
16765 +       .loc 9 12 0
16766 +#APP
16767 +       movl %fs:per_cpu__current_task,%eax
16768 +.LVL244:
16769 +#NO_APP
16770 +       movl    344(%eax), %eax
16771 +.LVL245:
16772 +.LBE500:
16773 +.LBE499:
16774 +.LBE498:
16775 +       .loc 1 1008 0
16776 +       ret
16777 +.LFE912:
16778 +       .size   sys_geteuid, .-sys_geteuid
16779 +.globl sys_getgid
16780 +       .type   sys_getgid, @function
16781 +sys_getgid:
16782 +.LFB913:
16783 +       .loc 1 1011 0
16784 +.LBB504:
16785 +.LBB505:
16786 +.LBB506:
16787 +       .loc 9 12 0
16788 +#APP
16789 +       movl %fs:per_cpu__current_task,%eax
16790 +.LVL246:
16791 +#NO_APP
16792 +       movl    356(%eax), %eax
16793 +.LVL247:
16794 +.LBE506:
16795 +.LBE505:
16796 +.LBE504:
16797 +       .loc 1 1014 0
16798 +       ret
16799 +.LFE913:
16800 +       .size   sys_getgid, .-sys_getgid
16801 +.globl sys_getegid
16802 +       .type   sys_getegid, @function
16803 +sys_getegid:
16804 +.LFB914:
16805 +       .loc 1 1017 0
16806 +.LBB510:
16807 +.LBB511:
16808 +.LBB512:
16809 +       .loc 9 12 0
16810 +#APP
16811 +       movl %fs:per_cpu__current_task,%eax
16812 +.LVL248:
16813 +#NO_APP
16814 +       movl    360(%eax), %eax
16815 +.LVL249:
16816 +.LBE512:
16817 +.LBE511:
16818 +.LBE510:
16819 +       .loc 1 1020 0
16820 +       ret
16821 +.LFE914:
16822 +       .size   sys_getegid, .-sys_getegid
16823 +.globl sys_gettid
16824 +       .type   sys_gettid, @function
16825 +sys_gettid:
16826 +.LFB919:
16827 +       .loc 1 1123 0
16828 +.LBB516:
16829 +.LBB517:
16830 +.LBB518:
16831 +       .loc 9 12 0
16832 +#APP
16833 +       movl %fs:per_cpu__current_task,%eax
16834 +.LVL250:
16835 +#NO_APP
16836 +       movl    172(%eax), %eax
16837 +.LVL251:
16838 +.LBE518:
16839 +.LBE517:
16840 +.LBE516:
16841 +       .loc 1 1125 0
16842 +       ret
16843 +.LFE919:
16844 +       .size   sys_gettid, .-sys_gettid
16845 +.globl mod_timer
16846 +       .type   mod_timer, @function
16847 +mod_timer:
16848 +.LFB895:
16849 +       .loc 1 467 0
16850 +.LVL252:
16851 +       .loc 1 468 0
16852 +       cmpl    $0, 12(%eax)
16853 +       jne     .L201
16854 +#APP
16855 +       1:      ud2
16856 +.pushsection __bug_table,"a"
16857 +2:     .long 1b, .LC0
16858 +       .word 468, 0
16859 +       .org 2b+12
16860 +.popsection
16861 +#NO_APP
16862 +.L203:
16863 +       jmp     .L203
16864 +.L201:
16865 +       .loc 1 476 0
16866 +       cmpl    %edx, 8(%eax)
16867 +       jne     .L204
16868 +       cmpl    $0, (%eax)
16869 +       jne     .L209
16870 +.L204:
16871 +       .loc 1 479 0
16872 +       jmp     __mod_timer
16873 +.LVL253:
16874 +.L209:
16875 +       .loc 1 480 0
16876 +       movl    $1, %eax
16877 +.LVL254:
16878 +       ret
16879 +.LFE895:
16880 +       .size   mod_timer, .-mod_timer
16881 +.globl del_timer
16882 +       .type   del_timer, @function
16883 +del_timer:
16884 +.LFB896:
16885 +       .loc 1 496 0
16886 +.LVL255:
16887 +       pushl   %esi
16888 +.LCFI51:
16889 +       .loc 1 502 0
16890 +       xorl    %esi, %esi
16891 +.LVL256:
16892 +       .loc 1 496 0
16893 +       pushl   %ebx
16894 +.LCFI52:
16895 +       movl    %eax, %ebx
16896 +       subl    $4, %esp
16897 +.LCFI53:
16898 +       .loc 1 502 0
16899 +       cmpl    $0, (%eax)
16900 +       je      .L213
16901 +       .loc 1 503 0
16902 +       movl    %esp, %edx
16903 +       call    lock_timer_base
16904 +.LVL257:
16905 +       .loc 1 504 0
16906 +       cmpl    $0, (%ebx)
16907 +       .loc 1 503 0
16908 +       movl    %eax, %ecx
16909 +.LVL258:
16910 +       .loc 1 504 0
16911 +       je      .L216
16912 +.LBB533:
16913 +.LBB534:
16914 +       .loc 1 342 0
16915 +       movl    (%ebx), %edx
16916 +.LVL259:
16917 +       .loc 1 345 0
16918 +       movw    $1, %si
16919 +.LVL260:
16920 +       .loc 1 342 0
16921 +       movl    4(%ebx), %eax
16922 +.LVL261:
16923 +.LBB535:
16924 +.LBB536:
16925 +       .loc 2 157 0
16926 +       movl    %eax, 4(%edx)
16927 +       .loc 2 158 0
16928 +       movl    %edx, (%eax)
16929 +.LBE536:
16930 +.LBE535:
16931 +       .loc 1 345 0
16932 +       movl    $2097664, 4(%ebx)
16933 +       .loc 1 344 0
16934 +       movl    $0, (%ebx)
16935 +.LVL262:
16936 +.L216:
16937 +.LVL263:
16938 +.LBE534:
16939 +.LBE533:
16940 +       .loc 1 508 0
16941 +       movl    (%esp), %edx
16942 +.LVL264:
16943 +       movl    %ecx, %eax
16944 +.LVL265:
16945 +       call    _spin_unlock_irqrestore
16946 +.LVL266:
16947 +.L213:
16948 +       .loc 1 512 0
16949 +       popl    %edx
16950 +       movl    %esi, %eax
16951 +       popl    %ebx
16952 +.LVL267:
16953 +       popl    %esi
16954 +.LVL268:
16955 +       ret
16956 +.LFE896:
16957 +       .size   del_timer, .-del_timer
16958 +.globl add_timer_on
16959 +       .type   add_timer_on, @function
16960 +add_timer_on:
16961 +.LFB894:
16962 +       .loc 1 433 0
16963 +.LVL269:
16964 +       pushl   %edi
16965 +.LCFI54:
16966 +       .loc 1 434 0
16967 +       movl    __per_cpu_offset(,%edx,4), %edx
16968 +.LVL270:
16969 +       .loc 1 433 0
16970 +       pushl   %esi
16971 +.LCFI55:
16972 +       movl    %eax, %esi
16973 +       pushl   %ebx
16974 +.LCFI56:
16975 +.LBB546:
16976 +       .loc 1 434 0
16977 +       movl    $per_cpu__tvec_bases, %eax
16978 +.LVL271:
16979 +.LBE546:
16980 +       .loc 1 438 0
16981 +       cmpl    $0, (%esi)
16982 +       .loc 1 434 0
16983 +       movl    (%eax,%edx), %edi
16984 +.LVL272:
16985 +       .loc 1 438 0
16986 +       jne     .L226
16987 +       cmpl    $0, 12(%esi)
16988 +       jne     .L222
16989 +.L226:
16990 +#APP
16991 +       1:      ud2
16992 +.pushsection __bug_table,"a"
16993 +2:     .long 1b, .LC0
16994 +       .word 438, 0
16995 +       .org 2b+12
16996 +.popsection
16997 +#NO_APP
16998 +.L224:
16999 +       jmp     .L224
17000 +.L222:
17001 +       .loc 1 439 0
17002 +       movl    %edi, %eax
17003 +       call    _spin_lock_irqsave
17004 +       .loc 1 441 0
17005 +       movl    %esi, %edx
17006 +       .loc 1 439 0
17007 +       movl    %eax, %ebx
17008 +.LVL273:
17009 +.LBB547:
17010 +.LBB548:
17011 +       .loc 1 113 0
17012 +       movl    20(%esi), %eax
17013 +       andl    $1, %eax
17014 +       orl     %edi, %eax
17015 +       movl    %eax, 20(%esi)
17016 +.LBE548:
17017 +.LBE547:
17018 +       .loc 1 441 0
17019 +       movl    %edi, %eax
17020 +       call    internal_add_timer
17021 +       .loc 1 442 0
17022 +       movl    %ebx, %edx
17023 +       movl    %edi, %eax
17024 +       .loc 1 443 0
17025 +       popl    %ebx
17026 +.LVL274:
17027 +       popl    %esi
17028 +.LVL275:
17029 +       popl    %edi
17030 +.LVL276:
17031 +       .loc 1 442 0
17032 +       jmp     _spin_unlock_irqrestore
17033 +.LVL277:
17034 +.LFE894:
17035 +       .size   add_timer_on, .-add_timer_on
17036 +.globl jiffies_64
17037 +       .section        .data.cacheline_aligned,"aw",@progbits
17038 +       .align 128
17039 +       .type   jiffies_64, @object
17040 +       .size   jiffies_64, 8
17041 +jiffies_64:
17042 +       .long   -75000
17043 +       .long   0
17044 +       .section        __ksymtab,"a",@progbits
17045 +       .align 4
17046 +       .type   __ksymtab_jiffies_64, @object
17047 +       .size   __ksymtab_jiffies_64, 8
17048 +__ksymtab_jiffies_64:
17049 +       .long   jiffies_64
17050 +       .long   __kstrtab_jiffies_64
17051 +       .align 4
17052 +       .type   __ksymtab_boot_tvec_bases, @object
17053 +       .size   __ksymtab_boot_tvec_bases, 8
17054 +__ksymtab_boot_tvec_bases:
17055 +       .long   boot_tvec_bases
17056 +       .long   __kstrtab_boot_tvec_bases
17057 +       .section        __ksymtab_gpl,"a",@progbits
17058 +       .align 4
17059 +       .type   __ksymtab___round_jiffies, @object
17060 +       .size   __ksymtab___round_jiffies, 8
17061 +__ksymtab___round_jiffies:
17062 +       .long   __round_jiffies
17063 +       .long   __kstrtab___round_jiffies
17064 +       .align 4
17065 +       .type   __ksymtab___round_jiffies_relative, @object
17066 +       .size   __ksymtab___round_jiffies_relative, 8
17067 +__ksymtab___round_jiffies_relative:
17068 +       .long   __round_jiffies_relative
17069 +       .long   __kstrtab___round_jiffies_relative
17070 +       .align 4
17071 +       .type   __ksymtab_round_jiffies, @object
17072 +       .size   __ksymtab_round_jiffies, 8
17073 +__ksymtab_round_jiffies:
17074 +       .long   round_jiffies
17075 +       .long   __kstrtab_round_jiffies
17076 +       .align 4
17077 +       .type   __ksymtab_round_jiffies_relative, @object
17078 +       .size   __ksymtab_round_jiffies_relative, 8
17079 +__ksymtab_round_jiffies_relative:
17080 +       .long   round_jiffies_relative
17081 +       .long   __kstrtab_round_jiffies_relative
17082 +       .section        __ksymtab
17083 +       .align 4
17084 +       .type   __ksymtab_init_timer, @object
17085 +       .size   __ksymtab_init_timer, 8
17086 +__ksymtab_init_timer:
17087 +       .long   init_timer
17088 +       .long   __kstrtab_init_timer
17089 +       .align 4
17090 +       .type   __ksymtab_init_timer_deferrable, @object
17091 +       .size   __ksymtab_init_timer_deferrable, 8
17092 +__ksymtab_init_timer_deferrable:
17093 +       .long   init_timer_deferrable
17094 +       .long   __kstrtab_init_timer_deferrable
17095 +       .align 4
17096 +       .type   __ksymtab___mod_timer, @object
17097 +       .size   __ksymtab___mod_timer, 8
17098 +__ksymtab___mod_timer:
17099 +       .long   __mod_timer
17100 +       .long   __kstrtab___mod_timer
17101 +       .align 4
17102 +       .type   __ksymtab_mod_timer, @object
17103 +       .size   __ksymtab_mod_timer, 8
17104 +__ksymtab_mod_timer:
17105 +       .long   mod_timer
17106 +       .long   __kstrtab_mod_timer
17107 +       .align 4
17108 +       .type   __ksymtab_del_timer, @object
17109 +       .size   __ksymtab_del_timer, 8
17110 +__ksymtab_del_timer:
17111 +       .long   del_timer
17112 +       .long   __kstrtab_del_timer
17113 +       .align 4
17114 +       .type   __ksymtab_try_to_del_timer_sync, @object
17115 +       .size   __ksymtab_try_to_del_timer_sync, 8
17116 +__ksymtab_try_to_del_timer_sync:
17117 +       .long   try_to_del_timer_sync
17118 +       .long   __kstrtab_try_to_del_timer_sync
17119 +       .align 4
17120 +       .type   __ksymtab_del_timer_sync, @object
17121 +       .size   __ksymtab_del_timer_sync, 8
17122 +__ksymtab_del_timer_sync:
17123 +       .long   del_timer_sync
17124 +       .long   __kstrtab_del_timer_sync
17125 +       .align 4
17126 +       .type   __ksymtab_avenrun, @object
17127 +       .size   __ksymtab_avenrun, 8
17128 +__ksymtab_avenrun:
17129 +       .long   avenrun
17130 +       .long   __kstrtab_avenrun
17131 +       .align 4
17132 +       .type   __ksymtab_schedule_timeout, @object
17133 +       .size   __ksymtab_schedule_timeout, 8
17134 +__ksymtab_schedule_timeout:
17135 +       .long   schedule_timeout
17136 +       .long   __kstrtab_schedule_timeout
17137 +       .align 4
17138 +       .type   __ksymtab_schedule_timeout_interruptible, @object
17139 +       .size   __ksymtab_schedule_timeout_interruptible, 8
17140 +__ksymtab_schedule_timeout_interruptible:
17141 +       .long   schedule_timeout_interruptible
17142 +       .long   __kstrtab_schedule_timeout_interruptible
17143 +       .align 4
17144 +       .type   __ksymtab_schedule_timeout_uninterruptible, @object
17145 +       .size   __ksymtab_schedule_timeout_uninterruptible, 8
17146 +__ksymtab_schedule_timeout_uninterruptible:
17147 +       .long   schedule_timeout_uninterruptible
17148 +       .long   __kstrtab_schedule_timeout_uninterruptible
17149 +       .align 4
17150 +       .type   __ksymtab_msleep, @object
17151 +       .size   __ksymtab_msleep, 8
17152 +__ksymtab_msleep:
17153 +       .long   msleep
17154 +       .long   __kstrtab_msleep
17155 +       .align 4
17156 +       .type   __ksymtab_msleep_interruptible, @object
17157 +       .size   __ksymtab_msleep_interruptible, 8
17158 +__ksymtab_msleep_interruptible:
17159 +       .long   msleep_interruptible
17160 +       .long   __kstrtab_msleep_interruptible
17161 +       .section        .init.data,"aw",@progbits
17162 +       .align 4
17163 +       .type   timers_nb, @object
17164 +       .size   timers_nb, 12
17165 +timers_nb:
17166 +       .long   timer_cpu_notify
17167 +       .zero   8
17168 +       .section        .data.percpu,"aw",@progbits
17169 +       .align 4
17170 +       .type   per_cpu__tvec_bases, @object
17171 +       .size   per_cpu__tvec_bases, 4
17172 +per_cpu__tvec_bases:
17173 +       .long   boot_tvec_bases
17174 +       .local  boot_done.19029
17175 +       .comm   boot_done.19029,1,1
17176 +       .section        .rodata
17177 +       .type   __func__.19031, @object
17178 +       .size   __func__.19031, 16
17179 +__func__.19031:
17180 +       .string "init_timers_cpu"
17181 +       .local  tvec_base_done.19028
17182 +       .comm   tvec_base_done.19028,32,32
17183 +       .data
17184 +       .align 4
17185 +       .type   count.18791, @object
17186 +       .size   count.18791, 4
17187 +count.18791:
17188 +       .long   1250
17189 +.globl boot_tvec_bases
17190 +       .bss
17191 +       .align 128
17192 +       .type   boot_tvec_bases, @object
17193 +       .size   boot_tvec_bases, 4224
17194 +boot_tvec_bases:
17195 +       .zero   4224
17196 +.globl avenrun
17197 +       .align 4
17198 +       .type   avenrun, @object
17199 +       .size   avenrun, 12
17200 +avenrun:
17201 +       .zero   12
17202 +.globl rec_event
17203 +       .align 4
17204 +       .type   rec_event, @object
17205 +       .size   rec_event, 4
17206 +rec_event:
17207 +       .zero   4
17208 +       .section        __ksymtab_strings,"a",@progbits
17209 +       .type   __kstrtab_jiffies_64, @object
17210 +       .size   __kstrtab_jiffies_64, 11
17211 +__kstrtab_jiffies_64:
17212 +       .string "jiffies_64"
17213 +       .type   __kstrtab_boot_tvec_bases, @object
17214 +       .size   __kstrtab_boot_tvec_bases, 16
17215 +__kstrtab_boot_tvec_bases:
17216 +       .string "boot_tvec_bases"
17217 +       .type   __kstrtab___round_jiffies, @object
17218 +       .size   __kstrtab___round_jiffies, 16
17219 +__kstrtab___round_jiffies:
17220 +       .string "__round_jiffies"
17221 +       .type   __kstrtab___round_jiffies_relative, @object
17222 +       .size   __kstrtab___round_jiffies_relative, 25
17223 +__kstrtab___round_jiffies_relative:
17224 +       .string "__round_jiffies_relative"
17225 +       .type   __kstrtab_round_jiffies, @object
17226 +       .size   __kstrtab_round_jiffies, 14
17227 +__kstrtab_round_jiffies:
17228 +       .string "round_jiffies"
17229 +       .type   __kstrtab_round_jiffies_relative, @object
17230 +       .size   __kstrtab_round_jiffies_relative, 23
17231 +__kstrtab_round_jiffies_relative:
17232 +       .string "round_jiffies_relative"
17233 +       .type   __kstrtab_init_timer, @object
17234 +       .size   __kstrtab_init_timer, 11
17235 +__kstrtab_init_timer:
17236 +       .string "init_timer"
17237 +       .type   __kstrtab_init_timer_deferrable, @object
17238 +       .size   __kstrtab_init_timer_deferrable, 22
17239 +__kstrtab_init_timer_deferrable:
17240 +       .string "init_timer_deferrable"
17241 +       .type   __kstrtab___mod_timer, @object
17242 +       .size   __kstrtab___mod_timer, 12
17243 +__kstrtab___mod_timer:
17244 +       .string "__mod_timer"
17245 +       .type   __kstrtab_mod_timer, @object
17246 +       .size   __kstrtab_mod_timer, 10
17247 +__kstrtab_mod_timer:
17248 +       .string "mod_timer"
17249 +       .type   __kstrtab_del_timer, @object
17250 +       .size   __kstrtab_del_timer, 10
17251 +__kstrtab_del_timer:
17252 +       .string "del_timer"
17253 +       .type   __kstrtab_try_to_del_timer_sync, @object
17254 +       .size   __kstrtab_try_to_del_timer_sync, 22
17255 +__kstrtab_try_to_del_timer_sync:
17256 +       .string "try_to_del_timer_sync"
17257 +       .type   __kstrtab_del_timer_sync, @object
17258 +       .size   __kstrtab_del_timer_sync, 15
17259 +__kstrtab_del_timer_sync:
17260 +       .string "del_timer_sync"
17261 +       .type   __kstrtab_avenrun, @object
17262 +       .size   __kstrtab_avenrun, 8
17263 +__kstrtab_avenrun:
17264 +       .string "avenrun"
17265 +       .type   __kstrtab_schedule_timeout, @object
17266 +       .size   __kstrtab_schedule_timeout, 17
17267 +__kstrtab_schedule_timeout:
17268 +       .string "schedule_timeout"
17269 +       .type   __kstrtab_schedule_timeout_interruptible, @object
17270 +       .size   __kstrtab_schedule_timeout_interruptible, 31
17271 +__kstrtab_schedule_timeout_interruptible:
17272 +       .string "schedule_timeout_interruptible"
17273 +       .align 32
17274 +       .type   __kstrtab_schedule_timeout_uninterruptible, @object
17275 +       .size   __kstrtab_schedule_timeout_uninterruptible, 33
17276 +__kstrtab_schedule_timeout_uninterruptible:
17277 +       .string "schedule_timeout_uninterruptible"
17278 +       .type   __kstrtab_msleep, @object
17279 +       .size   __kstrtab_msleep, 7
17280 +__kstrtab_msleep:
17281 +       .string "msleep"
17282 +       .type   __kstrtab_msleep_interruptible, @object
17283 +       .size   __kstrtab_msleep_interruptible, 21
17284 +__kstrtab_msleep_interruptible:
17285 +       .string "msleep_interruptible"
17286 +       .weak   xtime_lock
17287 +       .section        .debug_frame,"",@progbits
17288 +.Lframe0:
17289 +       .long   .LECIE0-.LSCIE0
17290 +.LSCIE0:
17291 +       .long   0xffffffff
17292 +       .byte   0x1
17293 +       .string ""
17294 +       .uleb128 0x1
17295 +       .sleb128 -4
17296 +       .byte   0x8
17297 +       .byte   0xc
17298 +       .uleb128 0x4
17299 +       .uleb128 0x4
17300 +       .byte   0x88
17301 +       .uleb128 0x1
17302 +       .align 4
17303 +.LECIE0:
17304 +.LSFDE0:
17305 +       .long   .LEFDE0-.LASFDE0
17306 +.LASFDE0:
17307 +       .long   .Lframe0
17308 +       .long   .LFB883
17309 +       .long   .LFE883-.LFB883
17310 +       .byte   0x4
17311 +       .long   .LCFI0-.LFB883
17312 +       .byte   0xe
17313 +       .uleb128 0x8
17314 +       .byte   0x4
17315 +       .long   .LCFI1-.LCFI0
17316 +       .byte   0xe
17317 +       .uleb128 0xc
17318 +       .byte   0x86
17319 +       .uleb128 0x3
17320 +       .byte   0x87
17321 +       .uleb128 0x2
17322 +       .byte   0x4
17323 +       .long   .LCFI2-.LCFI1
17324 +       .byte   0xe
17325 +       .uleb128 0x10
17326 +       .byte   0x83
17327 +       .uleb128 0x4
17328 +       .byte   0x4
17329 +       .long   .LCFI3-.LCFI2
17330 +       .byte   0xe
17331 +       .uleb128 0x14
17332 +       .align 4
17333 +.LEFDE0:
17334 +.LSFDE2:
17335 +       .long   .LEFDE2-.LASFDE2
17336 +.LASFDE2:
17337 +       .long   .Lframe0
17338 +       .long   .LFB884
17339 +       .long   .LFE884-.LFB884
17340 +       .align 4
17341 +.LEFDE2:
17342 +.LSFDE4:
17343 +       .long   .LEFDE4-.LASFDE4
17344 +.LASFDE4:
17345 +       .long   .Lframe0
17346 +       .long   .LFB885
17347 +       .long   .LFE885-.LFB885
17348 +       .align 4
17349 +.LEFDE4:
17350 +.LSFDE6:
17351 +       .long   .LEFDE6-.LASFDE6
17352 +.LASFDE6:
17353 +       .long   .Lframe0
17354 +       .long   .LFB886
17355 +       .long   .LFE886-.LFB886
17356 +       .align 4
17357 +.LEFDE6:
17358 +.LSFDE8:
17359 +       .long   .LEFDE8-.LASFDE8
17360 +.LASFDE8:
17361 +       .long   .Lframe0
17362 +       .long   .LFB888
17363 +       .long   .LFE888-.LFB888
17364 +       .byte   0x4
17365 +       .long   .LCFI4-.LFB888
17366 +       .byte   0xe
17367 +       .uleb128 0x8
17368 +       .byte   0x4
17369 +       .long   .LCFI5-.LCFI4
17370 +       .byte   0xe
17371 +       .uleb128 0xc
17372 +       .byte   0x83
17373 +       .uleb128 0x3
17374 +       .byte   0x86
17375 +       .uleb128 0x2
17376 +       .align 4
17377 +.LEFDE8:
17378 +.LSFDE10:
17379 +       .long   .LEFDE10-.LASFDE10
17380 +.LASFDE10:
17381 +       .long   .Lframe0
17382 +       .long   .LFB889
17383 +       .long   .LFE889-.LFB889
17384 +       .align 4
17385 +.LEFDE10:
17386 +.LSFDE12:
17387 +       .long   .LEFDE12-.LASFDE12
17388 +.LASFDE12:
17389 +       .long   .Lframe0
17390 +       .long   .LFB890
17391 +       .long   .LFE890-.LFB890
17392 +       .byte   0x4
17393 +       .long   .LCFI6-.LFB890
17394 +       .byte   0xe
17395 +       .uleb128 0x8
17396 +       .byte   0x83
17397 +       .uleb128 0x2
17398 +       .align 4
17399 +.LEFDE12:
17400 +.LSFDE14:
17401 +       .long   .LEFDE14-.LASFDE14
17402 +.LASFDE14:
17403 +       .long   .Lframe0
17404 +       .long   .LFB899
17405 +       .long   .LFE899-.LFB899
17406 +       .byte   0x4
17407 +       .long   .LCFI7-.LFB899
17408 +       .byte   0xe
17409 +       .uleb128 0x8
17410 +       .byte   0x87
17411 +       .uleb128 0x2
17412 +       .byte   0x4
17413 +       .long   .LCFI8-.LCFI7
17414 +       .byte   0xe
17415 +       .uleb128 0xc
17416 +       .byte   0x86
17417 +       .uleb128 0x3
17418 +       .byte   0x4
17419 +       .long   .LCFI9-.LCFI8
17420 +       .byte   0xe
17421 +       .uleb128 0x10
17422 +       .byte   0x4
17423 +       .long   .LCFI10-.LCFI9
17424 +       .byte   0xe
17425 +       .uleb128 0x18
17426 +       .byte   0x83
17427 +       .uleb128 0x4
17428 +       .align 4
17429 +.LEFDE14:
17430 +.LSFDE16:
17431 +       .long   .LEFDE16-.LASFDE16
17432 +.LASFDE16:
17433 +       .long   .Lframe0
17434 +       .long   .LFB923
17435 +       .long   .LFE923-.LFB923
17436 +       .byte   0x4
17437 +       .long   .LCFI11-.LFB923
17438 +       .byte   0xe
17439 +       .uleb128 0x8
17440 +       .byte   0x4
17441 +       .long   .LCFI12-.LCFI11
17442 +       .byte   0xe
17443 +       .uleb128 0xc
17444 +       .byte   0x4
17445 +       .long   .LCFI13-.LCFI12
17446 +       .byte   0xe
17447 +       .uleb128 0x10
17448 +       .byte   0x86
17449 +       .uleb128 0x4
17450 +       .byte   0x87
17451 +       .uleb128 0x3
17452 +       .byte   0x85
17453 +       .uleb128 0x2
17454 +       .byte   0x4
17455 +       .long   .LCFI14-.LCFI13
17456 +       .byte   0xe
17457 +       .uleb128 0x14
17458 +       .byte   0x4
17459 +       .long   .LCFI15-.LCFI14
17460 +       .byte   0xe
17461 +       .uleb128 0x24
17462 +       .byte   0x83
17463 +       .uleb128 0x5
17464 +       .align 4
17465 +.LEFDE16:
17466 +.LSFDE18:
17467 +       .long   .LEFDE18-.LASFDE18
17468 +.LASFDE18:
17469 +       .long   .Lframe0
17470 +       .long   .LFB924
17471 +       .long   .LFE924-.LFB924
17472 +       .align 4
17473 +.LEFDE18:
17474 +.LSFDE20:
17475 +       .long   .LEFDE20-.LASFDE20
17476 +.LASFDE20:
17477 +       .long   .Lframe0
17478 +       .long   .LFB904
17479 +       .long   .LFE904-.LFB904
17480 +       .byte   0x4
17481 +       .long   .LCFI16-.LFB904
17482 +       .byte   0xe
17483 +       .uleb128 0x8
17484 +       .byte   0x4
17485 +       .long   .LCFI17-.LCFI16
17486 +       .byte   0xe
17487 +       .uleb128 0xc
17488 +       .byte   0x4
17489 +       .long   .LCFI18-.LCFI17
17490 +       .byte   0xe
17491 +       .uleb128 0x10
17492 +       .byte   0x4
17493 +       .long   .LCFI19-.LCFI18
17494 +       .byte   0xe
17495 +       .uleb128 0x28
17496 +       .byte   0x83
17497 +       .uleb128 0x4
17498 +       .byte   0x86
17499 +       .uleb128 0x3
17500 +       .byte   0x87
17501 +       .uleb128 0x2
17502 +       .align 4
17503 +.LEFDE20:
17504 +.LSFDE22:
17505 +       .long   .LEFDE22-.LASFDE22
17506 +.LASFDE22:
17507 +       .long   .Lframe0
17508 +       .long   .LFB920
17509 +       .long   .LFE920-.LFB920
17510 +       .byte   0x4
17511 +       .long   .LCFI20-.LFB920
17512 +       .byte   0xe
17513 +       .uleb128 0x8
17514 +       .byte   0x4
17515 +       .long   .LCFI21-.LCFI20
17516 +       .byte   0xe
17517 +       .uleb128 0xc
17518 +       .byte   0x83
17519 +       .uleb128 0x3
17520 +       .byte   0x87
17521 +       .uleb128 0x2
17522 +       .byte   0x4
17523 +       .long   .LCFI22-.LCFI21
17524 +       .byte   0xe
17525 +       .uleb128 0x14
17526 +       .align 4
17527 +.LEFDE22:
17528 +.LSFDE24:
17529 +       .long   .LEFDE24-.LASFDE24
17530 +.LASFDE24:
17531 +       .long   .Lframe0
17532 +       .long   .LFB921
17533 +       .long   .LFE921-.LFB921
17534 +       .byte   0x4
17535 +       .long   .LCFI23-.LFB921
17536 +       .byte   0xe
17537 +       .uleb128 0x8
17538 +       .byte   0x4
17539 +       .long   .LCFI24-.LCFI23
17540 +       .byte   0xe
17541 +       .uleb128 0x48
17542 +       .byte   0x83
17543 +       .uleb128 0x2
17544 +       .align 4
17545 +.LEFDE24:
17546 +.LSFDE26:
17547 +       .long   .LEFDE26-.LASFDE26
17548 +.LASFDE26:
17549 +       .long   .Lframe0
17550 +       .long   .LFB915
17551 +       .long   .LFE915-.LFB915
17552 +       .align 4
17553 +.LEFDE26:
17554 +.LSFDE28:
17555 +       .long   .LEFDE28-.LASFDE28
17556 +.LASFDE28:
17557 +       .long   .Lframe0
17558 +       .long   .LFB908
17559 +       .long   .LFE908-.LFB908
17560 +       .align 4
17561 +.LEFDE28:
17562 +.LSFDE30:
17563 +       .long   .LEFDE30-.LASFDE30
17564 +.LASFDE30:
17565 +       .long   .Lframe0
17566 +       .long   .LFB907
17567 +       .long   .LFE907-.LFB907
17568 +       .byte   0x4
17569 +       .long   .LCFI25-.LFB907
17570 +       .byte   0xe
17571 +       .uleb128 0x8
17572 +       .byte   0x4
17573 +       .long   .LCFI26-.LCFI25
17574 +       .byte   0xe
17575 +       .uleb128 0xc
17576 +       .byte   0x4
17577 +       .long   .LCFI27-.LCFI26
17578 +       .byte   0xe
17579 +       .uleb128 0x10
17580 +       .byte   0x4
17581 +       .long   .LCFI28-.LCFI27
17582 +       .byte   0xe
17583 +       .uleb128 0x14
17584 +       .byte   0x83
17585 +       .uleb128 0x5
17586 +       .byte   0x86
17587 +       .uleb128 0x4
17588 +       .byte   0x87
17589 +       .uleb128 0x3
17590 +       .byte   0x85
17591 +       .uleb128 0x2
17592 +       .byte   0x4
17593 +       .long   .LCFI29-.LCFI28
17594 +       .byte   0xe
17595 +       .uleb128 0x18
17596 +       .align 4
17597 +.LEFDE30:
17598 +.LSFDE32:
17599 +       .long   .LEFDE32-.LASFDE32
17600 +.LASFDE32:
17601 +       .long   .Lframe0
17602 +       .long   .LFB905
17603 +       .long   .LFE905-.LFB905
17604 +       .align 4
17605 +.LEFDE32:
17606 +.LSFDE34:
17607 +       .long   .LEFDE34-.LASFDE34
17608 +.LASFDE34:
17609 +       .long   .Lframe0
17610 +       .long   .LFB892
17611 +       .long   .LFE892-.LFB892
17612 +       .byte   0x4
17613 +       .long   .LCFI30-.LFB892
17614 +       .byte   0xe
17615 +       .uleb128 0x8
17616 +       .byte   0x85
17617 +       .uleb128 0x2
17618 +       .byte   0x4
17619 +       .long   .LCFI31-.LCFI30
17620 +       .byte   0xe
17621 +       .uleb128 0xc
17622 +       .byte   0x87
17623 +       .uleb128 0x3
17624 +       .byte   0x4
17625 +       .long   .LCFI32-.LCFI31
17626 +       .byte   0xe
17627 +       .uleb128 0x10
17628 +       .byte   0x4
17629 +       .long   .LCFI33-.LCFI32
17630 +       .byte   0xe
17631 +       .uleb128 0x14
17632 +       .byte   0x83
17633 +       .uleb128 0x5
17634 +       .byte   0x86
17635 +       .uleb128 0x4
17636 +       .align 4
17637 +.LEFDE34:
17638 +.LSFDE36:
17639 +       .long   .LEFDE36-.LASFDE36
17640 +.LASFDE36:
17641 +       .long   .Lframe0
17642 +       .long   .LFB897
17643 +       .long   .LFE897-.LFB897
17644 +       .byte   0x4
17645 +       .long   .LCFI34-.LFB897
17646 +       .byte   0xe
17647 +       .uleb128 0x8
17648 +       .byte   0x86
17649 +       .uleb128 0x2
17650 +       .byte   0x4
17651 +       .long   .LCFI35-.LCFI34
17652 +       .byte   0xe
17653 +       .uleb128 0xc
17654 +       .byte   0x83
17655 +       .uleb128 0x3
17656 +       .byte   0x4
17657 +       .long   .LCFI36-.LCFI35
17658 +       .byte   0xe
17659 +       .uleb128 0x10
17660 +       .align 4
17661 +.LEFDE36:
17662 +.LSFDE38:
17663 +       .long   .LEFDE38-.LASFDE38
17664 +.LASFDE38:
17665 +       .long   .Lframe0
17666 +       .long   .LFB898
17667 +       .long   .LFE898-.LFB898
17668 +       .byte   0x4
17669 +       .long   .LCFI37-.LFB898
17670 +       .byte   0xe
17671 +       .uleb128 0x8
17672 +       .byte   0x83
17673 +       .uleb128 0x2
17674 +       .align 4
17675 +.LEFDE38:
17676 +.LSFDE40:
17677 +       .long   .LEFDE40-.LASFDE40
17678 +.LASFDE40:
17679 +       .long   .Lframe0
17680 +       .long   .LFB893
17681 +       .long   .LFE893-.LFB893
17682 +       .byte   0x4
17683 +       .long   .LCFI38-.LFB893
17684 +       .byte   0xe
17685 +       .uleb128 0x8
17686 +       .byte   0x85
17687 +       .uleb128 0x2
17688 +       .byte   0x4
17689 +       .long   .LCFI39-.LCFI38
17690 +       .byte   0xe
17691 +       .uleb128 0xc
17692 +       .byte   0x4
17693 +       .long   .LCFI40-.LCFI39
17694 +       .byte   0xe
17695 +       .uleb128 0x10
17696 +       .byte   0x4
17697 +       .long   .LCFI41-.LCFI40
17698 +       .byte   0xe
17699 +       .uleb128 0x14
17700 +       .byte   0x83
17701 +       .uleb128 0x5
17702 +       .byte   0x86
17703 +       .uleb128 0x4
17704 +       .byte   0x87
17705 +       .uleb128 0x3
17706 +       .byte   0x4
17707 +       .long   .LCFI42-.LCFI41
17708 +       .byte   0xe
17709 +       .uleb128 0x1c
17710 +       .align 4
17711 +.LEFDE40:
17712 +.LSFDE42:
17713 +       .long   .LEFDE42-.LASFDE42
17714 +.LASFDE42:
17715 +       .long   .Lframe0
17716 +       .long   .LFB916
17717 +       .long   .LFE916-.LFB916
17718 +       .byte   0x4
17719 +       .long   .LCFI43-.LFB916
17720 +       .byte   0xe
17721 +       .uleb128 0x8
17722 +       .byte   0x4
17723 +       .long   .LCFI44-.LCFI43
17724 +       .byte   0xe
17725 +       .uleb128 0xc
17726 +       .byte   0x83
17727 +       .uleb128 0x3
17728 +       .byte   0x86
17729 +       .uleb128 0x2
17730 +       .byte   0x4
17731 +       .long   .LCFI45-.LCFI44
17732 +       .byte   0xe
17733 +       .uleb128 0x2c
17734 +       .align 4
17735 +.LEFDE42:
17736 +.LSFDE44:
17737 +       .long   .LEFDE44-.LASFDE44
17738 +.LASFDE44:
17739 +       .long   .Lframe0
17740 +       .long   .LFB918
17741 +       .long   .LFE918-.LFB918
17742 +       .align 4
17743 +.LEFDE44:
17744 +.LSFDE46:
17745 +       .long   .LEFDE46-.LASFDE46
17746 +.LASFDE46:
17747 +       .long   .Lframe0
17748 +       .long   .LFB925
17749 +       .long   .LFE925-.LFB925
17750 +       .align 4
17751 +.LEFDE46:
17752 +.LSFDE48:
17753 +       .long   .LEFDE48-.LASFDE48
17754 +.LASFDE48:
17755 +       .long   .Lframe0
17756 +       .long   .LFB917
17757 +       .long   .LFE917-.LFB917
17758 +       .align 4
17759 +.LEFDE48:
17760 +.LSFDE50:
17761 +       .long   .LEFDE50-.LASFDE50
17762 +.LASFDE50:
17763 +       .long   .Lframe0
17764 +       .long   .LFB926
17765 +       .long   .LFE926-.LFB926
17766 +       .align 4
17767 +.LEFDE50:
17768 +.LSFDE52:
17769 +       .long   .LEFDE52-.LASFDE52
17770 +.LASFDE52:
17771 +       .long   .Lframe0
17772 +       .long   .LFB901
17773 +       .long   .LFE901-.LFB901
17774 +       .byte   0x4
17775 +       .long   .LCFI46-.LFB901
17776 +       .byte   0xe
17777 +       .uleb128 0x8
17778 +       .byte   0x87
17779 +       .uleb128 0x2
17780 +       .byte   0x4
17781 +       .long   .LCFI47-.LCFI46
17782 +       .byte   0xe
17783 +       .uleb128 0xc
17784 +       .byte   0x4
17785 +       .long   .LCFI48-.LCFI47
17786 +       .byte   0xe
17787 +       .uleb128 0x10
17788 +       .byte   0x83
17789 +       .uleb128 0x4
17790 +       .byte   0x86
17791 +       .uleb128 0x3
17792 +       .align 4
17793 +.LEFDE52:
17794 +.LSFDE54:
17795 +       .long   .LEFDE54-.LASFDE54
17796 +.LASFDE54:
17797 +       .long   .Lframe0
17798 +       .long   .LFB909
17799 +       .long   .LFE909-.LFB909
17800 +       .byte   0x4
17801 +       .long   .LCFI49-.LFB909
17802 +       .byte   0xe
17803 +       .uleb128 0x8
17804 +       .byte   0x4
17805 +       .long   .LCFI50-.LCFI49
17806 +       .byte   0xe
17807 +       .uleb128 0x30
17808 +       .byte   0x83
17809 +       .uleb128 0x2
17810 +       .align 4
17811 +.LEFDE54:
17812 +.LSFDE56:
17813 +       .long   .LEFDE56-.LASFDE56
17814 +.LASFDE56:
17815 +       .long   .Lframe0
17816 +       .long   .LFB910
17817 +       .long   .LFE910-.LFB910
17818 +       .align 4
17819 +.LEFDE56:
17820 +.LSFDE58:
17821 +       .long   .LEFDE58-.LASFDE58
17822 +.LASFDE58:
17823 +       .long   .Lframe0
17824 +       .long   .LFB911
17825 +       .long   .LFE911-.LFB911
17826 +       .align 4
17827 +.LEFDE58:
17828 +.LSFDE60:
17829 +       .long   .LEFDE60-.LASFDE60
17830 +.LASFDE60:
17831 +       .long   .Lframe0
17832 +       .long   .LFB912
17833 +       .long   .LFE912-.LFB912
17834 +       .align 4
17835 +.LEFDE60:
17836 +.LSFDE62:
17837 +       .long   .LEFDE62-.LASFDE62
17838 +.LASFDE62:
17839 +       .long   .Lframe0
17840 +       .long   .LFB913
17841 +       .long   .LFE913-.LFB913
17842 +       .align 4
17843 +.LEFDE62:
17844 +.LSFDE64:
17845 +       .long   .LEFDE64-.LASFDE64
17846 +.LASFDE64:
17847 +       .long   .Lframe0
17848 +       .long   .LFB914
17849 +       .long   .LFE914-.LFB914
17850 +       .align 4
17851 +.LEFDE64:
17852 +.LSFDE66:
17853 +       .long   .LEFDE66-.LASFDE66
17854 +.LASFDE66:
17855 +       .long   .Lframe0
17856 +       .long   .LFB919
17857 +       .long   .LFE919-.LFB919
17858 +       .align 4
17859 +.LEFDE66:
17860 +.LSFDE68:
17861 +       .long   .LEFDE68-.LASFDE68
17862 +.LASFDE68:
17863 +       .long   .Lframe0
17864 +       .long   .LFB895
17865 +       .long   .LFE895-.LFB895
17866 +       .align 4
17867 +.LEFDE68:
17868 +.LSFDE70:
17869 +       .long   .LEFDE70-.LASFDE70
17870 +.LASFDE70:
17871 +       .long   .Lframe0
17872 +       .long   .LFB896
17873 +       .long   .LFE896-.LFB896
17874 +       .byte   0x4
17875 +       .long   .LCFI51-.LFB896
17876 +       .byte   0xe
17877 +       .uleb128 0x8
17878 +       .byte   0x86
17879 +       .uleb128 0x2
17880 +       .byte   0x4
17881 +       .long   .LCFI52-.LCFI51
17882 +       .byte   0xe
17883 +       .uleb128 0xc
17884 +       .byte   0x83
17885 +       .uleb128 0x3
17886 +       .byte   0x4
17887 +       .long   .LCFI53-.LCFI52
17888 +       .byte   0xe
17889 +       .uleb128 0x10
17890 +       .align 4
17891 +.LEFDE70:
17892 +.LSFDE72:
17893 +       .long   .LEFDE72-.LASFDE72
17894 +.LASFDE72:
17895 +       .long   .Lframe0
17896 +       .long   .LFB894
17897 +       .long   .LFE894-.LFB894
17898 +       .byte   0x4
17899 +       .long   .LCFI54-.LFB894
17900 +       .byte   0xe
17901 +       .uleb128 0x8
17902 +       .byte   0x4
17903 +       .long   .LCFI55-.LCFI54
17904 +       .byte   0xe
17905 +       .uleb128 0xc
17906 +       .byte   0x86
17907 +       .uleb128 0x3
17908 +       .byte   0x87
17909 +       .uleb128 0x2
17910 +       .byte   0x4
17911 +       .long   .LCFI56-.LCFI55
17912 +       .byte   0xe
17913 +       .uleb128 0x10
17914 +       .byte   0x83
17915 +       .uleb128 0x4
17916 +       .align 4
17917 +.LEFDE72:
17918 +       .file 13 "include/linux/spinlock_types.h"
17919 +       .file 14 "include/asm/spinlock_types.h"
17920 +       .file 15 "include/linux/thread_info.h"
17921 +       .file 16 "include/asm/thread_info.h"
17922 +       .file 17 "include/linux/capability.h"
17923 +       .file 18 "include/asm/atomic.h"
17924 +       .file 19 "include/linux/cpumask.h"
17925 +       .file 20 "include/asm/page.h"
17926 +       .file 21 "include/linux/mm.h"
17927 +       .file 22 "include/linux/rbtree.h"
17928 +       .file 23 "include/linux/prio_tree.h"
17929 +       .file 24 "include/linux/mmzone.h"
17930 +       .file 25 "include/linux/mm_types.h"
17931 +       .file 26 "include/linux/fs.h"
17932 +       .file 27 "include/linux/futex.h"
17933 +       .file 28 "include/linux/types.h"
17934 +       .file 29 "include/asm/posix_types.h"
17935 +       .file 30 "include/asm/types.h"
17936 +       .file 31 "include/linux/time.h"
17937 +       .file 32 "include/linux/mutex.h"
17938 +       .file 33 "include/linux/rwsem.h"
17939 +       .file 34 "include/asm/rwsem.h"
17940 +       .file 35 "include/linux/fs_struct.h"
17941 +       .file 36 "include/linux/dcache.h"
17942 +       .file 37 "include/linux/rcupdate.h"
17943 +       .file 38 "include/linux/sysfs.h"
17944 +       .file 39 "include/linux/namei.h"
17945 +       .file 40 "include/asm/alternative.h"
17946 +       .file 41 "include/linux/module.h"
17947 +       .file 42 "include/linux/kobject.h"
17948 +       .file 43 "include/linux/kref.h"
17949 +       .file 44 "include/linux/wait.h"
17950 +       .file 45 "include/asm/uaccess.h"
17951 +       .file 46 "include/asm/module.h"
17952 +       .file 47 "include/asm-generic/bug.h"
17953 +       .file 48 "include/asm/local.h"
17954 +       .file 49 "include/asm-generic/atomic.h"
17955 +       .file 50 "include/linux/elf.h"
17956 +       .file 51 "include/linux/aio.h"
17957 +       .file 52 "include/linux/workqueue.h"
17958 +       .file 53 "include/linux/aio_abi.h"
17959 +       .file 54 "include/linux/uio.h"
17960 +       .file 55 "include/linux/nfs_fs_i.h"
17961 +       .file 56 "include/linux/kernel.h"
17962 +       .file 57 "include/linux/pid.h"
17963 +       .file 58 "include/linux/lockdep.h"
17964 +       .file 59 "include/linux/quota.h"
17965 +       .file 60 "include/linux/dqblk_xfs.h"
17966 +       .file 61 "include/asm/semaphore.h"
17967 +       .file 62 "include/linux/backing-dev.h"
17968 +       .file 63 "include/linux/dqblk_v1.h"
17969 +       .file 64 "include/linux/dqblk_v2.h"
17970 +       .file 65 "include/linux/stat.h"
17971 +       .file 66 "include/linux/radix-tree.h"
17972 +       .file 67 "include/asm/mmu.h"
17973 +       .file 68 "include/linux/completion.h"
17974 +       .file 69 "include/asm-generic/cputime.h"
17975 +       .file 70 "include/linux/signal.h"
17976 +       .file 71 "include/linux/sem.h"
17977 +       .file 72 "include/asm/math_emu.h"
17978 +       .file 73 "include/asm/vm86.h"
17979 +       .file 74 "include/asm/signal.h"
17980 +       .file 75 "include/linux/hrtimer.h"
17981 +       .file 76 "include/linux/ktime.h"
17982 +       .file 77 "include/linux/resource.h"
17983 +       .file 78 "include/asm-generic/signal.h"
17984 +       .file 79 "include/linux/seccomp.h"
17985 +       .file 80 "include/linux/plist.h"
17986 +       .file 81 "include/linux/swap.h"
17987 +       .file 82 "include/asm-generic/siginfo.h"
17988 +       .file 83 "include/linux/task_io_accounting.h"
17989 +       .file 84 "include/linux/slab.h"
17990 +       .file 85 "include/linux/notifier.h"
17991 +       .file 86 "include/linux/interrupt.h"
17992 +       .file 87 "include/linux/arrays.h"
17993 +       .file 88 "include/asm/percpu.h"
17994 +       .file 89 "include/asm/smp.h"
17995 +       .file 90 "include/linux/timex.h"
17996 +       .file 91 "include/linux/jiffies.h"
17997 +       .file 92 "include/linux/pm.h"
17998 +       .file 93 "include/linux/device.h"
17999 +       .file 94 "include/linux/klist.h"
18000 +       .file 95 "include/asm/device.h"
18001 +       .file 96 "include/asm/fixmap.h"
18002 +       .file 97 "include/asm/acpi.h"
18003 +       .file 98 "include/asm/io_apic.h"
18004 +       .file 99 "include/asm/genapic.h"
18005 +       .file 100 "include/asm/mpspec.h"
18006 +       .file 101 "include/asm/mpspec_def.h"
18007 +       .file 102 "include/linux/kernel_stat.h"
18008 +       .file 103 "include/asm/desc.h"
18009 +       .file 104 "include/asm/irq_regs.h"
18010 +       .file 105 "include/asm/ptrace.h"
18011 +       .file 106 "include/linux/irq.h"
18012 +       .file 107 "include/linux/irqreturn.h"
18013 +       .file 108 "include/linux/profile.h"
18014 +       .file 109 "include/linux/ioport.h"
18015 +       .file 110 "include/linux/vmstat.h"
18016 +       .text
18017 +.Letext0:
18018 +       .section        .debug_loc,"",@progbits
18019 +.Ldebug_loc0:
18020 +.LLST0:
18021 +       .long   .LFB883
18022 +       .long   .LCFI0
18023 +       .value  0x2
18024 +       .byte   0x74
18025 +       .sleb128 4
18026 +       .long   .LCFI0
18027 +       .long   .LCFI1
18028 +       .value  0x2
18029 +       .byte   0x74
18030 +       .sleb128 8
18031 +       .long   .LCFI1
18032 +       .long   .LCFI2
18033 +       .value  0x2
18034 +       .byte   0x74
18035 +       .sleb128 12
18036 +       .long   .LCFI2
18037 +       .long   .LCFI3
18038 +       .value  0x2
18039 +       .byte   0x74
18040 +       .sleb128 16
18041 +       .long   .LCFI3
18042 +       .long   .LFE883
18043 +       .value  0x2
18044 +       .byte   0x74
18045 +       .sleb128 20
18046 +       .long   0x0
18047 +       .long   0x0
18048 +.LLST1:
18049 +       .long   .LVL0
18050 +       .long   .LVL2
18051 +       .value  0x1
18052 +       .byte   0x50
18053 +       .long   .LVL2
18054 +       .long   .LVL3
18055 +       .value  0x1
18056 +       .byte   0x51
18057 +       .long   .LVL3
18058 +       .long   .LVL4
18059 +       .value  0x1
18060 +       .byte   0x53
18061 +       .long   .LVL4
18062 +       .long   .LVL5
18063 +       .value  0x1
18064 +       .byte   0x50
18065 +       .long   .LVL6
18066 +       .long   .LVL7
18067 +       .value  0x1
18068 +       .byte   0x50
18069 +       .long   .LVL7
18070 +       .long   .LVL8
18071 +       .value  0x1
18072 +       .byte   0x52
18073 +       .long   0x0
18074 +       .long   0x0
18075 +.LLST2:
18076 +       .long   .LVL0
18077 +       .long   .LVL1
18078 +       .value  0x1
18079 +       .byte   0x52
18080 +       .long   0x0
18081 +       .long   0x0
18082 +.LLST4:
18083 +       .long   .LVL11
18084 +       .long   .LVL12
18085 +       .value  0x1
18086 +       .byte   0x50
18087 +       .long   0x0
18088 +       .long   0x0
18089 +.LLST5:
18090 +       .long   .LVL11
18091 +       .long   .LVL13
18092 +       .value  0x1
18093 +       .byte   0x52
18094 +       .long   0x0
18095 +       .long   0x0
18096 +.LLST7:
18097 +       .long   .LVL14
18098 +       .long   .LVL16
18099 +       .value  0x1
18100 +       .byte   0x50
18101 +       .long   0x0
18102 +       .long   0x0
18103 +.LLST8:
18104 +       .long   .LVL15
18105 +       .long   .LVL16
18106 +       .value  0x1
18107 +       .byte   0x52
18108 +       .long   0x0
18109 +       .long   0x0
18110 +.LLST10:
18111 +       .long   .LVL17
18112 +       .long   .LVL19
18113 +       .value  0x1
18114 +       .byte   0x50
18115 +       .long   0x0
18116 +       .long   0x0
18117 +.LLST11:
18118 +       .long   .LVL18
18119 +       .long   .LVL19
18120 +       .value  0x1
18121 +       .byte   0x52
18122 +       .long   0x0
18123 +       .long   0x0
18124 +.LLST12:
18125 +       .long   .LFB888
18126 +       .long   .LCFI4
18127 +       .value  0x2
18128 +       .byte   0x74
18129 +       .sleb128 4
18130 +       .long   .LCFI4
18131 +       .long   .LCFI5
18132 +       .value  0x2
18133 +       .byte   0x74
18134 +       .sleb128 8
18135 +       .long   .LCFI5
18136 +       .long   .LFE888
18137 +       .value  0x2
18138 +       .byte   0x74
18139 +       .sleb128 12
18140 +       .long   0x0
18141 +       .long   0x0
18142 +.LLST13:
18143 +       .long   .LVL20
18144 +       .long   .LVL21
18145 +       .value  0x1
18146 +       .byte   0x50
18147 +       .long   .LVL21
18148 +       .long   .LVL35
18149 +       .value  0x1
18150 +       .byte   0x53
18151 +       .long   0x0
18152 +       .long   0x0
18153 +.LLST14:
18154 +       .long   .LVL21
18155 +       .long   .LVL29
18156 +       .value  0x1
18157 +       .byte   0x50
18158 +       .long   .LVL29
18159 +       .long   .LVL30
18160 +       .value  0x1
18161 +       .byte   0x50
18162 +       .long   .LVL32
18163 +       .long   .LVL34
18164 +       .value  0x1
18165 +       .byte   0x50
18166 +       .long   0x0
18167 +       .long   0x0
18168 +.LLST15:
18169 +       .long   .LVL22
18170 +       .long   .LVL23
18171 +       .value  0x1
18172 +       .byte   0x51
18173 +       .long   .LVL24
18174 +       .long   .LVL25
18175 +       .value  0x1
18176 +       .byte   0x51
18177 +       .long   .LVL26
18178 +       .long   .LVL27
18179 +       .value  0x1
18180 +       .byte   0x51
18181 +       .long   .LVL28
18182 +       .long   .LVL31
18183 +       .value  0x1
18184 +       .byte   0x51
18185 +       .long   .LVL32
18186 +       .long   .LVL33
18187 +       .value  0x1
18188 +       .byte   0x51
18189 +       .long   0x0
18190 +       .long   0x0
18191 +.LLST16:
18192 +       .long   .LVL23
18193 +       .long   .LVL24
18194 +       .value  0x1
18195 +       .byte   0x51
18196 +       .long   .LVL25
18197 +       .long   .LVL26
18198 +       .value  0x1
18199 +       .byte   0x51
18200 +       .long   .LVL27
18201 +       .long   .LVL28
18202 +       .value  0x1
18203 +       .byte   0x51
18204 +       .long   .LVL31
18205 +       .long   .LVL32
18206 +       .value  0x1
18207 +       .byte   0x51
18208 +       .long   .LVL33
18209 +       .long   .LFE888
18210 +       .value  0x1
18211 +       .byte   0x51
18212 +       .long   0x0
18213 +       .long   0x0
18214 +.LLST18:
18215 +       .long   .LFB890
18216 +       .long   .LCFI6
18217 +       .value  0x2
18218 +       .byte   0x74
18219 +       .sleb128 4
18220 +       .long   .LCFI6
18221 +       .long   .LFE890
18222 +       .value  0x2
18223 +       .byte   0x74
18224 +       .sleb128 8
18225 +       .long   0x0
18226 +       .long   0x0
18227 +.LLST19:
18228 +       .long   .LVL38
18229 +       .long   .LVL39
18230 +       .value  0x1
18231 +       .byte   0x50
18232 +       .long   .LVL39
18233 +       .long   .LVL40
18234 +       .value  0x1
18235 +       .byte   0x53
18236 +       .long   0x0
18237 +       .long   0x0
18238 +.LLST20:
18239 +       .long   .LFB899
18240 +       .long   .LCFI7
18241 +       .value  0x2
18242 +       .byte   0x74
18243 +       .sleb128 4
18244 +       .long   .LCFI7
18245 +       .long   .LCFI8
18246 +       .value  0x2
18247 +       .byte   0x74
18248 +       .sleb128 8
18249 +       .long   .LCFI8
18250 +       .long   .LCFI9
18251 +       .value  0x2
18252 +       .byte   0x74
18253 +       .sleb128 12
18254 +       .long   .LCFI9
18255 +       .long   .LCFI10
18256 +       .value  0x2
18257 +       .byte   0x74
18258 +       .sleb128 16
18259 +       .long   .LCFI10
18260 +       .long   .LFE899
18261 +       .value  0x2
18262 +       .byte   0x74
18263 +       .sleb128 24
18264 +       .long   0x0
18265 +       .long   0x0
18266 +.LLST21:
18267 +       .long   .LVL41
18268 +       .long   .LVL42
18269 +       .value  0x1
18270 +       .byte   0x50
18271 +       .long   .LVL42
18272 +       .long   .LVL55
18273 +       .value  0x1
18274 +       .byte   0x57
18275 +       .long   0x0
18276 +       .long   0x0
18277 +.LLST22:
18278 +       .long   .LVL41
18279 +       .long   .LVL43
18280 +       .value  0x1
18281 +       .byte   0x52
18282 +       .long   0x0
18283 +       .long   0x0
18284 +.LLST23:
18285 +       .long   .LVL41
18286 +       .long   .LVL47
18287 +       .value  0x1
18288 +       .byte   0x51
18289 +       .long   .LVL47
18290 +       .long   .LVL50
18291 +       .value  0x1
18292 +       .byte   0x56
18293 +       .long   .LVL50
18294 +       .long   .LVL51
18295 +       .value  0x1
18296 +       .byte   0x51
18297 +       .long   .LVL51
18298 +       .long   .LVL54
18299 +       .value  0x1
18300 +       .byte   0x56
18301 +       .long   0x0
18302 +       .long   0x0
18303 +.LLST24:
18304 +       .long   .LVL44
18305 +       .long   .LVL47
18306 +       .value  0x1
18307 +       .byte   0x52
18308 +       .long   .LVL48
18309 +       .long   .LFE899
18310 +       .value  0x1
18311 +       .byte   0x52
18312 +       .long   0x0
18313 +       .long   0x0
18314 +.LLST25:
18315 +       .long   .LVL45
18316 +       .long   .LVL53
18317 +       .value  0x1
18318 +       .byte   0x53
18319 +       .long   0x0
18320 +       .long   0x0
18321 +.LLST26:
18322 +       .long   .LVL42
18323 +       .long   .LVL46
18324 +       .value  0x1
18325 +       .byte   0x50
18326 +       .long   .LVL49
18327 +       .long   .LVL52
18328 +       .value  0x1
18329 +       .byte   0x50
18330 +       .long   0x0
18331 +       .long   0x0
18332 +.LLST27:
18333 +       .long   .LFB923
18334 +       .long   .LCFI11
18335 +       .value  0x2
18336 +       .byte   0x74
18337 +       .sleb128 4
18338 +       .long   .LCFI11
18339 +       .long   .LCFI12
18340 +       .value  0x2
18341 +       .byte   0x74
18342 +       .sleb128 8
18343 +       .long   .LCFI12
18344 +       .long   .LCFI13
18345 +       .value  0x2
18346 +       .byte   0x74
18347 +       .sleb128 12
18348 +       .long   .LCFI13
18349 +       .long   .LCFI14
18350 +       .value  0x2
18351 +       .byte   0x74
18352 +       .sleb128 16
18353 +       .long   .LCFI14
18354 +       .long   .LCFI15
18355 +       .value  0x2
18356 +       .byte   0x74
18357 +       .sleb128 20
18358 +       .long   .LCFI15
18359 +       .long   .LFE923
18360 +       .value  0x2
18361 +       .byte   0x74
18362 +       .sleb128 36
18363 +       .long   0x0
18364 +       .long   0x0
18365 +.LLST28:
18366 +       .long   .LVL56
18367 +       .long   .LVL58
18368 +       .value  0x1
18369 +       .byte   0x50
18370 +       .long   .LVL59
18371 +       .long   .LVL60
18372 +       .value  0x1
18373 +       .byte   0x50
18374 +       .long   .LVL68
18375 +       .long   .LVL72
18376 +       .value  0x1
18377 +       .byte   0x50
18378 +       .long   .LVL73
18379 +       .long   .LVL79
18380 +       .value  0x1
18381 +       .byte   0x50
18382 +       .long   0x0
18383 +       .long   0x0
18384 +.LLST29:
18385 +       .long   .LVL56
18386 +       .long   .LVL61
18387 +       .value  0x1
18388 +       .byte   0x52
18389 +       .long   .LVL68
18390 +       .long   .LVL71
18391 +       .value  0x1
18392 +       .byte   0x52
18393 +       .long   .LVL73
18394 +       .long   .LVL75
18395 +       .value  0x1
18396 +       .byte   0x52
18397 +       .long   .LVL87
18398 +       .long   .LFE923
18399 +       .value  0x1
18400 +       .byte   0x52
18401 +       .long   0x0
18402 +       .long   0x0
18403 +.LLST30:
18404 +       .long   .LVL56
18405 +       .long   .LVL57
18406 +       .value  0x1
18407 +       .byte   0x51
18408 +       .long   .LVL57
18409 +       .long   .LVL76
18410 +       .value  0x1
18411 +       .byte   0x56
18412 +       .long   .LVL76
18413 +       .long   .LVL78
18414 +       .value  0x1
18415 +       .byte   0x51
18416 +       .long   .LVL87
18417 +       .long   .LVL89
18418 +       .value  0x1
18419 +       .byte   0x56
18420 +       .long   .LVL89
18421 +       .long   .LFE923
18422 +       .value  0x1
18423 +       .byte   0x51
18424 +       .long   0x0
18425 +       .long   0x0
18426 +.LLST31:
18427 +       .long   .LVL74
18428 +       .long   .LVL83
18429 +       .value  0x1
18430 +       .byte   0x55
18431 +       .long   .LVL83
18432 +       .long   .LVL91
18433 +       .value  0x1
18434 +       .byte   0x51
18435 +       .long   0x0
18436 +       .long   0x0
18437 +.LLST32:
18438 +       .long   .LVL62
18439 +       .long   .LVL63
18440 +       .value  0x1
18441 +       .byte   0x57
18442 +       .long   .LVL64
18443 +       .long   .LVL68
18444 +       .value  0x1
18445 +       .byte   0x53
18446 +       .long   .LVL69
18447 +       .long   .LVL70
18448 +       .value  0x1
18449 +       .byte   0x53
18450 +       .long   .LVL73
18451 +       .long   .LVL84
18452 +       .value  0x1
18453 +       .byte   0x53
18454 +       .long   .LVL84
18455 +       .long   .LVL87
18456 +       .value  0x1
18457 +       .byte   0x52
18458 +       .long   .LVL87
18459 +       .long   .LVL88
18460 +       .value  0x1
18461 +       .byte   0x53
18462 +       .long   .LVL88
18463 +       .long   .LVL90
18464 +       .value  0x1
18465 +       .byte   0x57
18466 +       .long   .LVL90
18467 +       .long   .LFE923
18468 +       .value  0x1
18469 +       .byte   0x52
18470 +       .long   0x0
18471 +       .long   0x0
18472 +.LLST33:
18473 +       .long   .LVL67
18474 +       .long   .LVL68
18475 +       .value  0x1
18476 +       .byte   0x51
18477 +       .long   .LVL69
18478 +       .long   .LVL70
18479 +       .value  0x1
18480 +       .byte   0x51
18481 +       .long   .LVL73
18482 +       .long   .LVL78
18483 +       .value  0x1
18484 +       .byte   0x51
18485 +       .long   0x0
18486 +       .long   0x0
18487 +.LLST34:
18488 +       .long   .LVL66
18489 +       .long   .LVL68
18490 +       .value  0x1
18491 +       .byte   0x57
18492 +       .long   .LVL69
18493 +       .long   .LVL70
18494 +       .value  0x1
18495 +       .byte   0x57
18496 +       .long   .LVL73
18497 +       .long   .LVL77
18498 +       .value  0x1
18499 +       .byte   0x57
18500 +       .long   0x0
18501 +       .long   0x0
18502 +.LLST35:
18503 +       .long   .LVL79
18504 +       .long   .LVL80
18505 +       .value  0x1
18506 +       .byte   0x50
18507 +       .long   0x0
18508 +       .long   0x0
18509 +.LLST36:
18510 +       .long   .LVL80
18511 +       .long   .LVL81
18512 +       .value  0x1
18513 +       .byte   0x50
18514 +       .long   0x0
18515 +       .long   0x0
18516 +.LLST37:
18517 +       .long   .LVL81
18518 +       .long   .LVL82
18519 +       .value  0x1
18520 +       .byte   0x50
18521 +       .long   0x0
18522 +       .long   0x0
18523 +.LLST38:
18524 +       .long   .LVL82
18525 +       .long   .LVL85
18526 +       .value  0x1
18527 +       .byte   0x50
18528 +       .long   0x0
18529 +       .long   0x0
18530 +.LLST39:
18531 +       .long   .LVL85
18532 +       .long   .LVL86
18533 +       .value  0x1
18534 +       .byte   0x50
18535 +       .long   0x0
18536 +       .long   0x0
18537 +.LLST41:
18538 +       .long   .LVL94
18539 +       .long   .LVL95
18540 +       .value  0x1
18541 +       .byte   0x50
18542 +       .long   0x0
18543 +       .long   0x0
18544 +.LLST42:
18545 +       .long   .LVL92
18546 +       .long   .LVL93
18547 +       .value  0x1
18548 +       .byte   0x51
18549 +       .long   0x0
18550 +       .long   0x0
18551 +.LLST43:
18552 +       .long   .LFB904
18553 +       .long   .LCFI16
18554 +       .value  0x2
18555 +       .byte   0x74
18556 +       .sleb128 4
18557 +       .long   .LCFI16
18558 +       .long   .LCFI17
18559 +       .value  0x2
18560 +       .byte   0x74
18561 +       .sleb128 8
18562 +       .long   .LCFI17
18563 +       .long   .LCFI18
18564 +       .value  0x2
18565 +       .byte   0x74
18566 +       .sleb128 12
18567 +       .long   .LCFI18
18568 +       .long   .LCFI19
18569 +       .value  0x2
18570 +       .byte   0x74
18571 +       .sleb128 16
18572 +       .long   .LCFI19
18573 +       .long   .LFE904
18574 +       .value  0x2
18575 +       .byte   0x74
18576 +       .sleb128 40
18577 +       .long   0x0
18578 +       .long   0x0
18579 +.LLST44:
18580 +       .long   .LVL96
18581 +       .long   .LVL97
18582 +       .value  0x1
18583 +       .byte   0x50
18584 +       .long   0x0
18585 +       .long   0x0
18586 +.LLST45:
18587 +       .long   .LVL99
18588 +       .long   .LVL114
18589 +       .value  0x1
18590 +       .byte   0x56
18591 +       .long   0x0
18592 +       .long   0x0
18593 +.LLST46:
18594 +       .long   .LVL98
18595 +       .long   .LVL100
18596 +       .value  0x1
18597 +       .byte   0x52
18598 +       .long   0x0
18599 +       .long   0x0
18600 +.LLST47:
18601 +       .long   .LVL102
18602 +       .long   .LVL105
18603 +       .value  0x1
18604 +       .byte   0x53
18605 +       .long   0x0
18606 +       .long   0x0
18607 +.LLST48:
18608 +       .long   .LVL101
18609 +       .long   .LVL103
18610 +       .value  0x1
18611 +       .byte   0x52
18612 +       .long   .LVL104
18613 +       .long   .LVL108
18614 +       .value  0x1
18615 +       .byte   0x52
18616 +       .long   .LVL112
18617 +       .long   .LFE904
18618 +       .value  0x1
18619 +       .byte   0x52
18620 +       .long   0x0
18621 +       .long   0x0
18622 +.LLST49:
18623 +       .long   .LVL101
18624 +       .long   .LVL115
18625 +       .value  0x1
18626 +       .byte   0x57
18627 +       .long   0x0
18628 +       .long   0x0
18629 +.LLST50:
18630 +       .long   .LVL106
18631 +       .long   .LVL111
18632 +       .value  0x1
18633 +       .byte   0x50
18634 +       .long   0x0
18635 +       .long   0x0
18636 +.LLST51:
18637 +       .long   .LVL107
18638 +       .long   .LVL111
18639 +       .value  0x1
18640 +       .byte   0x51
18641 +       .long   0x0
18642 +       .long   0x0
18643 +.LLST52:
18644 +       .long   .LVL108
18645 +       .long   .LVL109
18646 +       .value  0x1
18647 +       .byte   0x52
18648 +       .long   0x0
18649 +       .long   0x0
18650 +.LLST53:
18651 +       .long   .LVL110
18652 +       .long   .LVL113
18653 +       .value  0x1
18654 +       .byte   0x53
18655 +       .long   0x0
18656 +       .long   0x0
18657 +.LLST54:
18658 +       .long   .LFB920
18659 +       .long   .LCFI20
18660 +       .value  0x2
18661 +       .byte   0x74
18662 +       .sleb128 4
18663 +       .long   .LCFI20
18664 +       .long   .LCFI21
18665 +       .value  0x2
18666 +       .byte   0x74
18667 +       .sleb128 8
18668 +       .long   .LCFI21
18669 +       .long   .LCFI22
18670 +       .value  0x2
18671 +       .byte   0x74
18672 +       .sleb128 12
18673 +       .long   .LCFI22
18674 +       .long   .LFE920
18675 +       .value  0x2
18676 +       .byte   0x74
18677 +       .sleb128 20
18678 +       .long   0x0
18679 +       .long   0x0
18680 +.LLST55:
18681 +       .long   .LVL116
18682 +       .long   .LVL117
18683 +       .value  0x1
18684 +       .byte   0x50
18685 +       .long   .LVL117
18686 +       .long   .LVL132
18687 +       .value  0x1
18688 +       .byte   0x53
18689 +       .long   0x0
18690 +       .long   0x0
18691 +.LLST56:
18692 +       .long   .LVL123
18693 +       .long   .LVL126
18694 +       .value  0x1
18695 +       .byte   0x52
18696 +       .long   .LVL126
18697 +       .long   .LVL129
18698 +       .value  0x1
18699 +       .byte   0x57
18700 +       .long   0x0
18701 +       .long   0x0
18702 +.LLST57:
18703 +       .long   .LVL124
18704 +       .long   .LVL130
18705 +       .value  0x1
18706 +       .byte   0x50
18707 +       .long   0x0
18708 +       .long   0x0
18709 +.LLST58:
18710 +       .long   .LVL119
18711 +       .long   .LVL122
18712 +       .value  0x1
18713 +       .byte   0x51
18714 +       .long   0x0
18715 +       .long   0x0
18716 +.LLST59:
18717 +       .long   .LVL118
18718 +       .long   .LVL121
18719 +       .value  0x1
18720 +       .byte   0x57
18721 +       .long   0x0
18722 +       .long   0x0
18723 +.LLST60:
18724 +       .long   .LVL120
18725 +       .long   .LVL127
18726 +       .value  0x1
18727 +       .byte   0x57
18728 +       .long   .LVL128
18729 +       .long   .LVL129
18730 +       .value  0x1
18731 +       .byte   0x57
18732 +       .long   0x0
18733 +       .long   0x0
18734 +.LLST61:
18735 +       .long   .LFB921
18736 +       .long   .LCFI23
18737 +       .value  0x2
18738 +       .byte   0x74
18739 +       .sleb128 4
18740 +       .long   .LCFI23
18741 +       .long   .LCFI24
18742 +       .value  0x2
18743 +       .byte   0x74
18744 +       .sleb128 8
18745 +       .long   .LCFI24
18746 +       .long   .LFE921
18747 +       .value  0x3
18748 +       .byte   0x74
18749 +       .sleb128 72
18750 +       .long   0x0
18751 +       .long   0x0
18752 +.LLST63:
18753 +       .long   .LVL134
18754 +       .long   .LVL135
18755 +       .value  0x1
18756 +       .byte   0x50
18757 +       .long   0x0
18758 +       .long   0x0
18759 +.LLST65:
18760 +       .long   .LFB907
18761 +       .long   .LCFI25
18762 +       .value  0x2
18763 +       .byte   0x74
18764 +       .sleb128 4
18765 +       .long   .LCFI25
18766 +       .long   .LCFI26
18767 +       .value  0x2
18768 +       .byte   0x74
18769 +       .sleb128 8
18770 +       .long   .LCFI26
18771 +       .long   .LCFI27
18772 +       .value  0x2
18773 +       .byte   0x74
18774 +       .sleb128 12
18775 +       .long   .LCFI27
18776 +       .long   .LCFI28
18777 +       .value  0x2
18778 +       .byte   0x74
18779 +       .sleb128 16
18780 +       .long   .LCFI28
18781 +       .long   .LCFI29
18782 +       .value  0x2
18783 +       .byte   0x74
18784 +       .sleb128 20
18785 +       .long   .LCFI29
18786 +       .long   .LFE907
18787 +       .value  0x2
18788 +       .byte   0x74
18789 +       .sleb128 24
18790 +       .long   0x0
18791 +       .long   0x0
18792 +.LLST66:
18793 +       .long   .LVL137
18794 +       .long   .LVL138
18795 +       .value  0x1
18796 +       .byte   0x50
18797 +       .long   .LVL138
18798 +       .long   .LVL139
18799 +       .value  0x6
18800 +       .byte   0x50
18801 +       .byte   0x93
18802 +       .uleb128 0x4
18803 +       .byte   0x52
18804 +       .byte   0x93
18805 +       .uleb128 0x4
18806 +       .long   .LVL139
18807 +       .long   .LVL140
18808 +       .value  0x1
18809 +       .byte   0x53
18810 +       .long   .LVL143
18811 +       .long   .LVL144
18812 +       .value  0x1
18813 +       .byte   0x53
18814 +       .long   0x0
18815 +       .long   0x0
18816 +.LLST67:
18817 +       .long   .LVL141
18818 +       .long   .LVL142
18819 +       .value  0x1
18820 +       .byte   0x50
18821 +       .long   0x0
18822 +       .long   0x0
18823 +.LLST69:
18824 +       .long   .LFB892
18825 +       .long   .LCFI30
18826 +       .value  0x2
18827 +       .byte   0x74
18828 +       .sleb128 4
18829 +       .long   .LCFI30
18830 +       .long   .LCFI31
18831 +       .value  0x2
18832 +       .byte   0x74
18833 +       .sleb128 8
18834 +       .long   .LCFI31
18835 +       .long   .LCFI32
18836 +       .value  0x2
18837 +       .byte   0x74
18838 +       .sleb128 12
18839 +       .long   .LCFI32
18840 +       .long   .LCFI33
18841 +       .value  0x2
18842 +       .byte   0x74
18843 +       .sleb128 16
18844 +       .long   .LCFI33
18845 +       .long   .LFE892
18846 +       .value  0x2
18847 +       .byte   0x74
18848 +       .sleb128 20
18849 +       .long   0x0
18850 +       .long   0x0
18851 +.LLST70:
18852 +       .long   .LVL145
18853 +       .long   .LVL146
18854 +       .value  0x1
18855 +       .byte   0x50
18856 +       .long   .LVL146
18857 +       .long   .LVL151
18858 +       .value  0x1
18859 +       .byte   0x57
18860 +       .long   0x0
18861 +       .long   0x0
18862 +.LLST71:
18863 +       .long   .LVL145
18864 +       .long   .LVL146
18865 +       .value  0x1
18866 +       .byte   0x52
18867 +       .long   .LVL146
18868 +       .long   .LVL152
18869 +       .value  0x1
18870 +       .byte   0x55
18871 +       .long   0x0
18872 +       .long   0x0
18873 +.LLST72:
18874 +       .long   .LVL146
18875 +       .long   .LVL150
18876 +       .value  0x1
18877 +       .byte   0x56
18878 +       .long   0x0
18879 +       .long   0x0
18880 +.LLST73:
18881 +       .long   .LVL146
18882 +       .long   .LVL149
18883 +       .value  0x1
18884 +       .byte   0x53
18885 +       .long   0x0
18886 +       .long   0x0
18887 +.LLST74:
18888 +       .long   .LFB897
18889 +       .long   .LCFI34
18890 +       .value  0x2
18891 +       .byte   0x74
18892 +       .sleb128 4
18893 +       .long   .LCFI34
18894 +       .long   .LCFI35
18895 +       .value  0x2
18896 +       .byte   0x74
18897 +       .sleb128 8
18898 +       .long   .LCFI35
18899 +       .long   .LCFI36
18900 +       .value  0x2
18901 +       .byte   0x74
18902 +       .sleb128 12
18903 +       .long   .LCFI36
18904 +       .long   .LFE897
18905 +       .value  0x2
18906 +       .byte   0x74
18907 +       .sleb128 16
18908 +       .long   0x0
18909 +       .long   0x0
18910 +.LLST75:
18911 +       .long   .LVL153
18912 +       .long   .LVL155
18913 +       .value  0x1
18914 +       .byte   0x50
18915 +       .long   .LVL155
18916 +       .long   .LVL167
18917 +       .value  0x1
18918 +       .byte   0x53
18919 +       .long   0x0
18920 +       .long   0x0
18921 +.LLST76:
18922 +       .long   .LVL156
18923 +       .long   .LVL157
18924 +       .value  0x1
18925 +       .byte   0x50
18926 +       .long   .LVL158
18927 +       .long   .LVL166
18928 +       .value  0x1
18929 +       .byte   0x51
18930 +       .long   0x0
18931 +       .long   0x0
18932 +.LLST77:
18933 +       .long   .LVL154
18934 +       .long   .LVL160
18935 +       .value  0x1
18936 +       .byte   0x56
18937 +       .long   .LVL162
18938 +       .long   .LVL168
18939 +       .value  0x1
18940 +       .byte   0x56
18941 +       .long   0x0
18942 +       .long   0x0
18943 +.LLST78:
18944 +       .long   .LVL159
18945 +       .long   .LVL164
18946 +       .value  0x1
18947 +       .byte   0x52
18948 +       .long   0x0
18949 +       .long   0x0
18950 +.LLST79:
18951 +       .long   .LVL161
18952 +       .long   .LVL165
18953 +       .value  0x1
18954 +       .byte   0x50
18955 +       .long   0x0
18956 +       .long   0x0
18957 +.LLST80:
18958 +       .long   .LFB898
18959 +       .long   .LCFI37
18960 +       .value  0x2
18961 +       .byte   0x74
18962 +       .sleb128 4
18963 +       .long   .LCFI37
18964 +       .long   .LFE898
18965 +       .value  0x2
18966 +       .byte   0x74
18967 +       .sleb128 8
18968 +       .long   0x0
18969 +       .long   0x0
18970 +.LLST81:
18971 +       .long   .LVL169
18972 +       .long   .LVL170
18973 +       .value  0x1
18974 +       .byte   0x50
18975 +       .long   .LVL170
18976 +       .long   .LVL173
18977 +       .value  0x1
18978 +       .byte   0x53
18979 +       .long   0x0
18980 +       .long   0x0
18981 +.LLST82:
18982 +       .long   .LVL170
18983 +       .long   .LVL171
18984 +       .value  0x1
18985 +       .byte   0x50
18986 +       .long   .LVL172
18987 +       .long   .LVL173
18988 +       .value  0x1
18989 +       .byte   0x50
18990 +       .long   0x0
18991 +       .long   0x0
18992 +.LLST83:
18993 +       .long   .LFB893
18994 +       .long   .LCFI38
18995 +       .value  0x2
18996 +       .byte   0x74
18997 +       .sleb128 4
18998 +       .long   .LCFI38
18999 +       .long   .LCFI39
19000 +       .value  0x2
19001 +       .byte   0x74
19002 +       .sleb128 8
19003 +       .long   .LCFI39
19004 +       .long   .LCFI40
19005 +       .value  0x2
19006 +       .byte   0x74
19007 +       .sleb128 12
19008 +       .long   .LCFI40
19009 +       .long   .LCFI41
19010 +       .value  0x2
19011 +       .byte   0x74
19012 +       .sleb128 16
19013 +       .long   .LCFI41
19014 +       .long   .LCFI42
19015 +       .value  0x2
19016 +       .byte   0x74
19017 +       .sleb128 20
19018 +       .long   .LCFI42
19019 +       .long   .LFE893
19020 +       .value  0x2
19021 +       .byte   0x74
19022 +       .sleb128 28
19023 +       .long   0x0
19024 +       .long   0x0
19025 +.LLST84:
19026 +       .long   .LVL174
19027 +       .long   .LVL176
19028 +       .value  0x1
19029 +       .byte   0x50
19030 +       .long   .LVL176
19031 +       .long   .LVL190
19032 +       .value  0x1
19033 +       .byte   0x53
19034 +       .long   0x0
19035 +       .long   0x0
19036 +.LLST85:
19037 +       .long   .LVL174
19038 +       .long   .LVL175
19039 +       .value  0x1
19040 +       .byte   0x52
19041 +       .long   .LVL175
19042 +       .long   .LVL191
19043 +       .value  0x1
19044 +       .byte   0x55
19045 +       .long   0x0
19046 +       .long   0x0
19047 +.LLST86:
19048 +       .long   .LVL177
19049 +       .long   .LVL188
19050 +       .value  0x1
19051 +       .byte   0x56
19052 +       .long   0x0
19053 +       .long   0x0
19054 +.LLST87:
19055 +       .long   .LVL183
19056 +       .long   .LVL189
19057 +       .value  0x1
19058 +       .byte   0x57
19059 +       .long   0x0
19060 +       .long   0x0
19061 +.LLST88:
19062 +       .long   .LVL180
19063 +       .long   .LVL181
19064 +       .value  0x1
19065 +       .byte   0x52
19066 +       .long   0x0
19067 +       .long   0x0
19068 +.LLST89:
19069 +       .long   .LVL179
19070 +       .long   .LVL182
19071 +       .value  0x1
19072 +       .byte   0x50
19073 +       .long   0x0
19074 +       .long   0x0
19075 +.LLST90:
19076 +       .long   .LVL181
19077 +       .long   .LVL184
19078 +       .value  0x1
19079 +       .byte   0x52
19080 +       .long   .LVL185
19081 +       .long   .LVL186
19082 +       .value  0x1
19083 +       .byte   0x52
19084 +       .long   0x0
19085 +       .long   0x0
19086 +.LLST91:
19087 +       .long   .LFB916
19088 +       .long   .LCFI43
19089 +       .value  0x2
19090 +       .byte   0x74
19091 +       .sleb128 4
19092 +       .long   .LCFI43
19093 +       .long   .LCFI44
19094 +       .value  0x2
19095 +       .byte   0x74
19096 +       .sleb128 8
19097 +       .long   .LCFI44
19098 +       .long   .LCFI45
19099 +       .value  0x2
19100 +       .byte   0x74
19101 +       .sleb128 12
19102 +       .long   .LCFI45
19103 +       .long   .LFE916
19104 +       .value  0x2
19105 +       .byte   0x74
19106 +       .sleb128 44
19107 +       .long   0x0
19108 +       .long   0x0
19109 +.LLST92:
19110 +       .long   .LVL192
19111 +       .long   .LVL193
19112 +       .value  0x1
19113 +       .byte   0x50
19114 +       .long   .LVL193
19115 +       .long   .LVL194
19116 +       .value  0x1
19117 +       .byte   0x53
19118 +       .long   .LVL194
19119 +       .long   .LVL195
19120 +       .value  0x1
19121 +       .byte   0x50
19122 +       .long   .LVL195
19123 +       .long   .LVL197
19124 +       .value  0x1
19125 +       .byte   0x53
19126 +       .long   .LVL197
19127 +       .long   .LVL199
19128 +       .value  0x1
19129 +       .byte   0x50
19130 +       .long   .LVL201
19131 +       .long   .LVL204
19132 +       .value  0x1
19133 +       .byte   0x53
19134 +       .long   0x0
19135 +       .long   0x0
19136 +.LLST93:
19137 +       .long   .LVL198
19138 +       .long   .LVL205
19139 +       .value  0x1
19140 +       .byte   0x56
19141 +       .long   0x0
19142 +       .long   0x0
19143 +.LLST94:
19144 +       .long   .LVL196
19145 +       .long   .LVL197
19146 +       .value  0x1
19147 +       .byte   0x50
19148 +       .long   .LVL202
19149 +       .long   .LVL203
19150 +       .value  0x1
19151 +       .byte   0x50
19152 +       .long   0x0
19153 +       .long   0x0
19154 +.LLST95:
19155 +       .long   .LVL199
19156 +       .long   .LVL200
19157 +       .value  0x1
19158 +       .byte   0x50
19159 +       .long   0x0
19160 +       .long   0x0
19161 +.LLST97:
19162 +       .long   .LVL206
19163 +       .long   .LVL208
19164 +       .value  0x1
19165 +       .byte   0x50
19166 +       .long   0x0
19167 +       .long   0x0
19168 +.LLST98:
19169 +       .long   .LVL207
19170 +       .long   .LVL208
19171 +       .value  0x1
19172 +       .byte   0x52
19173 +       .long   0x0
19174 +       .long   0x0
19175 +.LLST100:
19176 +       .long   .LVL209
19177 +       .long   .LVL210
19178 +       .value  0x1
19179 +       .byte   0x50
19180 +       .long   0x0
19181 +       .long   0x0
19182 +.LLST101:
19183 +       .long   .LVL211
19184 +       .long   .LVL212
19185 +       .value  0x1
19186 +       .byte   0x50
19187 +       .long   .LVL212
19188 +       .long   .LFE925
19189 +       .value  0x1
19190 +       .byte   0x50
19191 +       .long   0x0
19192 +       .long   0x0
19193 +.LLST103:
19194 +       .long   .LVL213
19195 +       .long   .LVL215
19196 +       .value  0x1
19197 +       .byte   0x50
19198 +       .long   0x0
19199 +       .long   0x0
19200 +.LLST104:
19201 +       .long   .LVL214
19202 +       .long   .LVL215
19203 +       .value  0x1
19204 +       .byte   0x52
19205 +       .long   0x0
19206 +       .long   0x0
19207 +.LLST106:
19208 +       .long   .LVL216
19209 +       .long   .LVL217
19210 +       .value  0x1
19211 +       .byte   0x50
19212 +       .long   0x0
19213 +       .long   0x0
19214 +.LLST107:
19215 +       .long   .LVL218
19216 +       .long   .LVL219
19217 +       .value  0x1
19218 +       .byte   0x52
19219 +       .long   .LVL220
19220 +       .long   .LVL223
19221 +       .value  0x1
19222 +       .byte   0x52
19223 +       .long   0x0
19224 +       .long   0x0
19225 +.LLST108:
19226 +       .long   .LVL221
19227 +       .long   .LVL222
19228 +       .value  0x1
19229 +       .byte   0x50
19230 +       .long   0x0
19231 +       .long   0x0
19232 +.LLST109:
19233 +       .long   .LFB901
19234 +       .long   .LCFI46
19235 +       .value  0x2
19236 +       .byte   0x74
19237 +       .sleb128 4
19238 +       .long   .LCFI46
19239 +       .long   .LCFI47
19240 +       .value  0x2
19241 +       .byte   0x74
19242 +       .sleb128 8
19243 +       .long   .LCFI47
19244 +       .long   .LCFI48
19245 +       .value  0x2
19246 +       .byte   0x74
19247 +       .sleb128 12
19248 +       .long   .LCFI48
19249 +       .long   .LFE901
19250 +       .value  0x2
19251 +       .byte   0x74
19252 +       .sleb128 16
19253 +       .long   0x0
19254 +       .long   0x0
19255 +.LLST110:
19256 +       .long   .LVL224
19257 +       .long   .LVL227
19258 +       .value  0x1
19259 +       .byte   0x50
19260 +       .long   .LVL227
19261 +       .long   .LVL228
19262 +       .value  0x1
19263 +       .byte   0x57
19264 +       .long   .LVL228
19265 +       .long   .LVL229
19266 +       .value  0x1
19267 +       .byte   0x50
19268 +       .long   .LVL229
19269 +       .long   .LVL232
19270 +       .value  0x1
19271 +       .byte   0x57
19272 +       .long   0x0
19273 +       .long   0x0
19274 +.LLST111:
19275 +       .long   .LVL226
19276 +       .long   .LVL230
19277 +       .value  0x1
19278 +       .byte   0x53
19279 +       .long   .LVL230
19280 +       .long   .LVL233
19281 +       .value  0x1
19282 +       .byte   0x50
19283 +       .long   0x0
19284 +       .long   0x0
19285 +.LLST112:
19286 +       .long   .LVL225
19287 +       .long   .LVL231
19288 +       .value  0x1
19289 +       .byte   0x56
19290 +       .long   0x0
19291 +       .long   0x0
19292 +.LLST113:
19293 +       .long   .LFB909
19294 +       .long   .LCFI49
19295 +       .value  0x2
19296 +       .byte   0x74
19297 +       .sleb128 4
19298 +       .long   .LCFI49
19299 +       .long   .LCFI50
19300 +       .value  0x2
19301 +       .byte   0x74
19302 +       .sleb128 8
19303 +       .long   .LCFI50
19304 +       .long   .LFE909
19305 +       .value  0x2
19306 +       .byte   0x74
19307 +       .sleb128 48
19308 +       .long   0x0
19309 +       .long   0x0
19310 +.LLST114:
19311 +       .long   .LVL235
19312 +       .long   .LVL237
19313 +       .value  0x1
19314 +       .byte   0x51
19315 +       .long   0x0
19316 +       .long   0x0
19317 +.LLST115:
19318 +       .long   .LVL234
19319 +       .long   .LVL236
19320 +       .value  0x1
19321 +       .byte   0x52
19322 +       .long   0x0
19323 +       .long   0x0
19324 +.LLST116:
19325 +       .long   .LVL238
19326 +       .long   .LVL239
19327 +       .value  0x1
19328 +       .byte   0x50
19329 +       .long   0x0
19330 +       .long   0x0
19331 +.LLST118:
19332 +       .long   .LVL240
19333 +       .long   .LVL241
19334 +       .value  0x1
19335 +       .byte   0x50
19336 +       .long   0x0
19337 +       .long   0x0
19338 +.LLST120:
19339 +       .long   .LVL242
19340 +       .long   .LVL243
19341 +       .value  0x1
19342 +       .byte   0x50
19343 +       .long   0x0
19344 +       .long   0x0
19345 +.LLST122:
19346 +       .long   .LVL244
19347 +       .long   .LVL245
19348 +       .value  0x1
19349 +       .byte   0x50
19350 +       .long   0x0
19351 +       .long   0x0
19352 +.LLST124:
19353 +       .long   .LVL246
19354 +       .long   .LVL247
19355 +       .value  0x1
19356 +       .byte   0x50
19357 +       .long   0x0
19358 +       .long   0x0
19359 +.LLST126:
19360 +       .long   .LVL248
19361 +       .long   .LVL249
19362 +       .value  0x1
19363 +       .byte   0x50
19364 +       .long   0x0
19365 +       .long   0x0
19366 +.LLST128:
19367 +       .long   .LVL250
19368 +       .long   .LVL251
19369 +       .value  0x1
19370 +       .byte   0x50
19371 +       .long   0x0
19372 +       .long   0x0
19373 +.LLST130:
19374 +       .long   .LVL252
19375 +       .long   .LVL253
19376 +       .value  0x1
19377 +       .byte   0x50
19378 +       .long   .LVL253
19379 +       .long   .LVL254
19380 +       .value  0x1
19381 +       .byte   0x50
19382 +       .long   0x0
19383 +       .long   0x0
19384 +.LLST131:
19385 +       .long   .LVL252
19386 +       .long   .LVL253
19387 +       .value  0x1
19388 +       .byte   0x52
19389 +       .long   .LVL253
19390 +       .long   .LFE895
19391 +       .value  0x1
19392 +       .byte   0x52
19393 +       .long   0x0
19394 +       .long   0x0
19395 +.LLST132:
19396 +       .long   .LFB896
19397 +       .long   .LCFI51
19398 +       .value  0x2
19399 +       .byte   0x74
19400 +       .sleb128 4
19401 +       .long   .LCFI51
19402 +       .long   .LCFI52
19403 +       .value  0x2
19404 +       .byte   0x74
19405 +       .sleb128 8
19406 +       .long   .LCFI52
19407 +       .long   .LCFI53
19408 +       .value  0x2
19409 +       .byte   0x74
19410 +       .sleb128 12
19411 +       .long   .LCFI53
19412 +       .long   .LFE896
19413 +       .value  0x2
19414 +       .byte   0x74
19415 +       .sleb128 16
19416 +       .long   0x0
19417 +       .long   0x0
19418 +.LLST133:
19419 +       .long   .LVL255
19420 +       .long   .LVL257
19421 +       .value  0x1
19422 +       .byte   0x50
19423 +       .long   .LVL257
19424 +       .long   .LVL267
19425 +       .value  0x1
19426 +       .byte   0x53
19427 +       .long   0x0
19428 +       .long   0x0
19429 +.LLST134:
19430 +       .long   .LVL258
19431 +       .long   .LVL266
19432 +       .value  0x1
19433 +       .byte   0x51
19434 +       .long   0x0
19435 +       .long   0x0
19436 +.LLST135:
19437 +       .long   .LVL256
19438 +       .long   .LVL260
19439 +       .value  0x1
19440 +       .byte   0x56
19441 +       .long   .LVL262
19442 +       .long   .LVL268
19443 +       .value  0x1
19444 +       .byte   0x56
19445 +       .long   0x0
19446 +       .long   0x0
19447 +.LLST136:
19448 +       .long   .LVL259
19449 +       .long   .LVL264
19450 +       .value  0x1
19451 +       .byte   0x52
19452 +       .long   0x0
19453 +       .long   0x0
19454 +.LLST137:
19455 +       .long   .LVL261
19456 +       .long   .LVL265
19457 +       .value  0x1
19458 +       .byte   0x50
19459 +       .long   0x0
19460 +       .long   0x0
19461 +.LLST138:
19462 +       .long   .LFB894
19463 +       .long   .LCFI54
19464 +       .value  0x2
19465 +       .byte   0x74
19466 +       .sleb128 4
19467 +       .long   .LCFI54
19468 +       .long   .LCFI55
19469 +       .value  0x2
19470 +       .byte   0x74
19471 +       .sleb128 8
19472 +       .long   .LCFI55
19473 +       .long   .LCFI56
19474 +       .value  0x2
19475 +       .byte   0x74
19476 +       .sleb128 12
19477 +       .long   .LCFI56
19478 +       .long   .LFE894
19479 +       .value  0x2
19480 +       .byte   0x74
19481 +       .sleb128 16
19482 +       .long   0x0
19483 +       .long   0x0
19484 +.LLST139:
19485 +       .long   .LVL269
19486 +       .long   .LVL271
19487 +       .value  0x1
19488 +       .byte   0x50
19489 +       .long   .LVL271
19490 +       .long   .LVL275
19491 +       .value  0x1
19492 +       .byte   0x56
19493 +       .long   0x0
19494 +       .long   0x0
19495 +.LLST140:
19496 +       .long   .LVL269
19497 +       .long   .LVL270
19498 +       .value  0x1
19499 +       .byte   0x52
19500 +       .long   0x0
19501 +       .long   0x0
19502 +.LLST141:
19503 +       .long   .LVL272
19504 +       .long   .LVL276
19505 +       .value  0x1
19506 +       .byte   0x57
19507 +       .long   0x0
19508 +       .long   0x0
19509 +.LLST142:
19510 +       .long   .LVL273
19511 +       .long   .LVL274
19512 +       .value  0x1
19513 +       .byte   0x53
19514 +       .long   .LVL274
19515 +       .long   .LVL277
19516 +       .value  0x1
19517 +       .byte   0x52
19518 +       .long   0x0
19519 +       .long   0x0
19520 +       .section        .debug_info
19521 +       .long   0xaa89
19522 +       .value  0x2
19523 +       .long   .Ldebug_abbrev0
19524 +       .byte   0x4
19525 +       .uleb128 0x1
19526 +       .long   .Ldebug_line0
19527 +       .long   0x0
19528 +       .long   .LASF1718
19529 +       .byte   0x1
19530 +       .long   .LASF1719
19531 +       .long   .LASF1720
19532 +       .uleb128 0x2
19533 +       .string "int"
19534 +       .byte   0x4
19535 +       .byte   0x5
19536 +       .uleb128 0x3
19537 +       .long   .LASF0
19538 +       .byte   0x4
19539 +       .byte   0x7
19540 +       .uleb128 0x3
19541 +       .long   .LASF1
19542 +       .byte   0x4
19543 +       .byte   0x7
19544 +       .uleb128 0x4
19545 +       .byte   0x4
19546 +       .long   0x3c
19547 +       .uleb128 0x5
19548 +       .long   0x48
19549 +       .byte   0x1
19550 +       .uleb128 0x6
19551 +       .long   0x21
19552 +       .byte   0x0
19553 +       .uleb128 0x7
19554 +       .long   .LASF4
19555 +       .byte   0x1d
19556 +       .byte   0xb
19557 +       .long   0x53
19558 +       .uleb128 0x3
19559 +       .long   .LASF2
19560 +       .byte   0x2
19561 +       .byte   0x7
19562 +       .uleb128 0x3
19563 +       .long   .LASF3
19564 +       .byte   0x4
19565 +       .byte   0x5
19566 +       .uleb128 0x7
19567 +       .long   .LASF5
19568 +       .byte   0x1d
19569 +       .byte   0xe
19570 +       .long   0x21
19571 +       .uleb128 0x7
19572 +       .long   .LASF6
19573 +       .byte   0x1d
19574 +       .byte   0x12
19575 +       .long   0x77
19576 +       .uleb128 0x3
19577 +       .long   .LASF0
19578 +       .byte   0x4
19579 +       .byte   0x7
19580 +       .uleb128 0x7
19581 +       .long   .LASF7
19582 +       .byte   0x1d
19583 +       .byte   0x13
19584 +       .long   0x21
19585 +       .uleb128 0x7
19586 +       .long   .LASF8
19587 +       .byte   0x1d
19588 +       .byte   0x15
19589 +       .long   0x5a
19590 +       .uleb128 0x7
19591 +       .long   .LASF9
19592 +       .byte   0x1d
19593 +       .byte   0x17
19594 +       .long   0x5a
19595 +       .uleb128 0x7
19596 +       .long   .LASF10
19597 +       .byte   0x1d
19598 +       .byte   0x18
19599 +       .long   0x21
19600 +       .uleb128 0x7
19601 +       .long   .LASF11
19602 +       .byte   0x1d
19603 +       .byte   0x19
19604 +       .long   0x21
19605 +       .uleb128 0x4
19606 +       .byte   0x4
19607 +       .long   0xbb
19608 +       .uleb128 0x3
19609 +       .long   .LASF12
19610 +       .byte   0x1
19611 +       .byte   0x6
19612 +       .uleb128 0x7
19613 +       .long   .LASF13
19614 +       .byte   0x1d
19615 +       .byte   0x1e
19616 +       .long   0x77
19617 +       .uleb128 0x7
19618 +       .long   .LASF14
19619 +       .byte   0x1d
19620 +       .byte   0x1f
19621 +       .long   0x77
19622 +       .uleb128 0x7
19623 +       .long   .LASF15
19624 +       .byte   0x1d
19625 +       .byte   0x26
19626 +       .long   0xe3
19627 +       .uleb128 0x3
19628 +       .long   .LASF16
19629 +       .byte   0x8
19630 +       .byte   0x5
19631 +       .uleb128 0x7
19632 +       .long   .LASF17
19633 +       .byte   0x1e
19634 +       .byte   0x6
19635 +       .long   0x53
19636 +       .uleb128 0x7
19637 +       .long   .LASF18
19638 +       .byte   0x1e
19639 +       .byte   0xd
19640 +       .long   0x100
19641 +       .uleb128 0x3
19642 +       .long   .LASF19
19643 +       .byte   0x1
19644 +       .byte   0x6
19645 +       .uleb128 0x7
19646 +       .long   .LASF20
19647 +       .byte   0x1e
19648 +       .byte   0xe
19649 +       .long   0x112
19650 +       .uleb128 0x3
19651 +       .long   .LASF21
19652 +       .byte   0x1
19653 +       .byte   0x8
19654 +       .uleb128 0x7
19655 +       .long   .LASF22
19656 +       .byte   0x1e
19657 +       .byte   0x10
19658 +       .long   0x124
19659 +       .uleb128 0x3
19660 +       .long   .LASF23
19661 +       .byte   0x2
19662 +       .byte   0x5
19663 +       .uleb128 0x7
19664 +       .long   .LASF24
19665 +       .byte   0x1e
19666 +       .byte   0x11
19667 +       .long   0x53
19668 +       .uleb128 0x7
19669 +       .long   .LASF25
19670 +       .byte   0x1e
19671 +       .byte   0x13
19672 +       .long   0x21
19673 +       .uleb128 0x7
19674 +       .long   .LASF26
19675 +       .byte   0x1e
19676 +       .byte   0x14
19677 +       .long   0x77
19678 +       .uleb128 0x7
19679 +       .long   .LASF27
19680 +       .byte   0x1e
19681 +       .byte   0x17
19682 +       .long   0xe3
19683 +       .uleb128 0x7
19684 +       .long   .LASF28
19685 +       .byte   0x1e
19686 +       .byte   0x18
19687 +       .long   0x162
19688 +       .uleb128 0x3
19689 +       .long   .LASF29
19690 +       .byte   0x8
19691 +       .byte   0x7
19692 +       .uleb128 0x8
19693 +       .string "s8"
19694 +       .byte   0x1e
19695 +       .byte   0x27
19696 +       .long   0x100
19697 +       .uleb128 0x8
19698 +       .string "u32"
19699 +       .byte   0x1e
19700 +       .byte   0x2e
19701 +       .long   0x77
19702 +       .uleb128 0x8
19703 +       .string "s64"
19704 +       .byte   0x1e
19705 +       .byte   0x30
19706 +       .long   0xe3
19707 +       .uleb128 0x8
19708 +       .string "u64"
19709 +       .byte   0x1e
19710 +       .byte   0x31
19711 +       .long   0x162
19712 +       .uleb128 0x7
19713 +       .long   .LASF30
19714 +       .byte   0x1c
19715 +       .byte   0x13
19716 +       .long   0x141
19717 +       .uleb128 0x7
19718 +       .long   .LASF31
19719 +       .byte   0x1c
19720 +       .byte   0x16
19721 +       .long   0x194
19722 +       .uleb128 0x7
19723 +       .long   .LASF32
19724 +       .byte   0x1c
19725 +       .byte   0x18
19726 +       .long   0x48
19727 +       .uleb128 0x7
19728 +       .long   .LASF33
19729 +       .byte   0x1c
19730 +       .byte   0x1b
19731 +       .long   0x61
19732 +       .uleb128 0x7
19733 +       .long   .LASF34
19734 +       .byte   0x1c
19735 +       .byte   0x1f
19736 +       .long   0x9f
19737 +       .uleb128 0x7
19738 +       .long   .LASF35
19739 +       .byte   0x1c
19740 +       .byte   0x20
19741 +       .long   0xaa
19742 +       .uleb128 0x3
19743 +       .long   .LASF36
19744 +       .byte   0x1
19745 +       .byte   0x2
19746 +       .uleb128 0x7
19747 +       .long   .LASF37
19748 +       .byte   0x1c
19749 +       .byte   0x26
19750 +       .long   0xc2
19751 +       .uleb128 0x7
19752 +       .long   .LASF38
19753 +       .byte   0x1c
19754 +       .byte   0x27
19755 +       .long   0xcd
19756 +       .uleb128 0x7
19757 +       .long   .LASF39
19758 +       .byte   0x1c
19759 +       .byte   0x3a
19760 +       .long   0xd8
19761 +       .uleb128 0x7
19762 +       .long   .LASF40
19763 +       .byte   0x1c
19764 +       .byte   0x43
19765 +       .long   0x6c
19766 +       .uleb128 0x7
19767 +       .long   .LASF41
19768 +       .byte   0x1c
19769 +       .byte   0x48
19770 +       .long   0x7e
19771 +       .uleb128 0x7
19772 +       .long   .LASF42
19773 +       .byte   0x1c
19774 +       .byte   0x52
19775 +       .long   0x89
19776 +       .uleb128 0x7
19777 +       .long   .LASF43
19778 +       .byte   0x1c
19779 +       .byte   0x57
19780 +       .long   0x94
19781 +       .uleb128 0x7
19782 +       .long   .LASF44
19783 +       .byte   0x1c
19784 +       .byte   0x8d
19785 +       .long   0x189
19786 +       .uleb128 0x7
19787 +       .long   .LASF45
19788 +       .byte   0x1c
19789 +       .byte   0x98
19790 +       .long   0x2f
19791 +       .uleb128 0x7
19792 +       .long   .LASF46
19793 +       .byte   0x1c
19794 +       .byte   0xc1
19795 +       .long   0x77
19796 +       .uleb128 0x7
19797 +       .long   .LASF47
19798 +       .byte   0x1c
19799 +       .byte   0xc4
19800 +       .long   0x189
19801 +       .uleb128 0x9
19802 +       .long   0x297
19803 +       .byte   0x10
19804 +       .byte   0xf
19805 +       .byte   0x12
19806 +       .uleb128 0xa
19807 +       .long   .LASF48
19808 +       .byte   0xf
19809 +       .byte   0x13
19810 +       .long   0x2f
19811 +       .byte   0x2
19812 +       .byte   0x23
19813 +       .uleb128 0x0
19814 +       .uleb128 0xa
19815 +       .long   .LASF49
19816 +       .byte   0xf
19817 +       .byte   0x13
19818 +       .long   0x2f
19819 +       .byte   0x2
19820 +       .byte   0x23
19821 +       .uleb128 0x4
19822 +       .uleb128 0xa
19823 +       .long   .LASF50
19824 +       .byte   0xf
19825 +       .byte   0x13
19826 +       .long   0x2f
19827 +       .byte   0x2
19828 +       .byte   0x23
19829 +       .uleb128 0x8
19830 +       .uleb128 0xa
19831 +       .long   .LASF51
19832 +       .byte   0xf
19833 +       .byte   0x13
19834 +       .long   0x2f
19835 +       .byte   0x2
19836 +       .byte   0x23
19837 +       .uleb128 0xc
19838 +       .byte   0x0
19839 +       .uleb128 0x9
19840 +       .long   0x2d8
19841 +       .byte   0x14
19842 +       .byte   0xf
19843 +       .byte   0x16
19844 +       .uleb128 0xa
19845 +       .long   .LASF52
19846 +       .byte   0xf
19847 +       .byte   0x17
19848 +       .long   0x2d8
19849 +       .byte   0x2
19850 +       .byte   0x23
19851 +       .uleb128 0x0
19852 +       .uleb128 0xb
19853 +       .string "val"
19854 +       .byte   0xf
19855 +       .byte   0x18
19856 +       .long   0x173
19857 +       .byte   0x2
19858 +       .byte   0x23
19859 +       .uleb128 0x4
19860 +       .uleb128 0xa
19861 +       .long   .LASF53
19862 +       .byte   0xf
19863 +       .byte   0x19
19864 +       .long   0x173
19865 +       .byte   0x2
19866 +       .byte   0x23
19867 +       .uleb128 0x8
19868 +       .uleb128 0xa
19869 +       .long   .LASF54
19870 +       .byte   0xf
19871 +       .byte   0x1a
19872 +       .long   0x189
19873 +       .byte   0x2
19874 +       .byte   0x23
19875 +       .uleb128 0xc
19876 +       .byte   0x0
19877 +       .uleb128 0x4
19878 +       .byte   0x4
19879 +       .long   0x173
19880 +       .uleb128 0xc
19881 +       .long   0x2f7
19882 +       .byte   0x14
19883 +       .byte   0xf
19884 +       .byte   0x11
19885 +       .uleb128 0xd
19886 +       .long   0x256
19887 +       .uleb128 0xe
19888 +       .long   .LASF55
19889 +       .byte   0xf
19890 +       .byte   0x1b
19891 +       .long   0x297
19892 +       .byte   0x0
19893 +       .uleb128 0xf
19894 +       .long   0x319
19895 +       .long   .LASF59
19896 +       .byte   0x18
19897 +       .byte   0xf
19898 +       .byte   0xf
19899 +       .uleb128 0xb
19900 +       .string "fn"
19901 +       .byte   0xf
19902 +       .byte   0x10
19903 +       .long   0x32f
19904 +       .byte   0x2
19905 +       .byte   0x23
19906 +       .uleb128 0x0
19907 +       .uleb128 0x10
19908 +       .long   0x2de
19909 +       .byte   0x2
19910 +       .byte   0x23
19911 +       .uleb128 0x4
19912 +       .byte   0x0
19913 +       .uleb128 0x11
19914 +       .long   0x329
19915 +       .byte   0x1
19916 +       .long   0x5a
19917 +       .uleb128 0x6
19918 +       .long   0x329
19919 +       .byte   0x0
19920 +       .uleb128 0x4
19921 +       .byte   0x4
19922 +       .long   0x2f7
19923 +       .uleb128 0x4
19924 +       .byte   0x4
19925 +       .long   0x319
19926 +       .uleb128 0x9
19927 +       .long   0x34c
19928 +       .byte   0x4
19929 +       .byte   0x14
19930 +       .byte   0x5b
19931 +       .uleb128 0xb
19932 +       .string "pgd"
19933 +       .byte   0x14
19934 +       .byte   0x5b
19935 +       .long   0x2f
19936 +       .byte   0x2
19937 +       .byte   0x23
19938 +       .uleb128 0x0
19939 +       .byte   0x0
19940 +       .uleb128 0x7
19941 +       .long   .LASF56
19942 +       .byte   0x14
19943 +       .byte   0x5b
19944 +       .long   0x335
19945 +       .uleb128 0x9
19946 +       .long   0x36e
19947 +       .byte   0x4
19948 +       .byte   0x14
19949 +       .byte   0x5c
19950 +       .uleb128 0xa
19951 +       .long   .LASF57
19952 +       .byte   0x14
19953 +       .byte   0x5c
19954 +       .long   0x2f
19955 +       .byte   0x2
19956 +       .byte   0x23
19957 +       .uleb128 0x0
19958 +       .byte   0x0
19959 +       .uleb128 0x7
19960 +       .long   .LASF58
19961 +       .byte   0x14
19962 +       .byte   0x5c
19963 +       .long   0x357
19964 +       .uleb128 0xf
19965 +       .long   0x4fa
19966 +       .long   .LASF60
19967 +       .byte   0x54
19968 +       .byte   0x49
19969 +       .byte   0x48
19970 +       .uleb128 0xb
19971 +       .string "ebx"
19972 +       .byte   0x49
19973 +       .byte   0x4c
19974 +       .long   0x5a
19975 +       .byte   0x2
19976 +       .byte   0x23
19977 +       .uleb128 0x0
19978 +       .uleb128 0xb
19979 +       .string "ecx"
19980 +       .byte   0x49
19981 +       .byte   0x4d
19982 +       .long   0x5a
19983 +       .byte   0x2
19984 +       .byte   0x23
19985 +       .uleb128 0x4
19986 +       .uleb128 0xb
19987 +       .string "edx"
19988 +       .byte   0x49
19989 +       .byte   0x4e
19990 +       .long   0x5a
19991 +       .byte   0x2
19992 +       .byte   0x23
19993 +       .uleb128 0x8
19994 +       .uleb128 0xb
19995 +       .string "esi"
19996 +       .byte   0x49
19997 +       .byte   0x4f
19998 +       .long   0x5a
19999 +       .byte   0x2
20000 +       .byte   0x23
20001 +       .uleb128 0xc
20002 +       .uleb128 0xb
20003 +       .string "edi"
20004 +       .byte   0x49
20005 +       .byte   0x50
20006 +       .long   0x5a
20007 +       .byte   0x2
20008 +       .byte   0x23
20009 +       .uleb128 0x10
20010 +       .uleb128 0xb
20011 +       .string "ebp"
20012 +       .byte   0x49
20013 +       .byte   0x51
20014 +       .long   0x5a
20015 +       .byte   0x2
20016 +       .byte   0x23
20017 +       .uleb128 0x14
20018 +       .uleb128 0xb
20019 +       .string "eax"
20020 +       .byte   0x49
20021 +       .byte   0x52
20022 +       .long   0x5a
20023 +       .byte   0x2
20024 +       .byte   0x23
20025 +       .uleb128 0x18
20026 +       .uleb128 0xa
20027 +       .long   .LASF61
20028 +       .byte   0x49
20029 +       .byte   0x53
20030 +       .long   0x5a
20031 +       .byte   0x2
20032 +       .byte   0x23
20033 +       .uleb128 0x1c
20034 +       .uleb128 0xa
20035 +       .long   .LASF62
20036 +       .byte   0x49
20037 +       .byte   0x54
20038 +       .long   0x5a
20039 +       .byte   0x2
20040 +       .byte   0x23
20041 +       .uleb128 0x20
20042 +       .uleb128 0xa
20043 +       .long   .LASF63
20044 +       .byte   0x49
20045 +       .byte   0x55
20046 +       .long   0x5a
20047 +       .byte   0x2
20048 +       .byte   0x23
20049 +       .uleb128 0x24
20050 +       .uleb128 0xa
20051 +       .long   .LASF64
20052 +       .byte   0x49
20053 +       .byte   0x56
20054 +       .long   0x5a
20055 +       .byte   0x2
20056 +       .byte   0x23
20057 +       .uleb128 0x28
20058 +       .uleb128 0xa
20059 +       .long   .LASF65
20060 +       .byte   0x49
20061 +       .byte   0x57
20062 +       .long   0x5a
20063 +       .byte   0x2
20064 +       .byte   0x23
20065 +       .uleb128 0x2c
20066 +       .uleb128 0xb
20067 +       .string "eip"
20068 +       .byte   0x49
20069 +       .byte   0x58
20070 +       .long   0x5a
20071 +       .byte   0x2
20072 +       .byte   0x23
20073 +       .uleb128 0x30
20074 +       .uleb128 0xb
20075 +       .string "cs"
20076 +       .byte   0x49
20077 +       .byte   0x59
20078 +       .long   0x53
20079 +       .byte   0x2
20080 +       .byte   0x23
20081 +       .uleb128 0x34
20082 +       .uleb128 0xa
20083 +       .long   .LASF66
20084 +       .byte   0x49
20085 +       .byte   0x59
20086 +       .long   0x53
20087 +       .byte   0x2
20088 +       .byte   0x23
20089 +       .uleb128 0x36
20090 +       .uleb128 0xa
20091 +       .long   .LASF67
20092 +       .byte   0x49
20093 +       .byte   0x5a
20094 +       .long   0x5a
20095 +       .byte   0x2
20096 +       .byte   0x23
20097 +       .uleb128 0x38
20098 +       .uleb128 0xb
20099 +       .string "esp"
20100 +       .byte   0x49
20101 +       .byte   0x5b
20102 +       .long   0x5a
20103 +       .byte   0x2
20104 +       .byte   0x23
20105 +       .uleb128 0x3c
20106 +       .uleb128 0xb
20107 +       .string "ss"
20108 +       .byte   0x49
20109 +       .byte   0x5c
20110 +       .long   0x53
20111 +       .byte   0x2
20112 +       .byte   0x23
20113 +       .uleb128 0x40
20114 +       .uleb128 0xa
20115 +       .long   .LASF68
20116 +       .byte   0x49
20117 +       .byte   0x5c
20118 +       .long   0x53
20119 +       .byte   0x2
20120 +       .byte   0x23
20121 +       .uleb128 0x42
20122 +       .uleb128 0xb
20123 +       .string "es"
20124 +       .byte   0x49
20125 +       .byte   0x60
20126 +       .long   0x53
20127 +       .byte   0x2
20128 +       .byte   0x23
20129 +       .uleb128 0x44
20130 +       .uleb128 0xa
20131 +       .long   .LASF69
20132 +       .byte   0x49
20133 +       .byte   0x60
20134 +       .long   0x53
20135 +       .byte   0x2
20136 +       .byte   0x23
20137 +       .uleb128 0x46
20138 +       .uleb128 0xb
20139 +       .string "ds"
20140 +       .byte   0x49
20141 +       .byte   0x61
20142 +       .long   0x53
20143 +       .byte   0x2
20144 +       .byte   0x23
20145 +       .uleb128 0x48
20146 +       .uleb128 0xa
20147 +       .long   .LASF70
20148 +       .byte   0x49
20149 +       .byte   0x61
20150 +       .long   0x53
20151 +       .byte   0x2
20152 +       .byte   0x23
20153 +       .uleb128 0x4a
20154 +       .uleb128 0xb
20155 +       .string "fs"
20156 +       .byte   0x49
20157 +       .byte   0x62
20158 +       .long   0x53
20159 +       .byte   0x2
20160 +       .byte   0x23
20161 +       .uleb128 0x4c
20162 +       .uleb128 0xa
20163 +       .long   .LASF71
20164 +       .byte   0x49
20165 +       .byte   0x62
20166 +       .long   0x53
20167 +       .byte   0x2
20168 +       .byte   0x23
20169 +       .uleb128 0x4e
20170 +       .uleb128 0xb
20171 +       .string "gs"
20172 +       .byte   0x49
20173 +       .byte   0x63
20174 +       .long   0x53
20175 +       .byte   0x2
20176 +       .byte   0x23
20177 +       .uleb128 0x50
20178 +       .uleb128 0xa
20179 +       .long   .LASF72
20180 +       .byte   0x49
20181 +       .byte   0x63
20182 +       .long   0x53
20183 +       .byte   0x2
20184 +       .byte   0x23
20185 +       .uleb128 0x52
20186 +       .byte   0x0
20187 +       .uleb128 0xf
20188 +       .long   0x515
20189 +       .long   .LASF73
20190 +       .byte   0x20
20191 +       .byte   0x49
20192 +       .byte   0x66
20193 +       .uleb128 0xa
20194 +       .long   .LASF74
20195 +       .byte   0x49
20196 +       .byte   0x67
20197 +       .long   0x515
20198 +       .byte   0x2
20199 +       .byte   0x23
20200 +       .uleb128 0x0
20201 +       .byte   0x0
20202 +       .uleb128 0x12
20203 +       .long   0x525
20204 +       .long   0x2f
20205 +       .uleb128 0x13
20206 +       .long   0x28
20207 +       .byte   0x7
20208 +       .byte   0x0
20209 +       .uleb128 0xf
20210 +       .long   0x587
20211 +       .long   .LASF75
20212 +       .byte   0xa0
20213 +       .byte   0x49
20214 +       .byte   0x6a
20215 +       .uleb128 0xa
20216 +       .long   .LASF76
20217 +       .byte   0x49
20218 +       .byte   0x6b
20219 +       .long   0x379
20220 +       .byte   0x2
20221 +       .byte   0x23
20222 +       .uleb128 0x0
20223 +       .uleb128 0xa
20224 +       .long   .LASF53
20225 +       .byte   0x49
20226 +       .byte   0x6c
20227 +       .long   0x2f
20228 +       .byte   0x2
20229 +       .byte   0x23
20230 +       .uleb128 0x54
20231 +       .uleb128 0xa
20232 +       .long   .LASF77
20233 +       .byte   0x49
20234 +       .byte   0x6d
20235 +       .long   0x2f
20236 +       .byte   0x2
20237 +       .byte   0x23
20238 +       .uleb128 0x58
20239 +       .uleb128 0xa
20240 +       .long   .LASF78
20241 +       .byte   0x49
20242 +       .byte   0x6e
20243 +       .long   0x2f
20244 +       .byte   0x2
20245 +       .byte   0x23
20246 +       .uleb128 0x5c
20247 +       .uleb128 0xa
20248 +       .long   .LASF79
20249 +       .byte   0x49
20250 +       .byte   0x6f
20251 +       .long   0x4fa
20252 +       .byte   0x2
20253 +       .byte   0x23
20254 +       .uleb128 0x60
20255 +       .uleb128 0xa
20256 +       .long   .LASF80
20257 +       .byte   0x49
20258 +       .byte   0x70
20259 +       .long   0x4fa
20260 +       .byte   0x3
20261 +       .byte   0x23
20262 +       .uleb128 0x80
20263 +       .byte   0x0
20264 +       .uleb128 0xf
20265 +       .long   0x674
20266 +       .long   .LASF81
20267 +       .byte   0x40
20268 +       .byte   0x69
20269 +       .byte   0x9
20270 +       .uleb128 0xb
20271 +       .string "ebx"
20272 +       .byte   0x69
20273 +       .byte   0xa
20274 +       .long   0x5a
20275 +       .byte   0x2
20276 +       .byte   0x23
20277 +       .uleb128 0x0
20278 +       .uleb128 0xb
20279 +       .string "ecx"
20280 +       .byte   0x69
20281 +       .byte   0xb
20282 +       .long   0x5a
20283 +       .byte   0x2
20284 +       .byte   0x23
20285 +       .uleb128 0x4
20286 +       .uleb128 0xb
20287 +       .string "edx"
20288 +       .byte   0x69
20289 +       .byte   0xc
20290 +       .long   0x5a
20291 +       .byte   0x2
20292 +       .byte   0x23
20293 +       .uleb128 0x8
20294 +       .uleb128 0xb
20295 +       .string "esi"
20296 +       .byte   0x69
20297 +       .byte   0xd
20298 +       .long   0x5a
20299 +       .byte   0x2
20300 +       .byte   0x23
20301 +       .uleb128 0xc
20302 +       .uleb128 0xb
20303 +       .string "edi"
20304 +       .byte   0x69
20305 +       .byte   0xe
20306 +       .long   0x5a
20307 +       .byte   0x2
20308 +       .byte   0x23
20309 +       .uleb128 0x10
20310 +       .uleb128 0xb
20311 +       .string "ebp"
20312 +       .byte   0x69
20313 +       .byte   0xf
20314 +       .long   0x5a
20315 +       .byte   0x2
20316 +       .byte   0x23
20317 +       .uleb128 0x14
20318 +       .uleb128 0xb
20319 +       .string "eax"
20320 +       .byte   0x69
20321 +       .byte   0x10
20322 +       .long   0x5a
20323 +       .byte   0x2
20324 +       .byte   0x23
20325 +       .uleb128 0x18
20326 +       .uleb128 0xb
20327 +       .string "xds"
20328 +       .byte   0x69
20329 +       .byte   0x11
20330 +       .long   0x21
20331 +       .byte   0x2
20332 +       .byte   0x23
20333 +       .uleb128 0x1c
20334 +       .uleb128 0xb
20335 +       .string "xes"
20336 +       .byte   0x69
20337 +       .byte   0x12
20338 +       .long   0x21
20339 +       .byte   0x2
20340 +       .byte   0x23
20341 +       .uleb128 0x20
20342 +       .uleb128 0xb
20343 +       .string "xfs"
20344 +       .byte   0x69
20345 +       .byte   0x13
20346 +       .long   0x21
20347 +       .byte   0x2
20348 +       .byte   0x23
20349 +       .uleb128 0x24
20350 +       .uleb128 0xa
20351 +       .long   .LASF65
20352 +       .byte   0x69
20353 +       .byte   0x15
20354 +       .long   0x5a
20355 +       .byte   0x2
20356 +       .byte   0x23
20357 +       .uleb128 0x28
20358 +       .uleb128 0xb
20359 +       .string "eip"
20360 +       .byte   0x69
20361 +       .byte   0x16
20362 +       .long   0x5a
20363 +       .byte   0x2
20364 +       .byte   0x23
20365 +       .uleb128 0x2c
20366 +       .uleb128 0xb
20367 +       .string "xcs"
20368 +       .byte   0x69
20369 +       .byte   0x17
20370 +       .long   0x21
20371 +       .byte   0x2
20372 +       .byte   0x23
20373 +       .uleb128 0x30
20374 +       .uleb128 0xa
20375 +       .long   .LASF67
20376 +       .byte   0x69
20377 +       .byte   0x18
20378 +       .long   0x5a
20379 +       .byte   0x2
20380 +       .byte   0x23
20381 +       .uleb128 0x34
20382 +       .uleb128 0xb
20383 +       .string "esp"
20384 +       .byte   0x69
20385 +       .byte   0x19
20386 +       .long   0x5a
20387 +       .byte   0x2
20388 +       .byte   0x23
20389 +       .uleb128 0x38
20390 +       .uleb128 0xb
20391 +       .string "xss"
20392 +       .byte   0x69
20393 +       .byte   0x1a
20394 +       .long   0x21
20395 +       .byte   0x2
20396 +       .byte   0x23
20397 +       .uleb128 0x3c
20398 +       .byte   0x0
20399 +       .uleb128 0x4
20400 +       .byte   0x4
20401 +       .long   0x587
20402 +       .uleb128 0xf
20403 +       .long   0x7ad
20404 +       .long   .LASF82
20405 +       .byte   0x54
20406 +       .byte   0x48
20407 +       .byte   0xd
20408 +       .uleb128 0xa
20409 +       .long   .LASF83
20410 +       .byte   0x48
20411 +       .byte   0xe
20412 +       .long   0x5a
20413 +       .byte   0x2
20414 +       .byte   0x23
20415 +       .uleb128 0x0
20416 +       .uleb128 0xa
20417 +       .long   .LASF84
20418 +       .byte   0x48
20419 +       .byte   0xf
20420 +       .long   0x5a
20421 +       .byte   0x2
20422 +       .byte   0x23
20423 +       .uleb128 0x4
20424 +       .uleb128 0xa
20425 +       .long   .LASF85
20426 +       .byte   0x48
20427 +       .byte   0x10
20428 +       .long   0x5a
20429 +       .byte   0x2
20430 +       .byte   0x23
20431 +       .uleb128 0x8
20432 +       .uleb128 0xa
20433 +       .long   .LASF86
20434 +       .byte   0x48
20435 +       .byte   0x11
20436 +       .long   0x5a
20437 +       .byte   0x2
20438 +       .byte   0x23
20439 +       .uleb128 0xc
20440 +       .uleb128 0xa
20441 +       .long   .LASF87
20442 +       .byte   0x48
20443 +       .byte   0x12
20444 +       .long   0x5a
20445 +       .byte   0x2
20446 +       .byte   0x23
20447 +       .uleb128 0x10
20448 +       .uleb128 0xa
20449 +       .long   .LASF88
20450 +       .byte   0x48
20451 +       .byte   0x13
20452 +       .long   0x5a
20453 +       .byte   0x2
20454 +       .byte   0x23
20455 +       .uleb128 0x14
20456 +       .uleb128 0xa
20457 +       .long   .LASF89
20458 +       .byte   0x48
20459 +       .byte   0x14
20460 +       .long   0x5a
20461 +       .byte   0x2
20462 +       .byte   0x23
20463 +       .uleb128 0x18
20464 +       .uleb128 0xa
20465 +       .long   .LASF90
20466 +       .byte   0x48
20467 +       .byte   0x15
20468 +       .long   0x5a
20469 +       .byte   0x2
20470 +       .byte   0x23
20471 +       .uleb128 0x1c
20472 +       .uleb128 0xa
20473 +       .long   .LASF91
20474 +       .byte   0x48
20475 +       .byte   0x16
20476 +       .long   0x5a
20477 +       .byte   0x2
20478 +       .byte   0x23
20479 +       .uleb128 0x20
20480 +       .uleb128 0xa
20481 +       .long   .LASF92
20482 +       .byte   0x48
20483 +       .byte   0x17
20484 +       .long   0x5a
20485 +       .byte   0x2
20486 +       .byte   0x23
20487 +       .uleb128 0x24
20488 +       .uleb128 0xa
20489 +       .long   .LASF93
20490 +       .byte   0x48
20491 +       .byte   0x18
20492 +       .long   0x5a
20493 +       .byte   0x2
20494 +       .byte   0x23
20495 +       .uleb128 0x28
20496 +       .uleb128 0xa
20497 +       .long   .LASF94
20498 +       .byte   0x48
20499 +       .byte   0x19
20500 +       .long   0x5a
20501 +       .byte   0x2
20502 +       .byte   0x23
20503 +       .uleb128 0x2c
20504 +       .uleb128 0xa
20505 +       .long   .LASF95
20506 +       .byte   0x48
20507 +       .byte   0x1a
20508 +       .long   0x5a
20509 +       .byte   0x2
20510 +       .byte   0x23
20511 +       .uleb128 0x30
20512 +       .uleb128 0xa
20513 +       .long   .LASF96
20514 +       .byte   0x48
20515 +       .byte   0x1b
20516 +       .long   0x5a
20517 +       .byte   0x2
20518 +       .byte   0x23
20519 +       .uleb128 0x34
20520 +       .uleb128 0xa
20521 +       .long   .LASF97
20522 +       .byte   0x48
20523 +       .byte   0x1c
20524 +       .long   0x5a
20525 +       .byte   0x2
20526 +       .byte   0x23
20527 +       .uleb128 0x38
20528 +       .uleb128 0xa
20529 +       .long   .LASF98
20530 +       .byte   0x48
20531 +       .byte   0x1d
20532 +       .long   0x5a
20533 +       .byte   0x2
20534 +       .byte   0x23
20535 +       .uleb128 0x3c
20536 +       .uleb128 0xa
20537 +       .long   .LASF99
20538 +       .byte   0x48
20539 +       .byte   0x1e
20540 +       .long   0x5a
20541 +       .byte   0x2
20542 +       .byte   0x23
20543 +       .uleb128 0x40
20544 +       .uleb128 0xa
20545 +       .long   .LASF100
20546 +       .byte   0x48
20547 +       .byte   0x1f
20548 +       .long   0x5a
20549 +       .byte   0x2
20550 +       .byte   0x23
20551 +       .uleb128 0x44
20552 +       .uleb128 0xa
20553 +       .long   .LASF101
20554 +       .byte   0x48
20555 +       .byte   0x20
20556 +       .long   0x5a
20557 +       .byte   0x2
20558 +       .byte   0x23
20559 +       .uleb128 0x48
20560 +       .uleb128 0xa
20561 +       .long   .LASF102
20562 +       .byte   0x48
20563 +       .byte   0x21
20564 +       .long   0x5a
20565 +       .byte   0x2
20566 +       .byte   0x23
20567 +       .uleb128 0x4c
20568 +       .uleb128 0xa
20569 +       .long   .LASF103
20570 +       .byte   0x48
20571 +       .byte   0x22
20572 +       .long   0x5a
20573 +       .byte   0x2
20574 +       .byte   0x23
20575 +       .uleb128 0x50
20576 +       .byte   0x0
20577 +       .uleb128 0xf
20578 +       .long   0x7f2
20579 +       .long   .LASF104
20580 +       .byte   0xc
20581 +       .byte   0x2f
20582 +       .byte   0xa
20583 +       .uleb128 0xa
20584 +       .long   .LASF105
20585 +       .byte   0x2f
20586 +       .byte   0xb
20587 +       .long   0x2f
20588 +       .byte   0x2
20589 +       .byte   0x23
20590 +       .uleb128 0x0
20591 +       .uleb128 0xa
20592 +       .long   .LASF106
20593 +       .byte   0x2f
20594 +       .byte   0xd
20595 +       .long   0x7f2
20596 +       .byte   0x2
20597 +       .byte   0x23
20598 +       .uleb128 0x4
20599 +       .uleb128 0xa
20600 +       .long   .LASF107
20601 +       .byte   0x2f
20602 +       .byte   0xe
20603 +       .long   0x53
20604 +       .byte   0x2
20605 +       .byte   0x23
20606 +       .uleb128 0x8
20607 +       .uleb128 0xa
20608 +       .long   .LASF53
20609 +       .byte   0x2f
20610 +       .byte   0x10
20611 +       .long   0x53
20612 +       .byte   0x2
20613 +       .byte   0x23
20614 +       .uleb128 0xa
20615 +       .byte   0x0
20616 +       .uleb128 0x4
20617 +       .byte   0x4
20618 +       .long   0x7f8
20619 +       .uleb128 0x14
20620 +       .long   0xbb
20621 +       .uleb128 0x15
20622 +       .long   0x8dc
20623 +       .long   .LASF108
20624 +       .byte   0x40
20625 +       .byte   0x38
20626 +       .value  0x154
20627 +       .uleb128 0x16
20628 +       .long   .LASF109
20629 +       .byte   0x38
20630 +       .value  0x15b
20631 +       .long   0x5a
20632 +       .byte   0x2
20633 +       .byte   0x23
20634 +       .uleb128 0x0
20635 +       .uleb128 0x16
20636 +       .long   .LASF110
20637 +       .byte   0x38
20638 +       .value  0x15c
20639 +       .long   0x8dc
20640 +       .byte   0x2
20641 +       .byte   0x23
20642 +       .uleb128 0x4
20643 +       .uleb128 0x16
20644 +       .long   .LASF111
20645 +       .byte   0x38
20646 +       .value  0x15d
20647 +       .long   0x2f
20648 +       .byte   0x2
20649 +       .byte   0x23
20650 +       .uleb128 0x10
20651 +       .uleb128 0x16
20652 +       .long   .LASF112
20653 +       .byte   0x38
20654 +       .value  0x15e
20655 +       .long   0x2f
20656 +       .byte   0x2
20657 +       .byte   0x23
20658 +       .uleb128 0x14
20659 +       .uleb128 0x16
20660 +       .long   .LASF113
20661 +       .byte   0x38
20662 +       .value  0x15f
20663 +       .long   0x2f
20664 +       .byte   0x2
20665 +       .byte   0x23
20666 +       .uleb128 0x18
20667 +       .uleb128 0x16
20668 +       .long   .LASF114
20669 +       .byte   0x38
20670 +       .value  0x160
20671 +       .long   0x2f
20672 +       .byte   0x2
20673 +       .byte   0x23
20674 +       .uleb128 0x1c
20675 +       .uleb128 0x16
20676 +       .long   .LASF115
20677 +       .byte   0x38
20678 +       .value  0x161
20679 +       .long   0x2f
20680 +       .byte   0x2
20681 +       .byte   0x23
20682 +       .uleb128 0x20
20683 +       .uleb128 0x16
20684 +       .long   .LASF116
20685 +       .byte   0x38
20686 +       .value  0x162
20687 +       .long   0x2f
20688 +       .byte   0x2
20689 +       .byte   0x23
20690 +       .uleb128 0x24
20691 +       .uleb128 0x16
20692 +       .long   .LASF117
20693 +       .byte   0x38
20694 +       .value  0x163
20695 +       .long   0x53
20696 +       .byte   0x2
20697 +       .byte   0x23
20698 +       .uleb128 0x28
20699 +       .uleb128 0x17
20700 +       .string "pad"
20701 +       .byte   0x38
20702 +       .value  0x164
20703 +       .long   0x53
20704 +       .byte   0x2
20705 +       .byte   0x23
20706 +       .uleb128 0x2a
20707 +       .uleb128 0x16
20708 +       .long   .LASF118
20709 +       .byte   0x38
20710 +       .value  0x165
20711 +       .long   0x2f
20712 +       .byte   0x2
20713 +       .byte   0x23
20714 +       .uleb128 0x2c
20715 +       .uleb128 0x16
20716 +       .long   .LASF119
20717 +       .byte   0x38
20718 +       .value  0x166
20719 +       .long   0x2f
20720 +       .byte   0x2
20721 +       .byte   0x23
20722 +       .uleb128 0x30
20723 +       .uleb128 0x16
20724 +       .long   .LASF120
20725 +       .byte   0x38
20726 +       .value  0x167
20727 +       .long   0x77
20728 +       .byte   0x2
20729 +       .byte   0x23
20730 +       .uleb128 0x34
20731 +       .uleb128 0x17
20732 +       .string "_f"
20733 +       .byte   0x38
20734 +       .value  0x168
20735 +       .long   0x8ec
20736 +       .byte   0x2
20737 +       .byte   0x23
20738 +       .uleb128 0x38
20739 +       .byte   0x0
20740 +       .uleb128 0x12
20741 +       .long   0x8ec
20742 +       .long   0x2f
20743 +       .uleb128 0x13
20744 +       .long   0x28
20745 +       .byte   0x2
20746 +       .byte   0x0
20747 +       .uleb128 0x12
20748 +       .long   0x8fc
20749 +       .long   0xbb
20750 +       .uleb128 0x13
20751 +       .long   0x28
20752 +       .byte   0x7
20753 +       .byte   0x0
20754 +       .uleb128 0x9
20755 +       .long   0x913
20756 +       .byte   0x4
20757 +       .byte   0x13
20758 +       .byte   0x58
20759 +       .uleb128 0xa
20760 +       .long   .LASF121
20761 +       .byte   0x13
20762 +       .byte   0x58
20763 +       .long   0x913
20764 +       .byte   0x2
20765 +       .byte   0x23
20766 +       .uleb128 0x0
20767 +       .byte   0x0
20768 +       .uleb128 0x12
20769 +       .long   0x923
20770 +       .long   0x2f
20771 +       .uleb128 0x13
20772 +       .long   0x28
20773 +       .byte   0x0
20774 +       .byte   0x0
20775 +       .uleb128 0x7
20776 +       .long   .LASF122
20777 +       .byte   0x13
20778 +       .byte   0x58
20779 +       .long   0x8fc
20780 +       .uleb128 0x4
20781 +       .byte   0x4
20782 +       .long   0x934
20783 +       .uleb128 0x18
20784 +       .byte   0x1
20785 +       .long   0x21
20786 +       .uleb128 0x4
20787 +       .byte   0x4
20788 +       .long   0x940
20789 +       .uleb128 0x19
20790 +       .byte   0x1
20791 +       .uleb128 0xf
20792 +       .long   0x967
20793 +       .long   .LASF123
20794 +       .byte   0x8
20795 +       .byte   0x8
20796 +       .byte   0x1d
20797 +       .uleb128 0xb
20798 +       .string "a"
20799 +       .byte   0x8
20800 +       .byte   0x1e
20801 +       .long   0x2f
20802 +       .byte   0x2
20803 +       .byte   0x23
20804 +       .uleb128 0x0
20805 +       .uleb128 0xb
20806 +       .string "b"
20807 +       .byte   0x8
20808 +       .byte   0x1e
20809 +       .long   0x2f
20810 +       .byte   0x2
20811 +       .byte   0x23
20812 +       .uleb128 0x4
20813 +       .byte   0x0
20814 +       .uleb128 0x12
20815 +       .long   0x977
20816 +       .long   0xbb
20817 +       .uleb128 0x13
20818 +       .long   0x28
20819 +       .byte   0xf
20820 +       .byte   0x0
20821 +       .uleb128 0xf
20822 +       .long   0xa02
20823 +       .long   .LASF124
20824 +       .byte   0x70
20825 +       .byte   0x8
20826 +       .byte   0xf0
20827 +       .uleb128 0xb
20828 +       .string "cwd"
20829 +       .byte   0x8
20830 +       .byte   0xf1
20831 +       .long   0x5a
20832 +       .byte   0x2
20833 +       .byte   0x23
20834 +       .uleb128 0x0
20835 +       .uleb128 0xb
20836 +       .string "swd"
20837 +       .byte   0x8
20838 +       .byte   0xf2
20839 +       .long   0x5a
20840 +       .byte   0x2
20841 +       .byte   0x23
20842 +       .uleb128 0x4
20843 +       .uleb128 0xb
20844 +       .string "twd"
20845 +       .byte   0x8
20846 +       .byte   0xf3
20847 +       .long   0x5a
20848 +       .byte   0x2
20849 +       .byte   0x23
20850 +       .uleb128 0x8
20851 +       .uleb128 0xb
20852 +       .string "fip"
20853 +       .byte   0x8
20854 +       .byte   0xf4
20855 +       .long   0x5a
20856 +       .byte   0x2
20857 +       .byte   0x23
20858 +       .uleb128 0xc
20859 +       .uleb128 0xb
20860 +       .string "fcs"
20861 +       .byte   0x8
20862 +       .byte   0xf5
20863 +       .long   0x5a
20864 +       .byte   0x2
20865 +       .byte   0x23
20866 +       .uleb128 0x10
20867 +       .uleb128 0xb
20868 +       .string "foo"
20869 +       .byte   0x8
20870 +       .byte   0xf6
20871 +       .long   0x5a
20872 +       .byte   0x2
20873 +       .byte   0x23
20874 +       .uleb128 0x14
20875 +       .uleb128 0xb
20876 +       .string "fos"
20877 +       .byte   0x8
20878 +       .byte   0xf7
20879 +       .long   0x5a
20880 +       .byte   0x2
20881 +       .byte   0x23
20882 +       .uleb128 0x18
20883 +       .uleb128 0xa
20884 +       .long   .LASF125
20885 +       .byte   0x8
20886 +       .byte   0xf8
20887 +       .long   0xa02
20888 +       .byte   0x2
20889 +       .byte   0x23
20890 +       .uleb128 0x1c
20891 +       .uleb128 0xa
20892 +       .long   .LASF126
20893 +       .byte   0x8
20894 +       .byte   0xf9
20895 +       .long   0x5a
20896 +       .byte   0x2
20897 +       .byte   0x23
20898 +       .uleb128 0x6c
20899 +       .byte   0x0
20900 +       .uleb128 0x12
20901 +       .long   0xa12
20902 +       .long   0x5a
20903 +       .uleb128 0x13
20904 +       .long   0x28
20905 +       .byte   0x13
20906 +       .byte   0x0
20907 +       .uleb128 0x1a
20908 +       .long   0xae2
20909 +       .long   .LASF127
20910 +       .value  0x200
20911 +       .byte   0x8
20912 +       .byte   0xfc
20913 +       .uleb128 0xb
20914 +       .string "cwd"
20915 +       .byte   0x8
20916 +       .byte   0xfd
20917 +       .long   0x53
20918 +       .byte   0x2
20919 +       .byte   0x23
20920 +       .uleb128 0x0
20921 +       .uleb128 0xb
20922 +       .string "swd"
20923 +       .byte   0x8
20924 +       .byte   0xfe
20925 +       .long   0x53
20926 +       .byte   0x2
20927 +       .byte   0x23
20928 +       .uleb128 0x2
20929 +       .uleb128 0xb
20930 +       .string "twd"
20931 +       .byte   0x8
20932 +       .byte   0xff
20933 +       .long   0x53
20934 +       .byte   0x2
20935 +       .byte   0x23
20936 +       .uleb128 0x4
20937 +       .uleb128 0x17
20938 +       .string "fop"
20939 +       .byte   0x8
20940 +       .value  0x100
20941 +       .long   0x53
20942 +       .byte   0x2
20943 +       .byte   0x23
20944 +       .uleb128 0x6
20945 +       .uleb128 0x17
20946 +       .string "fip"
20947 +       .byte   0x8
20948 +       .value  0x101
20949 +       .long   0x5a
20950 +       .byte   0x2
20951 +       .byte   0x23
20952 +       .uleb128 0x8
20953 +       .uleb128 0x17
20954 +       .string "fcs"
20955 +       .byte   0x8
20956 +       .value  0x102
20957 +       .long   0x5a
20958 +       .byte   0x2
20959 +       .byte   0x23
20960 +       .uleb128 0xc
20961 +       .uleb128 0x17
20962 +       .string "foo"
20963 +       .byte   0x8
20964 +       .value  0x103
20965 +       .long   0x5a
20966 +       .byte   0x2
20967 +       .byte   0x23
20968 +       .uleb128 0x10
20969 +       .uleb128 0x17
20970 +       .string "fos"
20971 +       .byte   0x8
20972 +       .value  0x104
20973 +       .long   0x5a
20974 +       .byte   0x2
20975 +       .byte   0x23
20976 +       .uleb128 0x14
20977 +       .uleb128 0x16
20978 +       .long   .LASF128
20979 +       .byte   0x8
20980 +       .value  0x105
20981 +       .long   0x5a
20982 +       .byte   0x2
20983 +       .byte   0x23
20984 +       .uleb128 0x18
20985 +       .uleb128 0x16
20986 +       .long   .LASF129
20987 +       .byte   0x8
20988 +       .value  0x106
20989 +       .long   0x5a
20990 +       .byte   0x2
20991 +       .byte   0x23
20992 +       .uleb128 0x1c
20993 +       .uleb128 0x16
20994 +       .long   .LASF125
20995 +       .byte   0x8
20996 +       .value  0x107
20997 +       .long   0xae2
20998 +       .byte   0x2
20999 +       .byte   0x23
21000 +       .uleb128 0x20
21001 +       .uleb128 0x16
21002 +       .long   .LASF130
21003 +       .byte   0x8
21004 +       .value  0x108
21005 +       .long   0xae2
21006 +       .byte   0x3
21007 +       .byte   0x23
21008 +       .uleb128 0xa0
21009 +       .uleb128 0x16
21010 +       .long   .LASF131
21011 +       .byte   0x8
21012 +       .value  0x109
21013 +       .long   0xaf2
21014 +       .byte   0x3
21015 +       .byte   0x23
21016 +       .uleb128 0x120
21017 +       .byte   0x0
21018 +       .uleb128 0x12
21019 +       .long   0xaf2
21020 +       .long   0x5a
21021 +       .uleb128 0x13
21022 +       .long   0x28
21023 +       .byte   0x1f
21024 +       .byte   0x0
21025 +       .uleb128 0x12
21026 +       .long   0xb02
21027 +       .long   0x5a
21028 +       .uleb128 0x13
21029 +       .long   0x28
21030 +       .byte   0x37
21031 +       .byte   0x0
21032 +       .uleb128 0x15
21033 +       .long   0xbff
21034 +       .long   .LASF132
21035 +       .byte   0x7c
21036 +       .byte   0x8
21037 +       .value  0x10c
21038 +       .uleb128 0x17
21039 +       .string "cwd"
21040 +       .byte   0x8
21041 +       .value  0x10d
21042 +       .long   0x5a
21043 +       .byte   0x2
21044 +       .byte   0x23
21045 +       .uleb128 0x0
21046 +       .uleb128 0x17
21047 +       .string "swd"
21048 +       .byte   0x8
21049 +       .value  0x10e
21050 +       .long   0x5a
21051 +       .byte   0x2
21052 +       .byte   0x23
21053 +       .uleb128 0x4
21054 +       .uleb128 0x17
21055 +       .string "twd"
21056 +       .byte   0x8
21057 +       .value  0x10f
21058 +       .long   0x5a
21059 +       .byte   0x2
21060 +       .byte   0x23
21061 +       .uleb128 0x8
21062 +       .uleb128 0x17
21063 +       .string "fip"
21064 +       .byte   0x8
21065 +       .value  0x110
21066 +       .long   0x5a
21067 +       .byte   0x2
21068 +       .byte   0x23
21069 +       .uleb128 0xc
21070 +       .uleb128 0x17
21071 +       .string "fcs"
21072 +       .byte   0x8
21073 +       .value  0x111
21074 +       .long   0x5a
21075 +       .byte   0x2
21076 +       .byte   0x23
21077 +       .uleb128 0x10
21078 +       .uleb128 0x17
21079 +       .string "foo"
21080 +       .byte   0x8
21081 +       .value  0x112
21082 +       .long   0x5a
21083 +       .byte   0x2
21084 +       .byte   0x23
21085 +       .uleb128 0x14
21086 +       .uleb128 0x17
21087 +       .string "fos"
21088 +       .byte   0x8
21089 +       .value  0x113
21090 +       .long   0x5a
21091 +       .byte   0x2
21092 +       .byte   0x23
21093 +       .uleb128 0x18
21094 +       .uleb128 0x16
21095 +       .long   .LASF125
21096 +       .byte   0x8
21097 +       .value  0x114
21098 +       .long   0xa02
21099 +       .byte   0x2
21100 +       .byte   0x23
21101 +       .uleb128 0x1c
21102 +       .uleb128 0x16
21103 +       .long   .LASF133
21104 +       .byte   0x8
21105 +       .value  0x115
21106 +       .long   0x112
21107 +       .byte   0x2
21108 +       .byte   0x23
21109 +       .uleb128 0x6c
21110 +       .uleb128 0x16
21111 +       .long   .LASF134
21112 +       .byte   0x8
21113 +       .value  0x115
21114 +       .long   0x112
21115 +       .byte   0x2
21116 +       .byte   0x23
21117 +       .uleb128 0x6d
21118 +       .uleb128 0x16
21119 +       .long   .LASF135
21120 +       .byte   0x8
21121 +       .value  0x115
21122 +       .long   0x112
21123 +       .byte   0x2
21124 +       .byte   0x23
21125 +       .uleb128 0x6e
21126 +       .uleb128 0x16
21127 +       .long   .LASF136
21128 +       .byte   0x8
21129 +       .value  0x115
21130 +       .long   0x112
21131 +       .byte   0x2
21132 +       .byte   0x23
21133 +       .uleb128 0x6f
21134 +       .uleb128 0x17
21135 +       .string "rm"
21136 +       .byte   0x8
21137 +       .value  0x115
21138 +       .long   0x112
21139 +       .byte   0x2
21140 +       .byte   0x23
21141 +       .uleb128 0x70
21142 +       .uleb128 0x16
21143 +       .long   .LASF137
21144 +       .byte   0x8
21145 +       .value  0x115
21146 +       .long   0x112
21147 +       .byte   0x2
21148 +       .byte   0x23
21149 +       .uleb128 0x71
21150 +       .uleb128 0x16
21151 +       .long   .LASF82
21152 +       .byte   0x8
21153 +       .value  0x116
21154 +       .long   0xbff
21155 +       .byte   0x2
21156 +       .byte   0x23
21157 +       .uleb128 0x74
21158 +       .uleb128 0x16
21159 +       .long   .LASF138
21160 +       .byte   0x8
21161 +       .value  0x117
21162 +       .long   0x2f
21163 +       .byte   0x2
21164 +       .byte   0x23
21165 +       .uleb128 0x78
21166 +       .byte   0x0
21167 +       .uleb128 0x4
21168 +       .byte   0x4
21169 +       .long   0x67a
21170 +       .uleb128 0x1b
21171 +       .long   0xc38
21172 +       .long   .LASF472
21173 +       .value  0x200
21174 +       .byte   0x8
21175 +       .value  0x11a
21176 +       .uleb128 0x1c
21177 +       .long   .LASF139
21178 +       .byte   0x8
21179 +       .value  0x11b
21180 +       .long   0x977
21181 +       .uleb128 0x1c
21182 +       .long   .LASF140
21183 +       .byte   0x8
21184 +       .value  0x11c
21185 +       .long   0xa12
21186 +       .uleb128 0x1c
21187 +       .long   .LASF141
21188 +       .byte   0x8
21189 +       .value  0x11d
21190 +       .long   0xb02
21191 +       .byte   0x0
21192 +       .uleb128 0x1d
21193 +       .long   0xc51
21194 +       .byte   0x4
21195 +       .byte   0x8
21196 +       .value  0x120
21197 +       .uleb128 0x17
21198 +       .string "seg"
21199 +       .byte   0x8
21200 +       .value  0x121
21201 +       .long   0x2f
21202 +       .byte   0x2
21203 +       .byte   0x23
21204 +       .uleb128 0x0
21205 +       .byte   0x0
21206 +       .uleb128 0x1e
21207 +       .long   .LASF142
21208 +       .byte   0x8
21209 +       .value  0x122
21210 +       .long   0xc38
21211 +       .uleb128 0x1f
21212 +       .long   0xdbe
21213 +       .long   .LASF143
21214 +       .value  0x290
21215 +       .byte   0x8
21216 +       .value  0x124
21217 +       .uleb128 0x16
21218 +       .long   .LASF144
21219 +       .byte   0x8
21220 +       .value  0x15e
21221 +       .long   0xdbe
21222 +       .byte   0x2
21223 +       .byte   0x23
21224 +       .uleb128 0x0
21225 +       .uleb128 0x16
21226 +       .long   .LASF145
21227 +       .byte   0x8
21228 +       .value  0x15f
21229 +       .long   0x2f
21230 +       .byte   0x2
21231 +       .byte   0x23
21232 +       .uleb128 0x18
21233 +       .uleb128 0x16
21234 +       .long   .LASF146
21235 +       .byte   0x8
21236 +       .value  0x160
21237 +       .long   0x2f
21238 +       .byte   0x2
21239 +       .byte   0x23
21240 +       .uleb128 0x1c
21241 +       .uleb128 0x17
21242 +       .string "eip"
21243 +       .byte   0x8
21244 +       .value  0x161
21245 +       .long   0x2f
21246 +       .byte   0x2
21247 +       .byte   0x23
21248 +       .uleb128 0x20
21249 +       .uleb128 0x17
21250 +       .string "esp"
21251 +       .byte   0x8
21252 +       .value  0x162
21253 +       .long   0x2f
21254 +       .byte   0x2
21255 +       .byte   0x23
21256 +       .uleb128 0x24
21257 +       .uleb128 0x17
21258 +       .string "fs"
21259 +       .byte   0x8
21260 +       .value  0x163
21261 +       .long   0x2f
21262 +       .byte   0x2
21263 +       .byte   0x23
21264 +       .uleb128 0x28
21265 +       .uleb128 0x17
21266 +       .string "gs"
21267 +       .byte   0x8
21268 +       .value  0x164
21269 +       .long   0x2f
21270 +       .byte   0x2
21271 +       .byte   0x23
21272 +       .uleb128 0x2c
21273 +       .uleb128 0x16
21274 +       .long   .LASF147
21275 +       .byte   0x8
21276 +       .value  0x166
21277 +       .long   0x515
21278 +       .byte   0x2
21279 +       .byte   0x23
21280 +       .uleb128 0x30
21281 +       .uleb128 0x17
21282 +       .string "cr2"
21283 +       .byte   0x8
21284 +       .value  0x168
21285 +       .long   0x2f
21286 +       .byte   0x2
21287 +       .byte   0x23
21288 +       .uleb128 0x50
21289 +       .uleb128 0x16
21290 +       .long   .LASF148
21291 +       .byte   0x8
21292 +       .value  0x168
21293 +       .long   0x2f
21294 +       .byte   0x2
21295 +       .byte   0x23
21296 +       .uleb128 0x54
21297 +       .uleb128 0x16
21298 +       .long   .LASF149
21299 +       .byte   0x8
21300 +       .value  0x168
21301 +       .long   0x2f
21302 +       .byte   0x2
21303 +       .byte   0x23
21304 +       .uleb128 0x58
21305 +       .uleb128 0x16
21306 +       .long   .LASF150
21307 +       .byte   0x8
21308 +       .value  0x16a
21309 +       .long   0xc05
21310 +       .byte   0x2
21311 +       .byte   0x23
21312 +       .uleb128 0x60
21313 +       .uleb128 0x16
21314 +       .long   .LASF151
21315 +       .byte   0x8
21316 +       .value  0x16c
21317 +       .long   0xdce
21318 +       .byte   0x3
21319 +       .byte   0x23
21320 +       .uleb128 0x260
21321 +       .uleb128 0x16
21322 +       .long   .LASF77
21323 +       .byte   0x8
21324 +       .value  0x16d
21325 +       .long   0x2f
21326 +       .byte   0x3
21327 +       .byte   0x23
21328 +       .uleb128 0x264
21329 +       .uleb128 0x16
21330 +       .long   .LASF152
21331 +       .byte   0x8
21332 +       .value  0x16e
21333 +       .long   0x2f
21334 +       .byte   0x3
21335 +       .byte   0x23
21336 +       .uleb128 0x268
21337 +       .uleb128 0x16
21338 +       .long   .LASF153
21339 +       .byte   0x8
21340 +       .value  0x16e
21341 +       .long   0x2f
21342 +       .byte   0x3
21343 +       .byte   0x23
21344 +       .uleb128 0x26c
21345 +       .uleb128 0x16
21346 +       .long   .LASF154
21347 +       .byte   0x8
21348 +       .value  0x16e
21349 +       .long   0x2f
21350 +       .byte   0x3
21351 +       .byte   0x23
21352 +       .uleb128 0x270
21353 +       .uleb128 0x16
21354 +       .long   .LASF155
21355 +       .byte   0x8
21356 +       .value  0x16f
21357 +       .long   0x77
21358 +       .byte   0x3
21359 +       .byte   0x23
21360 +       .uleb128 0x274
21361 +       .uleb128 0x16
21362 +       .long   .LASF156
21363 +       .byte   0x8
21364 +       .value  0x16f
21365 +       .long   0x77
21366 +       .byte   0x3
21367 +       .byte   0x23
21368 +       .uleb128 0x278
21369 +       .uleb128 0x16
21370 +       .long   .LASF157
21371 +       .byte   0x8
21372 +       .value  0x171
21373 +       .long   0xdd4
21374 +       .byte   0x3
21375 +       .byte   0x23
21376 +       .uleb128 0x27c
21377 +       .uleb128 0x16
21378 +       .long   .LASF158
21379 +       .byte   0x8
21380 +       .value  0x172
21381 +       .long   0x2f
21382 +       .byte   0x3
21383 +       .byte   0x23
21384 +       .uleb128 0x280
21385 +       .uleb128 0x16
21386 +       .long   .LASF159
21387 +       .byte   0x8
21388 +       .value  0x174
21389 +       .long   0x2f
21390 +       .byte   0x3
21391 +       .byte   0x23
21392 +       .uleb128 0x284
21393 +       .byte   0x0
21394 +       .uleb128 0x12
21395 +       .long   0xdce
21396 +       .long   0x942
21397 +       .uleb128 0x13
21398 +       .long   0x28
21399 +       .byte   0x2
21400 +       .byte   0x0
21401 +       .uleb128 0x4
21402 +       .byte   0x4
21403 +       .long   0x525
21404 +       .uleb128 0x4
21405 +       .byte   0x4
21406 +       .long   0x2f
21407 +       .uleb128 0xf
21408 +       .long   0xe81
21409 +       .long   .LASF160
21410 +       .byte   0x3c
21411 +       .byte   0x10
21412 +       .byte   0x1b
21413 +       .uleb128 0xa
21414 +       .long   .LASF161
21415 +       .byte   0x10
21416 +       .byte   0x1c
21417 +       .long   0x15f9
21418 +       .byte   0x2
21419 +       .byte   0x23
21420 +       .uleb128 0x0
21421 +       .uleb128 0xa
21422 +       .long   .LASF162
21423 +       .byte   0x10
21424 +       .byte   0x1d
21425 +       .long   0x1605
21426 +       .byte   0x2
21427 +       .byte   0x23
21428 +       .uleb128 0x4
21429 +       .uleb128 0xa
21430 +       .long   .LASF53
21431 +       .byte   0x10
21432 +       .byte   0x1e
21433 +       .long   0x2f
21434 +       .byte   0x2
21435 +       .byte   0x23
21436 +       .uleb128 0x8
21437 +       .uleb128 0xa
21438 +       .long   .LASF126
21439 +       .byte   0x10
21440 +       .byte   0x1f
21441 +       .long   0x2f
21442 +       .byte   0x2
21443 +       .byte   0x23
21444 +       .uleb128 0xc
21445 +       .uleb128 0xb
21446 +       .string "cpu"
21447 +       .byte   0x10
21448 +       .byte   0x20
21449 +       .long   0x141
21450 +       .byte   0x2
21451 +       .byte   0x23
21452 +       .uleb128 0x10
21453 +       .uleb128 0xa
21454 +       .long   .LASF163
21455 +       .byte   0x10
21456 +       .byte   0x21
21457 +       .long   0x21
21458 +       .byte   0x2
21459 +       .byte   0x23
21460 +       .uleb128 0x14
21461 +       .uleb128 0xa
21462 +       .long   .LASF164
21463 +       .byte   0x10
21464 +       .byte   0x24
21465 +       .long   0xc51
21466 +       .byte   0x2
21467 +       .byte   0x23
21468 +       .uleb128 0x18
21469 +       .uleb128 0xa
21470 +       .long   .LASF165
21471 +       .byte   0x10
21472 +       .byte   0x28
21473 +       .long   0x160b
21474 +       .byte   0x2
21475 +       .byte   0x23
21476 +       .uleb128 0x1c
21477 +       .uleb128 0xa
21478 +       .long   .LASF59
21479 +       .byte   0x10
21480 +       .byte   0x29
21481 +       .long   0x2f7
21482 +       .byte   0x2
21483 +       .byte   0x23
21484 +       .uleb128 0x20
21485 +       .uleb128 0xa
21486 +       .long   .LASF166
21487 +       .byte   0x10
21488 +       .byte   0x2b
21489 +       .long   0x2f
21490 +       .byte   0x2
21491 +       .byte   0x23
21492 +       .uleb128 0x38
21493 +       .uleb128 0xa
21494 +       .long   .LASF167
21495 +       .byte   0x10
21496 +       .byte   0x2e
21497 +       .long   0x160d
21498 +       .byte   0x2
21499 +       .byte   0x23
21500 +       .uleb128 0x3c
21501 +       .byte   0x0
21502 +       .uleb128 0x1a
21503 +       .long   0x15f9
21504 +       .long   .LASF168
21505 +       .value  0x510
21506 +       .byte   0x11
21507 +       .byte   0x13
21508 +       .uleb128 0x16
21509 +       .long   .LASF169
21510 +       .byte   0xb
21511 +       .value  0x336
21512 +       .long   0x43e6
21513 +       .byte   0x2
21514 +       .byte   0x23
21515 +       .uleb128 0x0
21516 +       .uleb128 0x16
21517 +       .long   .LASF170
21518 +       .byte   0xb
21519 +       .value  0x337
21520 +       .long   0x160b
21521 +       .byte   0x2
21522 +       .byte   0x23
21523 +       .uleb128 0x4
21524 +       .uleb128 0x16
21525 +       .long   .LASF171
21526 +       .byte   0xb
21527 +       .value  0x338
21528 +       .long   0x16c4
21529 +       .byte   0x2
21530 +       .byte   0x23
21531 +       .uleb128 0x8
21532 +       .uleb128 0x16
21533 +       .long   .LASF53
21534 +       .byte   0xb
21535 +       .value  0x339
21536 +       .long   0x77
21537 +       .byte   0x2
21538 +       .byte   0x23
21539 +       .uleb128 0xc
21540 +       .uleb128 0x16
21541 +       .long   .LASF172
21542 +       .byte   0xb
21543 +       .value  0x33a
21544 +       .long   0x77
21545 +       .byte   0x2
21546 +       .byte   0x23
21547 +       .uleb128 0x10
21548 +       .uleb128 0x16
21549 +       .long   .LASF173
21550 +       .byte   0xb
21551 +       .value  0x33c
21552 +       .long   0x21
21553 +       .byte   0x2
21554 +       .byte   0x23
21555 +       .uleb128 0x14
21556 +       .uleb128 0x16
21557 +       .long   .LASF174
21558 +       .byte   0xb
21559 +       .value  0x343
21560 +       .long   0x21
21561 +       .byte   0x2
21562 +       .byte   0x23
21563 +       .uleb128 0x18
21564 +       .uleb128 0x16
21565 +       .long   .LASF175
21566 +       .byte   0xb
21567 +       .value  0x344
21568 +       .long   0x21
21569 +       .byte   0x2
21570 +       .byte   0x23
21571 +       .uleb128 0x1c
21572 +       .uleb128 0x16
21573 +       .long   .LASF176
21574 +       .byte   0xb
21575 +       .value  0x344
21576 +       .long   0x21
21577 +       .byte   0x2
21578 +       .byte   0x23
21579 +       .uleb128 0x20
21580 +       .uleb128 0x16
21581 +       .long   .LASF177
21582 +       .byte   0xb
21583 +       .value  0x344
21584 +       .long   0x21
21585 +       .byte   0x2
21586 +       .byte   0x23
21587 +       .uleb128 0x24
21588 +       .uleb128 0x16
21589 +       .long   .LASF178
21590 +       .byte   0xb
21591 +       .value  0x345
21592 +       .long   0x17bc
21593 +       .byte   0x2
21594 +       .byte   0x23
21595 +       .uleb128 0x28
21596 +       .uleb128 0x16
21597 +       .long   .LASF179
21598 +       .byte   0xb
21599 +       .value  0x346
21600 +       .long   0x43f1
21601 +       .byte   0x2
21602 +       .byte   0x23
21603 +       .uleb128 0x30
21604 +       .uleb128 0x16
21605 +       .long   .LASF180
21606 +       .byte   0xb
21607 +       .value  0x348
21608 +       .long   0x53
21609 +       .byte   0x2
21610 +       .byte   0x23
21611 +       .uleb128 0x34
21612 +       .uleb128 0x16
21613 +       .long   .LASF181
21614 +       .byte   0xb
21615 +       .value  0x34c
21616 +       .long   0x2f
21617 +       .byte   0x2
21618 +       .byte   0x23
21619 +       .uleb128 0x38
21620 +       .uleb128 0x16
21621 +       .long   .LASF182
21622 +       .byte   0xb
21623 +       .value  0x34d
21624 +       .long   0x162
21625 +       .byte   0x2
21626 +       .byte   0x23
21627 +       .uleb128 0x3c
21628 +       .uleb128 0x16
21629 +       .long   .LASF183
21630 +       .byte   0xb
21631 +       .value  0x34d
21632 +       .long   0x162
21633 +       .byte   0x2
21634 +       .byte   0x23
21635 +       .uleb128 0x44
21636 +       .uleb128 0x16
21637 +       .long   .LASF184
21638 +       .byte   0xb
21639 +       .value  0x34f
21640 +       .long   0x2f
21641 +       .byte   0x2
21642 +       .byte   0x23
21643 +       .uleb128 0x4c
21644 +       .uleb128 0x16
21645 +       .long   .LASF185
21646 +       .byte   0xb
21647 +       .value  0x34f
21648 +       .long   0x2f
21649 +       .byte   0x2
21650 +       .byte   0x23
21651 +       .uleb128 0x50
21652 +       .uleb128 0x16
21653 +       .long   .LASF186
21654 +       .byte   0xb
21655 +       .value  0x352
21656 +       .long   0x162
21657 +       .byte   0x2
21658 +       .byte   0x23
21659 +       .uleb128 0x54
21660 +       .uleb128 0x16
21661 +       .long   .LASF187
21662 +       .byte   0xb
21663 +       .value  0x353
21664 +       .long   0x43c0
21665 +       .byte   0x2
21666 +       .byte   0x23
21667 +       .uleb128 0x5c
21668 +       .uleb128 0x16
21669 +       .long   .LASF188
21670 +       .byte   0xb
21671 +       .value  0x355
21672 +       .long   0x77
21673 +       .byte   0x2
21674 +       .byte   0x23
21675 +       .uleb128 0x60
21676 +       .uleb128 0x16
21677 +       .long   .LASF189
21678 +       .byte   0xb
21679 +       .value  0x356
21680 +       .long   0x923
21681 +       .byte   0x2
21682 +       .byte   0x23
21683 +       .uleb128 0x64
21684 +       .uleb128 0x16
21685 +       .long   .LASF190
21686 +       .byte   0xb
21687 +       .value  0x357
21688 +       .long   0x77
21689 +       .byte   0x2
21690 +       .byte   0x23
21691 +       .uleb128 0x68
21692 +       .uleb128 0x16
21693 +       .long   .LASF191
21694 +       .byte   0xb
21695 +       .value  0x357
21696 +       .long   0x77
21697 +       .byte   0x2
21698 +       .byte   0x23
21699 +       .uleb128 0x6c
21700 +       .uleb128 0x16
21701 +       .long   .LASF192
21702 +       .byte   0xb
21703 +       .value  0x35d
21704 +       .long   0x17bc
21705 +       .byte   0x2
21706 +       .byte   0x23
21707 +       .uleb128 0x70
21708 +       .uleb128 0x16
21709 +       .long   .LASF193
21710 +       .byte   0xb
21711 +       .value  0x362
21712 +       .long   0x17bc
21713 +       .byte   0x2
21714 +       .byte   0x23
21715 +       .uleb128 0x78
21716 +       .uleb128 0x16
21717 +       .long   .LASF194
21718 +       .byte   0xb
21719 +       .value  0x363
21720 +       .long   0x17bc
21721 +       .byte   0x3
21722 +       .byte   0x23
21723 +       .uleb128 0x80
21724 +       .uleb128 0x17
21725 +       .string "mm"
21726 +       .byte   0xb
21727 +       .value  0x365
21728 +       .long   0x36ad
21729 +       .byte   0x3
21730 +       .byte   0x23
21731 +       .uleb128 0x88
21732 +       .uleb128 0x16
21733 +       .long   .LASF195
21734 +       .byte   0xb
21735 +       .value  0x365
21736 +       .long   0x36ad
21737 +       .byte   0x3
21738 +       .byte   0x23
21739 +       .uleb128 0x8c
21740 +       .uleb128 0x16
21741 +       .long   .LASF196
21742 +       .byte   0xb
21743 +       .value  0x368
21744 +       .long   0x43fd
21745 +       .byte   0x3
21746 +       .byte   0x23
21747 +       .uleb128 0x90
21748 +       .uleb128 0x16
21749 +       .long   .LASF197
21750 +       .byte   0xb
21751 +       .value  0x369
21752 +       .long   0x21
21753 +       .byte   0x3
21754 +       .byte   0x23
21755 +       .uleb128 0x94
21756 +       .uleb128 0x16
21757 +       .long   .LASF198
21758 +       .byte   0xb
21759 +       .value  0x36a
21760 +       .long   0x21
21761 +       .byte   0x3
21762 +       .byte   0x23
21763 +       .uleb128 0x98
21764 +       .uleb128 0x16
21765 +       .long   .LASF199
21766 +       .byte   0xb
21767 +       .value  0x36a
21768 +       .long   0x21
21769 +       .byte   0x3
21770 +       .byte   0x23
21771 +       .uleb128 0x9c
21772 +       .uleb128 0x16
21773 +       .long   .LASF200
21774 +       .byte   0xb
21775 +       .value  0x36b
21776 +       .long   0x21
21777 +       .byte   0x3
21778 +       .byte   0x23
21779 +       .uleb128 0xa0
21780 +       .uleb128 0x16
21781 +       .long   .LASF201
21782 +       .byte   0xb
21783 +       .value  0x36d
21784 +       .long   0x77
21785 +       .byte   0x3
21786 +       .byte   0x23
21787 +       .uleb128 0xa4
21788 +       .uleb128 0x20
21789 +       .long   .LASF202
21790 +       .byte   0xb
21791 +       .value  0x36e
21792 +       .long   0x77
21793 +       .byte   0x4
21794 +       .byte   0x1
21795 +       .byte   0x1f
21796 +       .byte   0x3
21797 +       .byte   0x23
21798 +       .uleb128 0xa8
21799 +       .uleb128 0x17
21800 +       .string "pid"
21801 +       .byte   0xb
21802 +       .value  0x36f
21803 +       .long   0x1b5
21804 +       .byte   0x3
21805 +       .byte   0x23
21806 +       .uleb128 0xac
21807 +       .uleb128 0x16
21808 +       .long   .LASF203
21809 +       .byte   0xb
21810 +       .value  0x370
21811 +       .long   0x1b5
21812 +       .byte   0x3
21813 +       .byte   0x23
21814 +       .uleb128 0xb0
21815 +       .uleb128 0x16
21816 +       .long   .LASF204
21817 +       .byte   0xb
21818 +       .value  0x37b
21819 +       .long   0x15f9
21820 +       .byte   0x3
21821 +       .byte   0x23
21822 +       .uleb128 0xb4
21823 +       .uleb128 0x16
21824 +       .long   .LASF205
21825 +       .byte   0xb
21826 +       .value  0x37c
21827 +       .long   0x15f9
21828 +       .byte   0x3
21829 +       .byte   0x23
21830 +       .uleb128 0xb8
21831 +       .uleb128 0x16
21832 +       .long   .LASF206
21833 +       .byte   0xb
21834 +       .value  0x381
21835 +       .long   0x17bc
21836 +       .byte   0x3
21837 +       .byte   0x23
21838 +       .uleb128 0xbc
21839 +       .uleb128 0x16
21840 +       .long   .LASF207
21841 +       .byte   0xb
21842 +       .value  0x382
21843 +       .long   0x17bc
21844 +       .byte   0x3
21845 +       .byte   0x23
21846 +       .uleb128 0xc4
21847 +       .uleb128 0x16
21848 +       .long   .LASF208
21849 +       .byte   0xb
21850 +       .value  0x383
21851 +       .long   0x15f9
21852 +       .byte   0x3
21853 +       .byte   0x23
21854 +       .uleb128 0xcc
21855 +       .uleb128 0x16
21856 +       .long   .LASF209
21857 +       .byte   0xb
21858 +       .value  0x386
21859 +       .long   0x4403
21860 +       .byte   0x3
21861 +       .byte   0x23
21862 +       .uleb128 0xd0
21863 +       .uleb128 0x16
21864 +       .long   .LASF210
21865 +       .byte   0xb
21866 +       .value  0x387
21867 +       .long   0x17bc
21868 +       .byte   0x3
21869 +       .byte   0x23
21870 +       .uleb128 0xf4
21871 +       .uleb128 0x16
21872 +       .long   .LASF211
21873 +       .byte   0xb
21874 +       .value  0x389
21875 +       .long   0x3ff9
21876 +       .byte   0x3
21877 +       .byte   0x23
21878 +       .uleb128 0xfc
21879 +       .uleb128 0x16
21880 +       .long   .LASF212
21881 +       .byte   0xb
21882 +       .value  0x38a
21883 +       .long   0x4413
21884 +       .byte   0x3
21885 +       .byte   0x23
21886 +       .uleb128 0x100
21887 +       .uleb128 0x16
21888 +       .long   .LASF213
21889 +       .byte   0xb
21890 +       .value  0x38b
21891 +       .long   0x4413
21892 +       .byte   0x3
21893 +       .byte   0x23
21894 +       .uleb128 0x104
21895 +       .uleb128 0x16
21896 +       .long   .LASF214
21897 +       .byte   0xb
21898 +       .value  0x38d
21899 +       .long   0x77
21900 +       .byte   0x3
21901 +       .byte   0x23
21902 +       .uleb128 0x108
21903 +       .uleb128 0x16
21904 +       .long   .LASF215
21905 +       .byte   0xb
21906 +       .value  0x38e
21907 +       .long   0x19b4
21908 +       .byte   0x3
21909 +       .byte   0x23
21910 +       .uleb128 0x10c
21911 +       .uleb128 0x16
21912 +       .long   .LASF216
21913 +       .byte   0xb
21914 +       .value  0x38e
21915 +       .long   0x19b4
21916 +       .byte   0x3
21917 +       .byte   0x23
21918 +       .uleb128 0x110
21919 +       .uleb128 0x16
21920 +       .long   .LASF217
21921 +       .byte   0xb
21922 +       .value  0x38f
21923 +       .long   0x2f
21924 +       .byte   0x3
21925 +       .byte   0x23
21926 +       .uleb128 0x114
21927 +       .uleb128 0x16
21928 +       .long   .LASF218
21929 +       .byte   0xb
21930 +       .value  0x38f
21931 +       .long   0x2f
21932 +       .byte   0x3
21933 +       .byte   0x23
21934 +       .uleb128 0x118
21935 +       .uleb128 0x16
21936 +       .long   .LASF219
21937 +       .byte   0xb
21938 +       .value  0x390
21939 +       .long   0x173b
21940 +       .byte   0x3
21941 +       .byte   0x23
21942 +       .uleb128 0x11c
21943 +       .uleb128 0x16
21944 +       .long   .LASF220
21945 +       .byte   0xb
21946 +       .value  0x392
21947 +       .long   0x2f
21948 +       .byte   0x3
21949 +       .byte   0x23
21950 +       .uleb128 0x124
21951 +       .uleb128 0x16
21952 +       .long   .LASF221
21953 +       .byte   0xb
21954 +       .value  0x392
21955 +       .long   0x2f
21956 +       .byte   0x3
21957 +       .byte   0x23
21958 +       .uleb128 0x128
21959 +       .uleb128 0x16
21960 +       .long   .LASF222
21961 +       .byte   0xb
21962 +       .value  0x394
21963 +       .long   0x19b4
21964 +       .byte   0x3
21965 +       .byte   0x23
21966 +       .uleb128 0x12c
21967 +       .uleb128 0x16
21968 +       .long   .LASF223
21969 +       .byte   0xb
21970 +       .value  0x394
21971 +       .long   0x19b4
21972 +       .byte   0x3
21973 +       .byte   0x23
21974 +       .uleb128 0x130
21975 +       .uleb128 0x16
21976 +       .long   .LASF224
21977 +       .byte   0xb
21978 +       .value  0x395
21979 +       .long   0x162
21980 +       .byte   0x3
21981 +       .byte   0x23
21982 +       .uleb128 0x134
21983 +       .uleb128 0x16
21984 +       .long   .LASF225
21985 +       .byte   0xb
21986 +       .value  0x396
21987 +       .long   0x4330
21988 +       .byte   0x3
21989 +       .byte   0x23
21990 +       .uleb128 0x13c
21991 +       .uleb128 0x17
21992 +       .string "uid"
21993 +       .byte   0xb
21994 +       .value  0x399
21995 +       .long   0x1dd
21996 +       .byte   0x3
21997 +       .byte   0x23
21998 +       .uleb128 0x154
21999 +       .uleb128 0x16
22000 +       .long   .LASF226
22001 +       .byte   0xb
22002 +       .value  0x399
22003 +       .long   0x1dd
22004 +       .byte   0x3
22005 +       .byte   0x23
22006 +       .uleb128 0x158
22007 +       .uleb128 0x16
22008 +       .long   .LASF227
22009 +       .byte   0xb
22010 +       .value  0x399
22011 +       .long   0x1dd
22012 +       .byte   0x3
22013 +       .byte   0x23
22014 +       .uleb128 0x15c
22015 +       .uleb128 0x16
22016 +       .long   .LASF228
22017 +       .byte   0xb
22018 +       .value  0x399
22019 +       .long   0x1dd
22020 +       .byte   0x3
22021 +       .byte   0x23
22022 +       .uleb128 0x160
22023 +       .uleb128 0x17
22024 +       .string "gid"
22025 +       .byte   0xb
22026 +       .value  0x39a
22027 +       .long   0x1e8
22028 +       .byte   0x3
22029 +       .byte   0x23
22030 +       .uleb128 0x164
22031 +       .uleb128 0x16
22032 +       .long   .LASF229
22033 +       .byte   0xb
22034 +       .value  0x39a
22035 +       .long   0x1e8
22036 +       .byte   0x3
22037 +       .byte   0x23
22038 +       .uleb128 0x168
22039 +       .uleb128 0x16
22040 +       .long   .LASF230
22041 +       .byte   0xb
22042 +       .value  0x39a
22043 +       .long   0x1e8
22044 +       .byte   0x3
22045 +       .byte   0x23
22046 +       .uleb128 0x16c
22047 +       .uleb128 0x16
22048 +       .long   .LASF231
22049 +       .byte   0xb
22050 +       .value  0x39a
22051 +       .long   0x1e8
22052 +       .byte   0x3
22053 +       .byte   0x23
22054 +       .uleb128 0x170
22055 +       .uleb128 0x16
22056 +       .long   .LASF232
22057 +       .byte   0xb
22058 +       .value  0x39b
22059 +       .long   0x4419
22060 +       .byte   0x3
22061 +       .byte   0x23
22062 +       .uleb128 0x174
22063 +       .uleb128 0x16
22064 +       .long   .LASF233
22065 +       .byte   0xb
22066 +       .value  0x39c
22067 +       .long   0x16da
22068 +       .byte   0x3
22069 +       .byte   0x23
22070 +       .uleb128 0x178
22071 +       .uleb128 0x16
22072 +       .long   .LASF234
22073 +       .byte   0xb
22074 +       .value  0x39c
22075 +       .long   0x16da
22076 +       .byte   0x3
22077 +       .byte   0x23
22078 +       .uleb128 0x17c
22079 +       .uleb128 0x16
22080 +       .long   .LASF235
22081 +       .byte   0xb
22082 +       .value  0x39c
22083 +       .long   0x16da
22084 +       .byte   0x3
22085 +       .byte   0x23
22086 +       .uleb128 0x180
22087 +       .uleb128 0x20
22088 +       .long   .LASF236
22089 +       .byte   0xb
22090 +       .value  0x39d
22091 +       .long   0x77
22092 +       .byte   0x4
22093 +       .byte   0x1
22094 +       .byte   0x1f
22095 +       .byte   0x3
22096 +       .byte   0x23
22097 +       .uleb128 0x184
22098 +       .uleb128 0x16
22099 +       .long   .LASF237
22100 +       .byte   0xb
22101 +       .value  0x39e
22102 +       .long   0x2729
22103 +       .byte   0x3
22104 +       .byte   0x23
22105 +       .uleb128 0x188
22106 +       .uleb128 0x16
22107 +       .long   .LASF238
22108 +       .byte   0xb
22109 +       .value  0x3ac
22110 +       .long   0x112
22111 +       .byte   0x3
22112 +       .byte   0x23
22113 +       .uleb128 0x18c
22114 +       .uleb128 0x16
22115 +       .long   .LASF239
22116 +       .byte   0xb
22117 +       .value  0x3ad
22118 +       .long   0x21
22119 +       .byte   0x3
22120 +       .byte   0x23
22121 +       .uleb128 0x190
22122 +       .uleb128 0x16
22123 +       .long   .LASF240
22124 +       .byte   0xb
22125 +       .value  0x3ae
22126 +       .long   0x967
22127 +       .byte   0x3
22128 +       .byte   0x23
22129 +       .uleb128 0x194
22130 +       .uleb128 0x16
22131 +       .long   .LASF241
22132 +       .byte   0xb
22133 +       .value  0x3b3
22134 +       .long   0x21
22135 +       .byte   0x3
22136 +       .byte   0x23
22137 +       .uleb128 0x1a4
22138 +       .uleb128 0x16
22139 +       .long   .LASF242
22140 +       .byte   0xb
22141 +       .value  0x3b3
22142 +       .long   0x21
22143 +       .byte   0x3
22144 +       .byte   0x23
22145 +       .uleb128 0x1a8
22146 +       .uleb128 0x16
22147 +       .long   .LASF243
22148 +       .byte   0xb
22149 +       .value  0x3b6
22150 +       .long   0x2387
22151 +       .byte   0x3
22152 +       .byte   0x23
22153 +       .uleb128 0x1ac
22154 +       .uleb128 0x16
22155 +       .long   .LASF244
22156 +       .byte   0xb
22157 +       .value  0x3b9
22158 +       .long   0xc5d
22159 +       .byte   0x3
22160 +       .byte   0x23
22161 +       .uleb128 0x1b0
22162 +       .uleb128 0x17
22163 +       .string "fs"
22164 +       .byte   0xb
22165 +       .value  0x3bb
22166 +       .long   0x441f
22167 +       .byte   0x3
22168 +       .byte   0x23
22169 +       .uleb128 0x440
22170 +       .uleb128 0x16
22171 +       .long   .LASF245
22172 +       .byte   0xb
22173 +       .value  0x3bd
22174 +       .long   0x442b
22175 +       .byte   0x3
22176 +       .byte   0x23
22177 +       .uleb128 0x444
22178 +       .uleb128 0x16
22179 +       .long   .LASF246
22180 +       .byte   0xb
22181 +       .value  0x3bf
22182 +       .long   0x4437
22183 +       .byte   0x3
22184 +       .byte   0x23
22185 +       .uleb128 0x448
22186 +       .uleb128 0x16
22187 +       .long   .LASF247
22188 +       .byte   0xb
22189 +       .value  0x3c1
22190 +       .long   0x443d
22191 +       .byte   0x3
22192 +       .byte   0x23
22193 +       .uleb128 0x44c
22194 +       .uleb128 0x16
22195 +       .long   .LASF248
22196 +       .byte   0xb
22197 +       .value  0x3c2
22198 +       .long   0x4443
22199 +       .byte   0x3
22200 +       .byte   0x23
22201 +       .uleb128 0x450
22202 +       .uleb128 0x16
22203 +       .long   .LASF249
22204 +       .byte   0xb
22205 +       .value  0x3c4
22206 +       .long   0x23bf
22207 +       .byte   0x3
22208 +       .byte   0x23
22209 +       .uleb128 0x454
22210 +       .uleb128 0x16
22211 +       .long   .LASF250
22212 +       .byte   0xb
22213 +       .value  0x3c4
22214 +       .long   0x23bf
22215 +       .byte   0x3
22216 +       .byte   0x23
22217 +       .uleb128 0x45c
22218 +       .uleb128 0x16
22219 +       .long   .LASF251
22220 +       .byte   0xb
22221 +       .value  0x3c5
22222 +       .long   0x23bf
22223 +       .byte   0x3
22224 +       .byte   0x23
22225 +       .uleb128 0x464
22226 +       .uleb128 0x16
22227 +       .long   .LASF252
22228 +       .byte   0xb
22229 +       .value  0x3c6
22230 +       .long   0x272f
22231 +       .byte   0x3
22232 +       .byte   0x23
22233 +       .uleb128 0x46c
22234 +       .uleb128 0x16
22235 +       .long   .LASF253
22236 +       .byte   0xb
22237 +       .value  0x3c8
22238 +       .long   0x2f
22239 +       .byte   0x3
22240 +       .byte   0x23
22241 +       .uleb128 0x47c
22242 +       .uleb128 0x16
22243 +       .long   .LASF254
22244 +       .byte   0xb
22245 +       .value  0x3c9
22246 +       .long   0x1fe
22247 +       .byte   0x3
22248 +       .byte   0x23
22249 +       .uleb128 0x480
22250 +       .uleb128 0x16
22251 +       .long   .LASF255
22252 +       .byte   0xb
22253 +       .value  0x3ca
22254 +       .long   0x4459
22255 +       .byte   0x3
22256 +       .byte   0x23
22257 +       .uleb128 0x484
22258 +       .uleb128 0x16
22259 +       .long   .LASF256
22260 +       .byte   0xb
22261 +       .value  0x3cb
22262 +       .long   0x160b
22263 +       .byte   0x3
22264 +       .byte   0x23
22265 +       .uleb128 0x488
22266 +       .uleb128 0x16
22267 +       .long   .LASF257
22268 +       .byte   0xb
22269 +       .value  0x3cc
22270 +       .long   0x445f
22271 +       .byte   0x3
22272 +       .byte   0x23
22273 +       .uleb128 0x48c
22274 +       .uleb128 0x16
22275 +       .long   .LASF258
22276 +       .byte   0xb
22277 +       .value  0x3ce
22278 +       .long   0x160b
22279 +       .byte   0x3
22280 +       .byte   0x23
22281 +       .uleb128 0x490
22282 +       .uleb128 0x16
22283 +       .long   .LASF259
22284 +       .byte   0xb
22285 +       .value  0x3cf
22286 +       .long   0x446b
22287 +       .byte   0x3
22288 +       .byte   0x23
22289 +       .uleb128 0x494
22290 +       .uleb128 0x16
22291 +       .long   .LASF260
22292 +       .byte   0xb
22293 +       .value  0x3d0
22294 +       .long   0x308d
22295 +       .byte   0x3
22296 +       .byte   0x23
22297 +       .uleb128 0x498
22298 +       .uleb128 0x16
22299 +       .long   .LASF261
22300 +       .byte   0xb
22301 +       .value  0x3d3
22302 +       .long   0x173
22303 +       .byte   0x3
22304 +       .byte   0x23
22305 +       .uleb128 0x49c
22306 +       .uleb128 0x16
22307 +       .long   .LASF262
22308 +       .byte   0xb
22309 +       .value  0x3d4
22310 +       .long   0x173
22311 +       .byte   0x3
22312 +       .byte   0x23
22313 +       .uleb128 0x4a0
22314 +       .uleb128 0x16
22315 +       .long   .LASF263
22316 +       .byte   0xb
22317 +       .value  0x3d6
22318 +       .long   0x1680
22319 +       .byte   0x3
22320 +       .byte   0x23
22321 +       .uleb128 0x4a4
22322 +       .uleb128 0x16
22323 +       .long   .LASF264
22324 +       .byte   0xb
22325 +       .value  0x3d9
22326 +       .long   0x1680
22327 +       .byte   0x3
22328 +       .byte   0x23
22329 +       .uleb128 0x4a8
22330 +       .uleb128 0x16
22331 +       .long   .LASF265
22332 +       .byte   0xb
22333 +       .value  0x3dd
22334 +       .long   0x36b3
22335 +       .byte   0x3
22336 +       .byte   0x23
22337 +       .uleb128 0x4ac
22338 +       .uleb128 0x16
22339 +       .long   .LASF266
22340 +       .byte   0xb
22341 +       .value  0x3df
22342 +       .long   0x4477
22343 +       .byte   0x3
22344 +       .byte   0x23
22345 +       .uleb128 0x4bc
22346 +       .uleb128 0x16
22347 +       .long   .LASF267
22348 +       .byte   0xb
22349 +       .value  0x3fe
22350 +       .long   0x160b
22351 +       .byte   0x3
22352 +       .byte   0x23
22353 +       .uleb128 0x4c0
22354 +       .uleb128 0x16
22355 +       .long   .LASF268
22356 +       .byte   0xb
22357 +       .value  0x401
22358 +       .long   0x4483
22359 +       .byte   0x3
22360 +       .byte   0x23
22361 +       .uleb128 0x4c4
22362 +       .uleb128 0x16
22363 +       .long   .LASF269
22364 +       .byte   0xb
22365 +       .value  0x401
22366 +       .long   0x4489
22367 +       .byte   0x3
22368 +       .byte   0x23
22369 +       .uleb128 0x4c8
22370 +       .uleb128 0x16
22371 +       .long   .LASF270
22372 +       .byte   0xb
22373 +       .value  0x404
22374 +       .long   0x44ab
22375 +       .byte   0x3
22376 +       .byte   0x23
22377 +       .uleb128 0x4cc
22378 +       .uleb128 0x16
22379 +       .long   .LASF271
22380 +       .byte   0xb
22381 +       .value  0x406
22382 +       .long   0x4521
22383 +       .byte   0x3
22384 +       .byte   0x23
22385 +       .uleb128 0x4d0
22386 +       .uleb128 0x16
22387 +       .long   .LASF272
22388 +       .byte   0xb
22389 +       .value  0x408
22390 +       .long   0x452d
22391 +       .byte   0x3
22392 +       .byte   0x23
22393 +       .uleb128 0x4d4
22394 +       .uleb128 0x16
22395 +       .long   .LASF273
22396 +       .byte   0xb
22397 +       .value  0x40a
22398 +       .long   0x2f
22399 +       .byte   0x3
22400 +       .byte   0x23
22401 +       .uleb128 0x4d8
22402 +       .uleb128 0x16
22403 +       .long   .LASF274
22404 +       .byte   0xb
22405 +       .value  0x40b
22406 +       .long   0x4533
22407 +       .byte   0x3
22408 +       .byte   0x23
22409 +       .uleb128 0x4dc
22410 +       .uleb128 0x16
22411 +       .long   .LASF275
22412 +       .byte   0xb
22413 +       .value  0x412
22414 +       .long   0x18c0
22415 +       .byte   0x3
22416 +       .byte   0x23
22417 +       .uleb128 0x4e0
22418 +       .uleb128 0x16
22419 +       .long   .LASF276
22420 +       .byte   0xb
22421 +       .value  0x417
22422 +       .long   0x3979
22423 +       .byte   0x3
22424 +       .byte   0x23
22425 +       .uleb128 0x4e4
22426 +       .uleb128 0x16
22427 +       .long   .LASF277
22428 +       .byte   0xb
22429 +       .value  0x427
22430 +       .long   0x4539
22431 +       .byte   0x3
22432 +       .byte   0x23
22433 +       .uleb128 0x4e4
22434 +       .uleb128 0x16
22435 +       .long   .LASF278
22436 +       .byte   0xb
22437 +       .value  0x42b
22438 +       .long   0x17bc
22439 +       .byte   0x3
22440 +       .byte   0x23
22441 +       .uleb128 0x4e8
22442 +       .uleb128 0x16
22443 +       .long   .LASF279
22444 +       .byte   0xb
22445 +       .value  0x42c
22446 +       .long   0x4545
22447 +       .byte   0x3
22448 +       .byte   0x23
22449 +       .uleb128 0x4f0
22450 +       .uleb128 0x16
22451 +       .long   .LASF280
22452 +       .byte   0xb
22453 +       .value  0x42e
22454 +       .long   0x16c4
22455 +       .byte   0x3
22456 +       .byte   0x23
22457 +       .uleb128 0x4f4
22458 +       .uleb128 0x17
22459 +       .string "rcu"
22460 +       .byte   0xb
22461 +       .value  0x42f
22462 +       .long   0x2ea8
22463 +       .byte   0x3
22464 +       .byte   0x23
22465 +       .uleb128 0x4f8
22466 +       .uleb128 0x16
22467 +       .long   .LASF281
22468 +       .byte   0xb
22469 +       .value  0x434
22470 +       .long   0x4551
22471 +       .byte   0x3
22472 +       .byte   0x23
22473 +       .uleb128 0x500
22474 +       .byte   0x0
22475 +       .uleb128 0x4
22476 +       .byte   0x4
22477 +       .long   0xe81
22478 +       .uleb128 0x21
22479 +       .long   .LASF162
22480 +       .byte   0x1
22481 +       .uleb128 0x4
22482 +       .byte   0x4
22483 +       .long   0x15ff
22484 +       .uleb128 0x22
22485 +       .byte   0x4
22486 +       .uleb128 0x12
22487 +       .long   0x161c
22488 +       .long   0x107
22489 +       .uleb128 0x23
22490 +       .long   0x28
22491 +       .byte   0x0
22492 +       .uleb128 0x24
22493 +       .long   .LASF282
22494 +       .byte   0x0
22495 +       .byte   0x3a
22496 +       .value  0x116
22497 +       .uleb128 0x9
22498 +       .long   0x163c
22499 +       .byte   0x4
22500 +       .byte   0xe
22501 +       .byte   0x8
22502 +       .uleb128 0xa
22503 +       .long   .LASF283
22504 +       .byte   0xe
22505 +       .byte   0x9
22506 +       .long   0x77
22507 +       .byte   0x2
22508 +       .byte   0x23
22509 +       .uleb128 0x0
22510 +       .byte   0x0
22511 +       .uleb128 0x7
22512 +       .long   .LASF284
22513 +       .byte   0xe
22514 +       .byte   0xa
22515 +       .long   0x1625
22516 +       .uleb128 0x9
22517 +       .long   0x165e
22518 +       .byte   0x4
22519 +       .byte   0xe
22520 +       .byte   0xe
22521 +       .uleb128 0xa
22522 +       .long   .LASF285
22523 +       .byte   0xe
22524 +       .byte   0xf
22525 +       .long   0x77
22526 +       .byte   0x2
22527 +       .byte   0x23
22528 +       .uleb128 0x0
22529 +       .byte   0x0
22530 +       .uleb128 0x7
22531 +       .long   .LASF286
22532 +       .byte   0xe
22533 +       .byte   0x10
22534 +       .long   0x1647
22535 +       .uleb128 0x9
22536 +       .long   0x1680
22537 +       .byte   0x4
22538 +       .byte   0xd
22539 +       .byte   0x14
22540 +       .uleb128 0xa
22541 +       .long   .LASF287
22542 +       .byte   0xd
22543 +       .byte   0x15
22544 +       .long   0x163c
22545 +       .byte   0x2
22546 +       .byte   0x23
22547 +       .uleb128 0x0
22548 +       .byte   0x0
22549 +       .uleb128 0x7
22550 +       .long   .LASF288
22551 +       .byte   0xd
22552 +       .byte   0x20
22553 +       .long   0x1669
22554 +       .uleb128 0x9
22555 +       .long   0x16a2
22556 +       .byte   0x4
22557 +       .byte   0xd
22558 +       .byte   0x24
22559 +       .uleb128 0xa
22560 +       .long   .LASF287
22561 +       .byte   0xd
22562 +       .byte   0x25
22563 +       .long   0x165e
22564 +       .byte   0x2
22565 +       .byte   0x23
22566 +       .uleb128 0x0
22567 +       .byte   0x0
22568 +       .uleb128 0x7
22569 +       .long   .LASF289
22570 +       .byte   0xd
22571 +       .byte   0x30
22572 +       .long   0x168b
22573 +       .uleb128 0x9
22574 +       .long   0x16c4
22575 +       .byte   0x4
22576 +       .byte   0x12
22577 +       .byte   0x12
22578 +       .uleb128 0xa
22579 +       .long   .LASF290
22580 +       .byte   0x12
22581 +       .byte   0x12
22582 +       .long   0x21
22583 +       .byte   0x2
22584 +       .byte   0x23
22585 +       .uleb128 0x0
22586 +       .byte   0x0
22587 +       .uleb128 0x7
22588 +       .long   .LASF291
22589 +       .byte   0x12
22590 +       .byte   0x12
22591 +       .long   0x16ad
22592 +       .uleb128 0x7
22593 +       .long   .LASF292
22594 +       .byte   0x31
22595 +       .byte   0x8d
22596 +       .long   0x16c4
22597 +       .uleb128 0x7
22598 +       .long   .LASF293
22599 +       .byte   0x11
22600 +       .byte   0x3c
22601 +       .long   0x141
22602 +       .uleb128 0x9
22603 +       .long   0x170a
22604 +       .byte   0x8
22605 +       .byte   0x7
22606 +       .byte   0x20
22607 +       .uleb128 0xa
22608 +       .long   .LASF294
22609 +       .byte   0x7
22610 +       .byte   0x21
22611 +       .long   0x77
22612 +       .byte   0x2
22613 +       .byte   0x23
22614 +       .uleb128 0x0
22615 +       .uleb128 0xa
22616 +       .long   .LASF285
22617 +       .byte   0x7
22618 +       .byte   0x22
22619 +       .long   0x1680
22620 +       .byte   0x2
22621 +       .byte   0x23
22622 +       .uleb128 0x4
22623 +       .byte   0x0
22624 +       .uleb128 0x7
22625 +       .long   .LASF295
22626 +       .byte   0x7
22627 +       .byte   0x23
22628 +       .long   0x16e5
22629 +       .uleb128 0xf
22630 +       .long   0x1730
22631 +       .long   .LASF296
22632 +       .byte   0x4
22633 +       .byte   0x7
22634 +       .byte   0x73
22635 +       .uleb128 0xa
22636 +       .long   .LASF294
22637 +       .byte   0x7
22638 +       .byte   0x74
22639 +       .long   0x77
22640 +       .byte   0x2
22641 +       .byte   0x23
22642 +       .uleb128 0x0
22643 +       .byte   0x0
22644 +       .uleb128 0x7
22645 +       .long   .LASF297
22646 +       .byte   0x7
22647 +       .byte   0x75
22648 +       .long   0x1715
22649 +       .uleb128 0xf
22650 +       .long   0x1764
22651 +       .long   .LASF298
22652 +       .byte   0x8
22653 +       .byte   0x1f
22654 +       .byte   0xc
22655 +       .uleb128 0xa
22656 +       .long   .LASF299
22657 +       .byte   0x1f
22658 +       .byte   0xd
22659 +       .long   0x214
22660 +       .byte   0x2
22661 +       .byte   0x23
22662 +       .uleb128 0x0
22663 +       .uleb128 0xa
22664 +       .long   .LASF300
22665 +       .byte   0x1f
22666 +       .byte   0xe
22667 +       .long   0x5a
22668 +       .byte   0x2
22669 +       .byte   0x23
22670 +       .uleb128 0x4
22671 +       .byte   0x0
22672 +       .uleb128 0xf
22673 +       .long   0x179b
22674 +       .long   .LASF301
22675 +       .byte   0xc
22676 +       .byte   0x16
22677 +       .byte   0x65
22678 +       .uleb128 0xa
22679 +       .long   .LASF302
22680 +       .byte   0x16
22681 +       .byte   0x66
22682 +       .long   0x2f
22683 +       .byte   0x2
22684 +       .byte   0x23
22685 +       .uleb128 0x0
22686 +       .uleb128 0xa
22687 +       .long   .LASF303
22688 +       .byte   0x16
22689 +       .byte   0x69
22690 +       .long   0x179b
22691 +       .byte   0x2
22692 +       .byte   0x23
22693 +       .uleb128 0x4
22694 +       .uleb128 0xa
22695 +       .long   .LASF304
22696 +       .byte   0x16
22697 +       .byte   0x6a
22698 +       .long   0x179b
22699 +       .byte   0x2
22700 +       .byte   0x23
22701 +       .uleb128 0x8
22702 +       .byte   0x0
22703 +       .uleb128 0x4
22704 +       .byte   0x4
22705 +       .long   0x1764
22706 +       .uleb128 0xf
22707 +       .long   0x17bc
22708 +       .long   .LASF305
22709 +       .byte   0x4
22710 +       .byte   0x16
22711 +       .byte   0x6f
22712 +       .uleb128 0xa
22713 +       .long   .LASF301
22714 +       .byte   0x16
22715 +       .byte   0x70
22716 +       .long   0x179b
22717 +       .byte   0x2
22718 +       .byte   0x23
22719 +       .uleb128 0x0
22720 +       .byte   0x0
22721 +       .uleb128 0xf
22722 +       .long   0x17e5
22723 +       .long   .LASF306
22724 +       .byte   0x8
22725 +       .byte   0x2
22726 +       .byte   0x15
22727 +       .uleb128 0xa
22728 +       .long   .LASF307
22729 +       .byte   0x2
22730 +       .byte   0x16
22731 +       .long   0x17e5
22732 +       .byte   0x2
22733 +       .byte   0x23
22734 +       .uleb128 0x0
22735 +       .uleb128 0xa
22736 +       .long   .LASF308
22737 +       .byte   0x2
22738 +       .byte   0x16
22739 +       .long   0x17e5
22740 +       .byte   0x2
22741 +       .byte   0x23
22742 +       .uleb128 0x4
22743 +       .byte   0x0
22744 +       .uleb128 0x4
22745 +       .byte   0x4
22746 +       .long   0x17bc
22747 +       .uleb128 0x15
22748 +       .long   0x1808
22749 +       .long   .LASF309
22750 +       .byte   0x4
22751 +       .byte   0x2
22752 +       .value  0x2a3
22753 +       .uleb128 0x16
22754 +       .long   .LASF310
22755 +       .byte   0x2
22756 +       .value  0x2a4
22757 +       .long   0x1834
22758 +       .byte   0x2
22759 +       .byte   0x23
22760 +       .uleb128 0x0
22761 +       .byte   0x0
22762 +       .uleb128 0x15
22763 +       .long   0x1834
22764 +       .long   .LASF311
22765 +       .byte   0x8
22766 +       .byte   0x2
22767 +       .value  0x2a4
22768 +       .uleb128 0x16
22769 +       .long   .LASF307
22770 +       .byte   0x2
22771 +       .value  0x2a8
22772 +       .long   0x1834
22773 +       .byte   0x2
22774 +       .byte   0x23
22775 +       .uleb128 0x0
22776 +       .uleb128 0x16
22777 +       .long   .LASF312
22778 +       .byte   0x2
22779 +       .value  0x2a8
22780 +       .long   0x183a
22781 +       .byte   0x2
22782 +       .byte   0x23
22783 +       .uleb128 0x4
22784 +       .byte   0x0
22785 +       .uleb128 0x4
22786 +       .byte   0x4
22787 +       .long   0x1808
22788 +       .uleb128 0x4
22789 +       .byte   0x4
22790 +       .long   0x1834
22791 +       .uleb128 0x7
22792 +       .long   .LASF313
22793 +       .byte   0x2c
22794 +       .byte   0x1c
22795 +       .long   0x184b
22796 +       .uleb128 0xf
22797 +       .long   0x1890
22798 +       .long   .LASF314
22799 +       .byte   0x14
22800 +       .byte   0x2c
22801 +       .byte   0x1c
22802 +       .uleb128 0xa
22803 +       .long   .LASF53
22804 +       .byte   0x2c
22805 +       .byte   0x21
22806 +       .long   0x77
22807 +       .byte   0x2
22808 +       .byte   0x23
22809 +       .uleb128 0x0
22810 +       .uleb128 0xa
22811 +       .long   .LASF315
22812 +       .byte   0x2c
22813 +       .byte   0x23
22814 +       .long   0x160b
22815 +       .byte   0x2
22816 +       .byte   0x23
22817 +       .uleb128 0x4
22818 +       .uleb128 0xa
22819 +       .long   .LASF316
22820 +       .byte   0x2c
22821 +       .byte   0x24
22822 +       .long   0x1890
22823 +       .byte   0x2
22824 +       .byte   0x23
22825 +       .uleb128 0x8
22826 +       .uleb128 0xa
22827 +       .long   .LASF317
22828 +       .byte   0x2c
22829 +       .byte   0x25
22830 +       .long   0x17bc
22831 +       .byte   0x2
22832 +       .byte   0x23
22833 +       .uleb128 0xc
22834 +       .byte   0x0
22835 +       .uleb128 0x7
22836 +       .long   .LASF318
22837 +       .byte   0x2c
22838 +       .byte   0x1d
22839 +       .long   0x189b
22840 +       .uleb128 0x4
22841 +       .byte   0x4
22842 +       .long   0x18a1
22843 +       .uleb128 0x11
22844 +       .long   0x18c0
22845 +       .byte   0x1
22846 +       .long   0x21
22847 +       .uleb128 0x6
22848 +       .long   0x18c0
22849 +       .uleb128 0x6
22850 +       .long   0x77
22851 +       .uleb128 0x6
22852 +       .long   0x21
22853 +       .uleb128 0x6
22854 +       .long   0x160b
22855 +       .byte   0x0
22856 +       .uleb128 0x4
22857 +       .byte   0x4
22858 +       .long   0x1840
22859 +       .uleb128 0xf
22860 +       .long   0x18ef
22861 +       .long   .LASF319
22862 +       .byte   0xc
22863 +       .byte   0x2c
22864 +       .byte   0x32
22865 +       .uleb128 0xa
22866 +       .long   .LASF285
22867 +       .byte   0x2c
22868 +       .byte   0x33
22869 +       .long   0x1680
22870 +       .byte   0x2
22871 +       .byte   0x23
22872 +       .uleb128 0x0
22873 +       .uleb128 0xa
22874 +       .long   .LASF317
22875 +       .byte   0x2c
22876 +       .byte   0x34
22877 +       .long   0x17bc
22878 +       .byte   0x2
22879 +       .byte   0x23
22880 +       .uleb128 0x4
22881 +       .byte   0x0
22882 +       .uleb128 0x7
22883 +       .long   .LASF320
22884 +       .byte   0x2c
22885 +       .byte   0x36
22886 +       .long   0x18c6
22887 +       .uleb128 0xf
22888 +       .long   0x1931
22889 +       .long   .LASF321
22890 +       .byte   0x10
22891 +       .byte   0x21
22892 +       .byte   0x13
22893 +       .uleb128 0xa
22894 +       .long   .LASF322
22895 +       .byte   0x22
22896 +       .byte   0x38
22897 +       .long   0x5a
22898 +       .byte   0x2
22899 +       .byte   0x23
22900 +       .uleb128 0x0
22901 +       .uleb128 0xa
22902 +       .long   .LASF323
22903 +       .byte   0x22
22904 +       .byte   0x3f
22905 +       .long   0x1680
22906 +       .byte   0x2
22907 +       .byte   0x23
22908 +       .uleb128 0x4
22909 +       .uleb128 0xa
22910 +       .long   .LASF324
22911 +       .byte   0x22
22912 +       .byte   0x40
22913 +       .long   0x17bc
22914 +       .byte   0x2
22915 +       .byte   0x23
22916 +       .uleb128 0x8
22917 +       .byte   0x0
22918 +       .uleb128 0xf
22919 +       .long   0x1968
22920 +       .long   .LASF325
22921 +       .byte   0x14
22922 +       .byte   0x3d
22923 +       .byte   0x2c
22924 +       .uleb128 0xa
22925 +       .long   .LASF322
22926 +       .byte   0x3d
22927 +       .byte   0x2d
22928 +       .long   0x16c4
22929 +       .byte   0x2
22930 +       .byte   0x23
22931 +       .uleb128 0x0
22932 +       .uleb128 0xa
22933 +       .long   .LASF326
22934 +       .byte   0x3d
22935 +       .byte   0x2e
22936 +       .long   0x21
22937 +       .byte   0x2
22938 +       .byte   0x23
22939 +       .uleb128 0x4
22940 +       .uleb128 0xa
22941 +       .long   .LASF327
22942 +       .byte   0x3d
22943 +       .byte   0x2f
22944 +       .long   0x18ef
22945 +       .byte   0x2
22946 +       .byte   0x23
22947 +       .uleb128 0x8
22948 +       .byte   0x0
22949 +       .uleb128 0x9
22950 +       .long   0x19a9
22951 +       .byte   0x20
22952 +       .byte   0x43
22953 +       .byte   0xb
22954 +       .uleb128 0xa
22955 +       .long   .LASF328
22956 +       .byte   0x43
22957 +       .byte   0xc
22958 +       .long   0x21
22959 +       .byte   0x2
22960 +       .byte   0x23
22961 +       .uleb128 0x0
22962 +       .uleb128 0xb
22963 +       .string "sem"
22964 +       .byte   0x43
22965 +       .byte   0xd
22966 +       .long   0x1931
22967 +       .byte   0x2
22968 +       .byte   0x23
22969 +       .uleb128 0x4
22970 +       .uleb128 0xb
22971 +       .string "ldt"
22972 +       .byte   0x43
22973 +       .byte   0xe
22974 +       .long   0x160b
22975 +       .byte   0x2
22976 +       .byte   0x23
22977 +       .uleb128 0x18
22978 +       .uleb128 0xa
22979 +       .long   .LASF329
22980 +       .byte   0x43
22981 +       .byte   0xf
22982 +       .long   0x160b
22983 +       .byte   0x2
22984 +       .byte   0x23
22985 +       .uleb128 0x1c
22986 +       .byte   0x0
22987 +       .uleb128 0x7
22988 +       .long   .LASF330
22989 +       .byte   0x43
22990 +       .byte   0x10
22991 +       .long   0x1968
22992 +       .uleb128 0x7
22993 +       .long   .LASF331
22994 +       .byte   0x45
22995 +       .byte   0x7
22996 +       .long   0x2f
22997 +       .uleb128 0x7
22998 +       .long   .LASF332
22999 +       .byte   0x45
23000 +       .byte   0x17
23001 +       .long   0x189
23002 +       .uleb128 0x12
23003 +       .long   0x19da
23004 +       .long   0xbb
23005 +       .uleb128 0x13
23006 +       .long   0x28
23007 +       .byte   0x3
23008 +       .byte   0x0
23009 +       .uleb128 0xf
23010 +       .long   0x1a81
23011 +       .long   .LASF333
23012 +       .byte   0x2c
23013 +       .byte   0x65
23014 +       .byte   0x22
23015 +       .uleb128 0xa
23016 +       .long   .LASF334
23017 +       .byte   0x65
23018 +       .byte   0x23
23019 +       .long   0x19ca
23020 +       .byte   0x2
23021 +       .byte   0x23
23022 +       .uleb128 0x0
23023 +       .uleb128 0xa
23024 +       .long   .LASF335
23025 +       .byte   0x65
23026 +       .byte   0x25
23027 +       .long   0x53
23028 +       .byte   0x2
23029 +       .byte   0x23
23030 +       .uleb128 0x4
23031 +       .uleb128 0xa
23032 +       .long   .LASF336
23033 +       .byte   0x65
23034 +       .byte   0x26
23035 +       .long   0xbb
23036 +       .byte   0x2
23037 +       .byte   0x23
23038 +       .uleb128 0x6
23039 +       .uleb128 0xa
23040 +       .long   .LASF337
23041 +       .byte   0x65
23042 +       .byte   0x27
23043 +       .long   0xbb
23044 +       .byte   0x2
23045 +       .byte   0x23
23046 +       .uleb128 0x7
23047 +       .uleb128 0xa
23048 +       .long   .LASF338
23049 +       .byte   0x65
23050 +       .byte   0x28
23051 +       .long   0x8ec
23052 +       .byte   0x2
23053 +       .byte   0x23
23054 +       .uleb128 0x8
23055 +       .uleb128 0xa
23056 +       .long   .LASF339
23057 +       .byte   0x65
23058 +       .byte   0x29
23059 +       .long   0x1a81
23060 +       .byte   0x2
23061 +       .byte   0x23
23062 +       .uleb128 0x10
23063 +       .uleb128 0xa
23064 +       .long   .LASF340
23065 +       .byte   0x65
23066 +       .byte   0x2a
23067 +       .long   0x2f
23068 +       .byte   0x2
23069 +       .byte   0x23
23070 +       .uleb128 0x1c
23071 +       .uleb128 0xa
23072 +       .long   .LASF341
23073 +       .byte   0x65
23074 +       .byte   0x2b
23075 +       .long   0x53
23076 +       .byte   0x2
23077 +       .byte   0x23
23078 +       .uleb128 0x20
23079 +       .uleb128 0xa
23080 +       .long   .LASF342
23081 +       .byte   0x65
23082 +       .byte   0x2c
23083 +       .long   0x53
23084 +       .byte   0x2
23085 +       .byte   0x23
23086 +       .uleb128 0x22
23087 +       .uleb128 0xa
23088 +       .long   .LASF343
23089 +       .byte   0x65
23090 +       .byte   0x2d
23091 +       .long   0x2f
23092 +       .byte   0x2
23093 +       .byte   0x23
23094 +       .uleb128 0x24
23095 +       .uleb128 0xa
23096 +       .long   .LASF344
23097 +       .byte   0x65
23098 +       .byte   0x2e
23099 +       .long   0x2f
23100 +       .byte   0x2
23101 +       .byte   0x23
23102 +       .uleb128 0x28
23103 +       .byte   0x0
23104 +       .uleb128 0x12
23105 +       .long   0x1a91
23106 +       .long   0xbb
23107 +       .uleb128 0x13
23108 +       .long   0x28
23109 +       .byte   0xb
23110 +       .byte   0x0
23111 +       .uleb128 0xf
23112 +       .long   0x1b00
23113 +       .long   .LASF345
23114 +       .byte   0x14
23115 +       .byte   0x65
23116 +       .byte   0x3b
23117 +       .uleb128 0xa
23118 +       .long   .LASF346
23119 +       .byte   0x65
23120 +       .byte   0x3c
23121 +       .long   0x112
23122 +       .byte   0x2
23123 +       .byte   0x23
23124 +       .uleb128 0x0
23125 +       .uleb128 0xa
23126 +       .long   .LASF347
23127 +       .byte   0x65
23128 +       .byte   0x3d
23129 +       .long   0x112
23130 +       .byte   0x2
23131 +       .byte   0x23
23132 +       .uleb128 0x1
23133 +       .uleb128 0xa
23134 +       .long   .LASF348
23135 +       .byte   0x65
23136 +       .byte   0x3e
23137 +       .long   0x112
23138 +       .byte   0x2
23139 +       .byte   0x23
23140 +       .uleb128 0x2
23141 +       .uleb128 0xa
23142 +       .long   .LASF349
23143 +       .byte   0x65
23144 +       .byte   0x3f
23145 +       .long   0x112
23146 +       .byte   0x2
23147 +       .byte   0x23
23148 +       .uleb128 0x3
23149 +       .uleb128 0xa
23150 +       .long   .LASF350
23151 +       .byte   0x65
23152 +       .byte   0x42
23153 +       .long   0x2f
23154 +       .byte   0x2
23155 +       .byte   0x23
23156 +       .uleb128 0x4
23157 +       .uleb128 0xa
23158 +       .long   .LASF351
23159 +       .byte   0x65
23160 +       .byte   0x46
23161 +       .long   0x2f
23162 +       .byte   0x2
23163 +       .byte   0x23
23164 +       .uleb128 0x8
23165 +       .uleb128 0xa
23166 +       .long   .LASF352
23167 +       .byte   0x65
23168 +       .byte   0x47
23169 +       .long   0x1b00
23170 +       .byte   0x2
23171 +       .byte   0x23
23172 +       .uleb128 0xc
23173 +       .byte   0x0
23174 +       .uleb128 0x12
23175 +       .long   0x1b10
23176 +       .long   0x2f
23177 +       .uleb128 0x13
23178 +       .long   0x28
23179 +       .byte   0x1
23180 +       .byte   0x0
23181 +       .uleb128 0xf
23182 +       .long   0x1b47
23183 +       .long   .LASF353
23184 +       .byte   0x8
23185 +       .byte   0x65
23186 +       .byte   0x4b
23187 +       .uleb128 0xa
23188 +       .long   .LASF346
23189 +       .byte   0x65
23190 +       .byte   0x4c
23191 +       .long   0x112
23192 +       .byte   0x2
23193 +       .byte   0x23
23194 +       .uleb128 0x0
23195 +       .uleb128 0xa
23196 +       .long   .LASF354
23197 +       .byte   0x65
23198 +       .byte   0x4d
23199 +       .long   0x112
23200 +       .byte   0x2
23201 +       .byte   0x23
23202 +       .uleb128 0x1
23203 +       .uleb128 0xa
23204 +       .long   .LASF355
23205 +       .byte   0x65
23206 +       .byte   0x4e
23207 +       .long   0x1b47
23208 +       .byte   0x2
23209 +       .byte   0x23
23210 +       .uleb128 0x2
23211 +       .byte   0x0
23212 +       .uleb128 0x12
23213 +       .long   0x1b57
23214 +       .long   0x112
23215 +       .uleb128 0x13
23216 +       .long   0x28
23217 +       .byte   0x5
23218 +       .byte   0x0
23219 +       .uleb128 0xf
23220 +       .long   0x1bc6
23221 +       .long   .LASF356
23222 +       .byte   0x8
23223 +       .byte   0x65
23224 +       .byte   0x9d
23225 +       .uleb128 0xa
23226 +       .long   .LASF346
23227 +       .byte   0x65
23228 +       .byte   0x9e
23229 +       .long   0x112
23230 +       .byte   0x2
23231 +       .byte   0x23
23232 +       .uleb128 0x0
23233 +       .uleb128 0xa
23234 +       .long   .LASF357
23235 +       .byte   0x65
23236 +       .byte   0x9f
23237 +       .long   0x112
23238 +       .byte   0x2
23239 +       .byte   0x23
23240 +       .uleb128 0x1
23241 +       .uleb128 0xa
23242 +       .long   .LASF358
23243 +       .byte   0x65
23244 +       .byte   0xa0
23245 +       .long   0x112
23246 +       .byte   0x2
23247 +       .byte   0x23
23248 +       .uleb128 0x2
23249 +       .uleb128 0xa
23250 +       .long   .LASF359
23251 +       .byte   0x65
23252 +       .byte   0xa1
23253 +       .long   0x112
23254 +       .byte   0x2
23255 +       .byte   0x23
23256 +       .uleb128 0x3
23257 +       .uleb128 0xa
23258 +       .long   .LASF360
23259 +       .byte   0x65
23260 +       .byte   0xa2
23261 +       .long   0x112
23262 +       .byte   0x2
23263 +       .byte   0x23
23264 +       .uleb128 0x4
23265 +       .uleb128 0xa
23266 +       .long   .LASF361
23267 +       .byte   0x65
23268 +       .byte   0xa3
23269 +       .long   0x112
23270 +       .byte   0x2
23271 +       .byte   0x23
23272 +       .uleb128 0x5
23273 +       .uleb128 0xa
23274 +       .long   .LASF362
23275 +       .byte   0x65
23276 +       .byte   0xa4
23277 +       .long   0x53
23278 +       .byte   0x2
23279 +       .byte   0x23
23280 +       .uleb128 0x6
23281 +       .byte   0x0
23282 +       .uleb128 0xf
23283 +       .long   0x1be1
23284 +       .long   .LASF363
23285 +       .byte   0x20
23286 +       .byte   0x64
23287 +       .byte   0x27
23288 +       .uleb128 0xa
23289 +       .long   .LASF364
23290 +       .byte   0x64
23291 +       .byte   0x28
23292 +       .long   0x515
23293 +       .byte   0x2
23294 +       .byte   0x23
23295 +       .uleb128 0x0
23296 +       .byte   0x0
23297 +       .uleb128 0x7
23298 +       .long   .LASF365
23299 +       .byte   0x64
23300 +       .byte   0x2b
23301 +       .long   0x1bc6
23302 +       .uleb128 0xf
23303 +       .long   0x1c07
23304 +       .long   .LASF366
23305 +       .byte   0x4
23306 +       .byte   0x5c
23307 +       .byte   0xca
23308 +       .uleb128 0xa
23309 +       .long   .LASF367
23310 +       .byte   0x5c
23311 +       .byte   0xcb
23312 +       .long   0x21
23313 +       .byte   0x2
23314 +       .byte   0x23
23315 +       .uleb128 0x0
23316 +       .byte   0x0
23317 +       .uleb128 0x7
23318 +       .long   .LASF368
23319 +       .byte   0x5c
23320 +       .byte   0xcc
23321 +       .long   0x1bec
23322 +       .uleb128 0x15
23323 +       .long   0x1c8f
23324 +       .long   .LASF369
23325 +       .byte   0x1c
23326 +       .byte   0x5c
23327 +       .value  0x109
23328 +       .uleb128 0x16
23329 +       .long   .LASF370
23330 +       .byte   0x5c
23331 +       .value  0x10a
23332 +       .long   0x1c07
23333 +       .byte   0x2
23334 +       .byte   0x23
23335 +       .uleb128 0x0
23336 +       .uleb128 0x20
23337 +       .long   .LASF371
23338 +       .byte   0x5c
23339 +       .value  0x10b
23340 +       .long   0x77
23341 +       .byte   0x4
23342 +       .byte   0x1
23343 +       .byte   0x1f
23344 +       .byte   0x2
23345 +       .byte   0x23
23346 +       .uleb128 0x4
23347 +       .uleb128 0x20
23348 +       .long   .LASF372
23349 +       .byte   0x5c
23350 +       .value  0x10d
23351 +       .long   0x77
23352 +       .byte   0x4
23353 +       .byte   0x1
23354 +       .byte   0x1e
23355 +       .byte   0x2
23356 +       .byte   0x23
23357 +       .uleb128 0x4
23358 +       .uleb128 0x16
23359 +       .long   .LASF373
23360 +       .byte   0x5c
23361 +       .value  0x10e
23362 +       .long   0x1c07
23363 +       .byte   0x2
23364 +       .byte   0x23
23365 +       .uleb128 0x8
23366 +       .uleb128 0x16
23367 +       .long   .LASF374
23368 +       .byte   0x5c
23369 +       .value  0x10f
23370 +       .long   0x160b
23371 +       .byte   0x2
23372 +       .byte   0x23
23373 +       .uleb128 0xc
23374 +       .uleb128 0x16
23375 +       .long   .LASF375
23376 +       .byte   0x5c
23377 +       .value  0x110
23378 +       .long   0x1e7d
23379 +       .byte   0x2
23380 +       .byte   0x23
23381 +       .uleb128 0x10
23382 +       .uleb128 0x16
23383 +       .long   .LASF376
23384 +       .byte   0x5c
23385 +       .value  0x111
23386 +       .long   0x17bc
23387 +       .byte   0x2
23388 +       .byte   0x23
23389 +       .uleb128 0x14
23390 +       .byte   0x0
23391 +       .uleb128 0x1a
23392 +       .long   0x1e7d
23393 +       .long   .LASF377
23394 +       .value  0x16c
23395 +       .byte   0x5c
23396 +       .byte   0xc8
23397 +       .uleb128 0x16
23398 +       .long   .LASF378
23399 +       .byte   0x5d
23400 +       .value  0x19b
23401 +       .long   0x53c4
23402 +       .byte   0x2
23403 +       .byte   0x23
23404 +       .uleb128 0x0
23405 +       .uleb128 0x16
23406 +       .long   .LASF379
23407 +       .byte   0x5d
23408 +       .value  0x19c
23409 +       .long   0x541b
23410 +       .byte   0x2
23411 +       .byte   0x23
23412 +       .uleb128 0x14
23413 +       .uleb128 0x16
23414 +       .long   .LASF380
23415 +       .byte   0x5d
23416 +       .value  0x19d
23417 +       .long   0x541b
23418 +       .byte   0x2
23419 +       .byte   0x23
23420 +       .uleb128 0x34
23421 +       .uleb128 0x16
23422 +       .long   .LASF381
23423 +       .byte   0x5d
23424 +       .value  0x19e
23425 +       .long   0x541b
23426 +       .byte   0x2
23427 +       .byte   0x23
23428 +       .uleb128 0x54
23429 +       .uleb128 0x16
23430 +       .long   .LASF205
23431 +       .byte   0x5d
23432 +       .value  0x19f
23433 +       .long   0x1e7d
23434 +       .byte   0x2
23435 +       .byte   0x23
23436 +       .uleb128 0x74
23437 +       .uleb128 0x16
23438 +       .long   .LASF382
23439 +       .byte   0x5d
23440 +       .value  0x1a1
23441 +       .long   0x4b3a
23442 +       .byte   0x2
23443 +       .byte   0x23
23444 +       .uleb128 0x78
23445 +       .uleb128 0x16
23446 +       .long   .LASF383
23447 +       .byte   0x5d
23448 +       .value  0x1a2
23449 +       .long   0x4c33
23450 +       .byte   0x3
23451 +       .byte   0x23
23452 +       .uleb128 0xb8
23453 +       .uleb128 0x16
23454 +       .long   .LASF384
23455 +       .byte   0x5d
23456 +       .value  0x1a3
23457 +       .long   0x5c6b
23458 +       .byte   0x3
23459 +       .byte   0x23
23460 +       .uleb128 0xcc
23461 +       .uleb128 0x20
23462 +       .long   .LASF385
23463 +       .byte   0x5d
23464 +       .value  0x1a4
23465 +       .long   0x77
23466 +       .byte   0x4
23467 +       .byte   0x1
23468 +       .byte   0x1f
23469 +       .byte   0x3
23470 +       .byte   0x23
23471 +       .uleb128 0xd0
23472 +       .uleb128 0x20
23473 +       .long   .LASF386
23474 +       .byte   0x5d
23475 +       .value  0x1a5
23476 +       .long   0x77
23477 +       .byte   0x4
23478 +       .byte   0x1
23479 +       .byte   0x1e
23480 +       .byte   0x3
23481 +       .byte   0x23
23482 +       .uleb128 0xd0
23483 +       .uleb128 0x16
23484 +       .long   .LASF387
23485 +       .byte   0x5d
23486 +       .value  0x1a6
23487 +       .long   0x566b
23488 +       .byte   0x3
23489 +       .byte   0x23
23490 +       .uleb128 0xd4
23491 +       .uleb128 0x16
23492 +       .long   .LASF388
23493 +       .byte   0x5d
23494 +       .value  0x1a7
23495 +       .long   0x56a5
23496 +       .byte   0x3
23497 +       .byte   0x23
23498 +       .uleb128 0xe8
23499 +       .uleb128 0x17
23500 +       .string "sem"
23501 +       .byte   0x5d
23502 +       .value  0x1a9
23503 +       .long   0x1931
23504 +       .byte   0x3
23505 +       .byte   0x23
23506 +       .uleb128 0xec
23507 +       .uleb128 0x17
23508 +       .string "bus"
23509 +       .byte   0x5d
23510 +       .value  0x1ad
23511 +       .long   0x54d3
23512 +       .byte   0x3
23513 +       .byte   0x23
23514 +       .uleb128 0x100
23515 +       .uleb128 0x16
23516 +       .long   .LASF389
23517 +       .byte   0x5d
23518 +       .value  0x1ae
23519 +       .long   0x56fd
23520 +       .byte   0x3
23521 +       .byte   0x23
23522 +       .uleb128 0x104
23523 +       .uleb128 0x16
23524 +       .long   .LASF390
23525 +       .byte   0x5d
23526 +       .value  0x1b0
23527 +       .long   0x160b
23528 +       .byte   0x3
23529 +       .byte   0x23
23530 +       .uleb128 0x108
23531 +       .uleb128 0x16
23532 +       .long   .LASF391
23533 +       .byte   0x5d
23534 +       .value  0x1b1
23535 +       .long   0x160b
23536 +       .byte   0x3
23537 +       .byte   0x23
23538 +       .uleb128 0x10c
23539 +       .uleb128 0x16
23540 +       .long   .LASF392
23541 +       .byte   0x5d
23542 +       .value  0x1b3
23543 +       .long   0x1c12
23544 +       .byte   0x3
23545 +       .byte   0x23
23546 +       .uleb128 0x110
23547 +       .uleb128 0x16
23548 +       .long   .LASF393
23549 +       .byte   0x5d
23550 +       .value  0x1b8
23551 +       .long   0x5c71
23552 +       .byte   0x3
23553 +       .byte   0x23
23554 +       .uleb128 0x12c
23555 +       .uleb128 0x16
23556 +       .long   .LASF394
23557 +       .byte   0x5d
23558 +       .value  0x1b9
23559 +       .long   0x189
23560 +       .byte   0x3
23561 +       .byte   0x23
23562 +       .uleb128 0x130
23563 +       .uleb128 0x16
23564 +       .long   .LASF395
23565 +       .byte   0x5d
23566 +       .value  0x1bf
23567 +       .long   0x17bc
23568 +       .byte   0x3
23569 +       .byte   0x23
23570 +       .uleb128 0x138
23571 +       .uleb128 0x16
23572 +       .long   .LASF396
23573 +       .byte   0x5d
23574 +       .value  0x1c1
23575 +       .long   0x5c7d
23576 +       .byte   0x3
23577 +       .byte   0x23
23578 +       .uleb128 0x140
23579 +       .uleb128 0x16
23580 +       .long   .LASF397
23581 +       .byte   0x5d
23582 +       .value  0x1c4
23583 +       .long   0x546c
23584 +       .byte   0x3
23585 +       .byte   0x23
23586 +       .uleb128 0x144
23587 +       .uleb128 0x16
23588 +       .long   .LASF398
23589 +       .byte   0x5d
23590 +       .value  0x1c6
23591 +       .long   0x1680
23592 +       .byte   0x3
23593 +       .byte   0x23
23594 +       .uleb128 0x148
23595 +       .uleb128 0x16
23596 +       .long   .LASF399
23597 +       .byte   0x5d
23598 +       .value  0x1c7
23599 +       .long   0x17bc
23600 +       .byte   0x3
23601 +       .byte   0x23
23602 +       .uleb128 0x14c
23603 +       .uleb128 0x16
23604 +       .long   .LASF400
23605 +       .byte   0x5d
23606 +       .value  0x1ca
23607 +       .long   0x17bc
23608 +       .byte   0x3
23609 +       .byte   0x23
23610 +       .uleb128 0x154
23611 +       .uleb128 0x16
23612 +       .long   .LASF401
23613 +       .byte   0x5d
23614 +       .value  0x1cb
23615 +       .long   0x5b30
23616 +       .byte   0x3
23617 +       .byte   0x23
23618 +       .uleb128 0x15c
23619 +       .uleb128 0x16
23620 +       .long   .LASF402
23621 +       .byte   0x5d
23622 +       .value  0x1cc
23623 +       .long   0x19f
23624 +       .byte   0x3
23625 +       .byte   0x23
23626 +       .uleb128 0x160
23627 +       .uleb128 0x16
23628 +       .long   .LASF403
23629 +       .byte   0x5d
23630 +       .value  0x1cd
23631 +       .long   0x5bb2
23632 +       .byte   0x3
23633 +       .byte   0x23
23634 +       .uleb128 0x164
23635 +       .uleb128 0x16
23636 +       .long   .LASF404
23637 +       .byte   0x5d
23638 +       .value  0x1cf
23639 +       .long   0x5820
23640 +       .byte   0x3
23641 +       .byte   0x23
23642 +       .uleb128 0x168
23643 +       .byte   0x0
23644 +       .uleb128 0x4
23645 +       .byte   0x4
23646 +       .long   0x1c8f
23647 +       .uleb128 0xf
23648 +       .long   0x1ef2
23649 +       .long   .LASF405
23650 +       .byte   0x1c
23651 +       .byte   0x59
23652 +       .byte   0x34
23653 +       .uleb128 0xa
23654 +       .long   .LASF406
23655 +       .byte   0x59
23656 +       .byte   0x35
23657 +       .long   0x93a
23658 +       .byte   0x2
23659 +       .byte   0x23
23660 +       .uleb128 0x0
23661 +       .uleb128 0xa
23662 +       .long   .LASF407
23663 +       .byte   0x59
23664 +       .byte   0x36
23665 +       .long   0x1efe
23666 +       .byte   0x2
23667 +       .byte   0x23
23668 +       .uleb128 0x4
23669 +       .uleb128 0xa
23670 +       .long   .LASF408
23671 +       .byte   0x59
23672 +       .byte   0x37
23673 +       .long   0x1f14
23674 +       .byte   0x2
23675 +       .byte   0x23
23676 +       .uleb128 0x8
23677 +       .uleb128 0xa
23678 +       .long   .LASF409
23679 +       .byte   0x59
23680 +       .byte   0x38
23681 +       .long   0x1efe
23682 +       .byte   0x2
23683 +       .byte   0x23
23684 +       .uleb128 0xc
23685 +       .uleb128 0xa
23686 +       .long   .LASF410
23687 +       .byte   0x59
23688 +       .byte   0x3a
23689 +       .long   0x93a
23690 +       .byte   0x2
23691 +       .byte   0x23
23692 +       .uleb128 0x10
23693 +       .uleb128 0xa
23694 +       .long   .LASF411
23695 +       .byte   0x59
23696 +       .byte   0x3b
23697 +       .long   0x36
23698 +       .byte   0x2
23699 +       .byte   0x23
23700 +       .uleb128 0x14
23701 +       .uleb128 0xa
23702 +       .long   .LASF412
23703 +       .byte   0x59
23704 +       .byte   0x3e
23705 +       .long   0x1f4b
23706 +       .byte   0x2
23707 +       .byte   0x23
23708 +       .uleb128 0x18
23709 +       .byte   0x0
23710 +       .uleb128 0x5
23711 +       .long   0x1efe
23712 +       .byte   0x1
23713 +       .uleb128 0x6
23714 +       .long   0x77
23715 +       .byte   0x0
23716 +       .uleb128 0x4
23717 +       .byte   0x4
23718 +       .long   0x1ef2
23719 +       .uleb128 0x11
23720 +       .long   0x1f14
23721 +       .byte   0x1
23722 +       .long   0x21
23723 +       .uleb128 0x6
23724 +       .long   0x77
23725 +       .byte   0x0
23726 +       .uleb128 0x4
23727 +       .byte   0x4
23728 +       .long   0x1f04
23729 +       .uleb128 0x11
23730 +       .long   0x1f39
23731 +       .byte   0x1
23732 +       .long   0x21
23733 +       .uleb128 0x6
23734 +       .long   0x923
23735 +       .uleb128 0x6
23736 +       .long   0x1f39
23737 +       .uleb128 0x6
23738 +       .long   0x160b
23739 +       .uleb128 0x6
23740 +       .long   0x21
23741 +       .byte   0x0
23742 +       .uleb128 0x4
23743 +       .byte   0x4
23744 +       .long   0x1f3f
23745 +       .uleb128 0x5
23746 +       .long   0x1f4b
23747 +       .byte   0x1
23748 +       .uleb128 0x6
23749 +       .long   0x160b
23750 +       .byte   0x0
23751 +       .uleb128 0x4
23752 +       .byte   0x4
23753 +       .long   0x1f1a
23754 +       .uleb128 0xf
23755 +       .long   0x214b
23756 +       .long   .LASF413
23757 +       .byte   0x8c
23758 +       .byte   0x63
23759 +       .byte   0x16
23760 +       .uleb128 0xa
23761 +       .long   .LASF414
23762 +       .byte   0x63
23763 +       .byte   0x17
23764 +       .long   0xb5
23765 +       .byte   0x2
23766 +       .byte   0x23
23767 +       .uleb128 0x0
23768 +       .uleb128 0xa
23769 +       .long   .LASF415
23770 +       .byte   0x63
23771 +       .byte   0x18
23772 +       .long   0x92e
23773 +       .byte   0x2
23774 +       .byte   0x23
23775 +       .uleb128 0x4
23776 +       .uleb128 0xa
23777 +       .long   .LASF416
23778 +       .byte   0x63
23779 +       .byte   0x1a
23780 +       .long   0x92e
23781 +       .byte   0x2
23782 +       .byte   0x23
23783 +       .uleb128 0x8
23784 +       .uleb128 0xa
23785 +       .long   .LASF417
23786 +       .byte   0x63
23787 +       .byte   0x1b
23788 +       .long   0x2151
23789 +       .byte   0x2
23790 +       .byte   0x23
23791 +       .uleb128 0xc
23792 +       .uleb128 0xa
23793 +       .long   .LASF418
23794 +       .byte   0x63
23795 +       .byte   0x1c
23796 +       .long   0x21
23797 +       .byte   0x2
23798 +       .byte   0x23
23799 +       .uleb128 0x10
23800 +       .uleb128 0xa
23801 +       .long   .LASF419
23802 +       .byte   0x63
23803 +       .byte   0x1d
23804 +       .long   0x21
23805 +       .byte   0x2
23806 +       .byte   0x23
23807 +       .uleb128 0x14
23808 +       .uleb128 0xa
23809 +       .long   .LASF420
23810 +       .byte   0x63
23811 +       .byte   0x1e
23812 +       .long   0x21
23813 +       .byte   0x2
23814 +       .byte   0x23
23815 +       .uleb128 0x18
23816 +       .uleb128 0xa
23817 +       .long   .LASF421
23818 +       .byte   0x63
23819 +       .byte   0x1f
23820 +       .long   0x21
23821 +       .byte   0x2
23822 +       .byte   0x23
23823 +       .uleb128 0x1c
23824 +       .uleb128 0xa
23825 +       .long   .LASF422
23826 +       .byte   0x63
23827 +       .byte   0x20
23828 +       .long   0x216c
23829 +       .byte   0x2
23830 +       .byte   0x23
23831 +       .uleb128 0x20
23832 +       .uleb128 0xa
23833 +       .long   .LASF423
23834 +       .byte   0x63
23835 +       .byte   0x21
23836 +       .long   0x2182
23837 +       .byte   0x2
23838 +       .byte   0x23
23839 +       .uleb128 0x24
23840 +       .uleb128 0xa
23841 +       .long   .LASF424
23842 +       .byte   0x63
23843 +       .byte   0x22
23844 +       .long   0x21
23845 +       .byte   0x2
23846 +       .byte   0x23
23847 +       .uleb128 0x28
23848 +       .uleb128 0xa
23849 +       .long   .LASF425
23850 +       .byte   0x63
23851 +       .byte   0x23
23852 +       .long   0x21
23853 +       .byte   0x2
23854 +       .byte   0x23
23855 +       .uleb128 0x2c
23856 +       .uleb128 0xa
23857 +       .long   .LASF426
23858 +       .byte   0x63
23859 +       .byte   0x24
23860 +       .long   0x93a
23861 +       .byte   0x2
23862 +       .byte   0x23
23863 +       .uleb128 0x30
23864 +       .uleb128 0xa
23865 +       .long   .LASF427
23866 +       .byte   0x63
23867 +       .byte   0x25
23868 +       .long   0x2198
23869 +       .byte   0x2
23870 +       .byte   0x23
23871 +       .uleb128 0x34
23872 +       .uleb128 0xa
23873 +       .long   .LASF428
23874 +       .byte   0x63
23875 +       .byte   0x27
23876 +       .long   0x93a
23877 +       .byte   0x2
23878 +       .byte   0x23
23879 +       .uleb128 0x38
23880 +       .uleb128 0xa
23881 +       .long   .LASF429
23882 +       .byte   0x63
23883 +       .byte   0x28
23884 +       .long   0x21b3
23885 +       .byte   0x2
23886 +       .byte   0x23
23887 +       .uleb128 0x3c
23888 +       .uleb128 0xa
23889 +       .long   .LASF430
23890 +       .byte   0x63
23891 +       .byte   0x29
23892 +       .long   0x21c9
23893 +       .byte   0x2
23894 +       .byte   0x23
23895 +       .uleb128 0x40
23896 +       .uleb128 0xa
23897 +       .long   .LASF431
23898 +       .byte   0x63
23899 +       .byte   0x2a
23900 +       .long   0x21c9
23901 +       .byte   0x2
23902 +       .byte   0x23
23903 +       .uleb128 0x44
23904 +       .uleb128 0xa
23905 +       .long   .LASF432
23906 +       .byte   0x63
23907 +       .byte   0x2b
23908 +       .long   0x21c9
23909 +       .byte   0x2
23910 +       .byte   0x23
23911 +       .uleb128 0x48
23912 +       .uleb128 0xa
23913 +       .long   .LASF433
23914 +       .byte   0x63
23915 +       .byte   0x2c
23916 +       .long   0x21df
23917 +       .byte   0x2
23918 +       .byte   0x23
23919 +       .uleb128 0x4c
23920 +       .uleb128 0xa
23921 +       .long   .LASF434
23922 +       .byte   0x63
23923 +       .byte   0x2e
23924 +       .long   0x2206
23925 +       .byte   0x2
23926 +       .byte   0x23
23927 +       .uleb128 0x50
23928 +       .uleb128 0xa
23929 +       .long   .LASF435
23930 +       .byte   0x63
23931 +       .byte   0x2f
23932 +       .long   0x93a
23933 +       .byte   0x2
23934 +       .byte   0x23
23935 +       .uleb128 0x54
23936 +       .uleb128 0xa
23937 +       .long   .LASF436
23938 +       .byte   0x63
23939 +       .byte   0x30
23940 +       .long   0x21c9
23941 +       .byte   0x2
23942 +       .byte   0x23
23943 +       .uleb128 0x58
23944 +       .uleb128 0xa
23945 +       .long   .LASF437
23946 +       .byte   0x63
23947 +       .byte   0x31
23948 +       .long   0x93a
23949 +       .byte   0x2
23950 +       .byte   0x23
23951 +       .uleb128 0x5c
23952 +       .uleb128 0xa
23953 +       .long   .LASF438
23954 +       .byte   0x63
23955 +       .byte   0x32
23956 +       .long   0x2221
23957 +       .byte   0x2
23958 +       .byte   0x23
23959 +       .uleb128 0x60
23960 +       .uleb128 0xa
23961 +       .long   .LASF439
23962 +       .byte   0x63
23963 +       .byte   0x36
23964 +       .long   0x2243
23965 +       .byte   0x2
23966 +       .byte   0x23
23967 +       .uleb128 0x64
23968 +       .uleb128 0xa
23969 +       .long   .LASF440
23970 +       .byte   0x63
23971 +       .byte   0x38
23972 +       .long   0x225a
23973 +       .byte   0x2
23974 +       .byte   0x23
23975 +       .uleb128 0x68
23976 +       .uleb128 0xa
23977 +       .long   .LASF441
23978 +       .byte   0x63
23979 +       .byte   0x3e
23980 +       .long   0x2280
23981 +       .byte   0x2
23982 +       .byte   0x23
23983 +       .uleb128 0x6c
23984 +       .uleb128 0xa
23985 +       .long   .LASF442
23986 +       .byte   0x63
23987 +       .byte   0x3f
23988 +       .long   0x229b
23989 +       .byte   0x2
23990 +       .byte   0x23
23991 +       .uleb128 0x70
23992 +       .uleb128 0xa
23993 +       .long   .LASF443
23994 +       .byte   0x63
23995 +       .byte   0x41
23996 +       .long   0x22b1
23997 +       .byte   0x2
23998 +       .byte   0x23
23999 +       .uleb128 0x74
24000 +       .uleb128 0xa
24001 +       .long   .LASF444
24002 +       .byte   0x63
24003 +       .byte   0x42
24004 +       .long   0x2f
24005 +       .byte   0x2
24006 +       .byte   0x23
24007 +       .uleb128 0x78
24008 +       .uleb128 0xa
24009 +       .long   .LASF445
24010 +       .byte   0x63
24011 +       .byte   0x43
24012 +       .long   0x22c7
24013 +       .byte   0x2
24014 +       .byte   0x23
24015 +       .uleb128 0x7c
24016 +       .uleb128 0xa
24017 +       .long   .LASF446
24018 +       .byte   0x63
24019 +       .byte   0x47
24020 +       .long   0x22de
24021 +       .byte   0x3
24022 +       .byte   0x23
24023 +       .uleb128 0x80
24024 +       .uleb128 0xa
24025 +       .long   .LASF447
24026 +       .byte   0x63
24027 +       .byte   0x48
24028 +       .long   0x36
24029 +       .byte   0x3
24030 +       .byte   0x23
24031 +       .uleb128 0x84
24032 +       .uleb128 0xa
24033 +       .long   .LASF448
24034 +       .byte   0x63
24035 +       .byte   0x49
24036 +       .long   0x36
24037 +       .byte   0x3
24038 +       .byte   0x23
24039 +       .uleb128 0x88
24040 +       .byte   0x0
24041 +       .uleb128 0x18
24042 +       .byte   0x1
24043 +       .long   0x923
24044 +       .uleb128 0x4
24045 +       .byte   0x4
24046 +       .long   0x214b
24047 +       .uleb128 0x11
24048 +       .long   0x216c
24049 +       .byte   0x1
24050 +       .long   0x2f
24051 +       .uleb128 0x6
24052 +       .long   0x1be1
24053 +       .uleb128 0x6
24054 +       .long   0x21
24055 +       .byte   0x0
24056 +       .uleb128 0x4
24057 +       .byte   0x4
24058 +       .long   0x2157
24059 +       .uleb128 0x11
24060 +       .long   0x2182
24061 +       .byte   0x1
24062 +       .long   0x2f
24063 +       .uleb128 0x6
24064 +       .long   0x21
24065 +       .byte   0x0
24066 +       .uleb128 0x4
24067 +       .byte   0x4
24068 +       .long   0x2172
24069 +       .uleb128 0x11
24070 +       .long   0x2198
24071 +       .byte   0x1
24072 +       .long   0x1be1
24073 +       .uleb128 0x6
24074 +       .long   0x1be1
24075 +       .byte   0x0
24076 +       .uleb128 0x4
24077 +       .byte   0x4
24078 +       .long   0x2188
24079 +       .uleb128 0x11
24080 +       .long   0x21b3
24081 +       .byte   0x1
24082 +       .long   0x21
24083 +       .uleb128 0x6
24084 +       .long   0x21
24085 +       .uleb128 0x6
24086 +       .long   0x21
24087 +       .byte   0x0
24088 +       .uleb128 0x4
24089 +       .byte   0x4
24090 +       .long   0x219e
24091 +       .uleb128 0x11
24092 +       .long   0x21c9
24093 +       .byte   0x1
24094 +       .long   0x21
24095 +       .uleb128 0x6
24096 +       .long   0x21
24097 +       .byte   0x0
24098 +       .uleb128 0x4
24099 +       .byte   0x4
24100 +       .long   0x21b9
24101 +       .uleb128 0x11
24102 +       .long   0x21df
24103 +       .byte   0x1
24104 +       .long   0x1be1
24105 +       .uleb128 0x6
24106 +       .long   0x21
24107 +       .byte   0x0
24108 +       .uleb128 0x4
24109 +       .byte   0x4
24110 +       .long   0x21cf
24111 +       .uleb128 0x11
24112 +       .long   0x21fa
24113 +       .byte   0x1
24114 +       .long   0x21
24115 +       .uleb128 0x6
24116 +       .long   0x21fa
24117 +       .uleb128 0x6
24118 +       .long   0x2200
24119 +       .byte   0x0
24120 +       .uleb128 0x4
24121 +       .byte   0x4
24122 +       .long   0x1a91
24123 +       .uleb128 0x4
24124 +       .byte   0x4
24125 +       .long   0x1b57
24126 +       .uleb128 0x4
24127 +       .byte   0x4
24128 +       .long   0x21e5
24129 +       .uleb128 0x11
24130 +       .long   0x2221
24131 +       .byte   0x1
24132 +       .long   0x173
24133 +       .uleb128 0x6
24134 +       .long   0x173
24135 +       .uleb128 0x6
24136 +       .long   0x21
24137 +       .byte   0x0
24138 +       .uleb128 0x4
24139 +       .byte   0x4
24140 +       .long   0x220c
24141 +       .uleb128 0x5
24142 +       .long   0x223d
24143 +       .byte   0x1
24144 +       .uleb128 0x6
24145 +       .long   0x223d
24146 +       .uleb128 0x6
24147 +       .long   0xb5
24148 +       .uleb128 0x6
24149 +       .long   0x2200
24150 +       .byte   0x0
24151 +       .uleb128 0x4
24152 +       .byte   0x4
24153 +       .long   0x1b10
24154 +       .uleb128 0x4
24155 +       .byte   0x4
24156 +       .long   0x2227
24157 +       .uleb128 0x5
24158 +       .long   0x225a
24159 +       .byte   0x1
24160 +       .uleb128 0x6
24161 +       .long   0x223d
24162 +       .uleb128 0x6
24163 +       .long   0x2200
24164 +       .byte   0x0
24165 +       .uleb128 0x4
24166 +       .byte   0x4
24167 +       .long   0x2249
24168 +       .uleb128 0x11
24169 +       .long   0x227a
24170 +       .byte   0x1
24171 +       .long   0x21
24172 +       .uleb128 0x6
24173 +       .long   0x227a
24174 +       .uleb128 0x6
24175 +       .long   0xb5
24176 +       .uleb128 0x6
24177 +       .long   0xb5
24178 +       .byte   0x0
24179 +       .uleb128 0x4
24180 +       .byte   0x4
24181 +       .long   0x19da
24182 +       .uleb128 0x4
24183 +       .byte   0x4
24184 +       .long   0x2260
24185 +       .uleb128 0x11
24186 +       .long   0x229b
24187 +       .byte   0x1
24188 +       .long   0x21
24189 +       .uleb128 0x6
24190 +       .long   0xb5
24191 +       .uleb128 0x6
24192 +       .long   0xb5
24193 +       .byte   0x0
24194 +       .uleb128 0x4
24195 +       .byte   0x4
24196 +       .long   0x2286
24197 +       .uleb128 0x11
24198 +       .long   0x22b1
24199 +       .byte   0x1
24200 +       .long   0x77
24201 +       .uleb128 0x6
24202 +       .long   0x2f
24203 +       .byte   0x0
24204 +       .uleb128 0x4
24205 +       .byte   0x4
24206 +       .long   0x22a1
24207 +       .uleb128 0x11
24208 +       .long   0x22c7
24209 +       .byte   0x1
24210 +       .long   0x77
24211 +       .uleb128 0x6
24212 +       .long   0x923
24213 +       .byte   0x0
24214 +       .uleb128 0x4
24215 +       .byte   0x4
24216 +       .long   0x22b7
24217 +       .uleb128 0x5
24218 +       .long   0x22de
24219 +       .byte   0x1
24220 +       .uleb128 0x6
24221 +       .long   0x923
24222 +       .uleb128 0x6
24223 +       .long   0x21
24224 +       .byte   0x0
24225 +       .uleb128 0x4
24226 +       .byte   0x4
24227 +       .long   0x22cd
24228 +       .uleb128 0xf
24229 +       .long   0x22ff
24230 +       .long   .LASF449
24231 +       .byte   0x4
24232 +       .byte   0x2b
24233 +       .byte   0x17
24234 +       .uleb128 0xa
24235 +       .long   .LASF450
24236 +       .byte   0x2b
24237 +       .byte   0x18
24238 +       .long   0x16c4
24239 +       .byte   0x2
24240 +       .byte   0x23
24241 +       .uleb128 0x0
24242 +       .byte   0x0
24243 +       .uleb128 0xf
24244 +       .long   0x2344
24245 +       .long   .LASF451
24246 +       .byte   0x10
24247 +       .byte   0x47
24248 +       .byte   0x1e
24249 +       .uleb128 0xa
24250 +       .long   .LASF452
24251 +       .byte   0x47
24252 +       .byte   0x7a
24253 +       .long   0x2344
24254 +       .byte   0x2
24255 +       .byte   0x23
24256 +       .uleb128 0x0
24257 +       .uleb128 0xa
24258 +       .long   .LASF453
24259 +       .byte   0x47
24260 +       .byte   0x7b
24261 +       .long   0x2344
24262 +       .byte   0x2
24263 +       .byte   0x23
24264 +       .uleb128 0x4
24265 +       .uleb128 0xa
24266 +       .long   .LASF454
24267 +       .byte   0x47
24268 +       .byte   0x7c
24269 +       .long   0x21
24270 +       .byte   0x2
24271 +       .byte   0x23
24272 +       .uleb128 0x8
24273 +       .uleb128 0xa
24274 +       .long   .LASF455
24275 +       .byte   0x47
24276 +       .byte   0x7d
24277 +       .long   0x234a
24278 +       .byte   0x2
24279 +       .byte   0x23
24280 +       .uleb128 0xc
24281 +       .byte   0x0
24282 +       .uleb128 0x4
24283 +       .byte   0x4
24284 +       .long   0x22ff
24285 +       .uleb128 0x4
24286 +       .byte   0x4
24287 +       .long   0x124
24288 +       .uleb128 0xf
24289 +       .long   0x2387
24290 +       .long   .LASF456
24291 +       .byte   0xc
24292 +       .byte   0x47
24293 +       .byte   0x83
24294 +       .uleb128 0xa
24295 +       .long   .LASF457
24296 +       .byte   0x47
24297 +       .byte   0x84
24298 +       .long   0x16c4
24299 +       .byte   0x2
24300 +       .byte   0x23
24301 +       .uleb128 0x0
24302 +       .uleb128 0xa
24303 +       .long   .LASF285
24304 +       .byte   0x47
24305 +       .byte   0x85
24306 +       .long   0x1680
24307 +       .byte   0x2
24308 +       .byte   0x23
24309 +       .uleb128 0x4
24310 +       .uleb128 0xa
24311 +       .long   .LASF458
24312 +       .byte   0x47
24313 +       .byte   0x86
24314 +       .long   0x2344
24315 +       .byte   0x2
24316 +       .byte   0x23
24317 +       .uleb128 0x8
24318 +       .byte   0x0
24319 +       .uleb128 0xf
24320 +       .long   0x23a2
24321 +       .long   .LASF459
24322 +       .byte   0x4
24323 +       .byte   0x47
24324 +       .byte   0x89
24325 +       .uleb128 0xa
24326 +       .long   .LASF460
24327 +       .byte   0x47
24328 +       .byte   0x8a
24329 +       .long   0x23a2
24330 +       .byte   0x2
24331 +       .byte   0x23
24332 +       .uleb128 0x0
24333 +       .byte   0x0
24334 +       .uleb128 0x4
24335 +       .byte   0x4
24336 +       .long   0x2350
24337 +       .uleb128 0x9
24338 +       .long   0x23bf
24339 +       .byte   0x8
24340 +       .byte   0x4a
24341 +       .byte   0x18
24342 +       .uleb128 0xb
24343 +       .string "sig"
24344 +       .byte   0x4a
24345 +       .byte   0x19
24346 +       .long   0x1b00
24347 +       .byte   0x2
24348 +       .byte   0x23
24349 +       .uleb128 0x0
24350 +       .byte   0x0
24351 +       .uleb128 0x7
24352 +       .long   .LASF461
24353 +       .byte   0x4a
24354 +       .byte   0x1a
24355 +       .long   0x23a8
24356 +       .uleb128 0x7
24357 +       .long   .LASF462
24358 +       .byte   0x4e
24359 +       .byte   0x11
24360 +       .long   0x3c
24361 +       .uleb128 0x7
24362 +       .long   .LASF463
24363 +       .byte   0x4e
24364 +       .byte   0x12
24365 +       .long   0x23e0
24366 +       .uleb128 0x4
24367 +       .byte   0x4
24368 +       .long   0x23ca
24369 +       .uleb128 0x7
24370 +       .long   .LASF464
24371 +       .byte   0x4e
24372 +       .byte   0x14
24373 +       .long   0x940
24374 +       .uleb128 0x7
24375 +       .long   .LASF465
24376 +       .byte   0x4e
24377 +       .byte   0x15
24378 +       .long   0x23fc
24379 +       .uleb128 0x4
24380 +       .byte   0x4
24381 +       .long   0x23e6
24382 +       .uleb128 0xf
24383 +       .long   0x2447
24384 +       .long   .LASF466
24385 +       .byte   0x14
24386 +       .byte   0x4a
24387 +       .byte   0x7b
24388 +       .uleb128 0xa
24389 +       .long   .LASF467
24390 +       .byte   0x4a
24391 +       .byte   0x7c
24392 +       .long   0x23d5
24393 +       .byte   0x2
24394 +       .byte   0x23
24395 +       .uleb128 0x0
24396 +       .uleb128 0xa
24397 +       .long   .LASF468
24398 +       .byte   0x4a
24399 +       .byte   0x7d
24400 +       .long   0x2f
24401 +       .byte   0x2
24402 +       .byte   0x23
24403 +       .uleb128 0x4
24404 +       .uleb128 0xa
24405 +       .long   .LASF469
24406 +       .byte   0x4a
24407 +       .byte   0x7e
24408 +       .long   0x23f1
24409 +       .byte   0x2
24410 +       .byte   0x23
24411 +       .uleb128 0x8
24412 +       .uleb128 0xa
24413 +       .long   .LASF470
24414 +       .byte   0x4a
24415 +       .byte   0x7f
24416 +       .long   0x23bf
24417 +       .byte   0x2
24418 +       .byte   0x23
24419 +       .uleb128 0xc
24420 +       .byte   0x0
24421 +       .uleb128 0xf
24422 +       .long   0x2461
24423 +       .long   .LASF471
24424 +       .byte   0x14
24425 +       .byte   0x4a
24426 +       .byte   0x82
24427 +       .uleb128 0xb
24428 +       .string "sa"
24429 +       .byte   0x4a
24430 +       .byte   0x83
24431 +       .long   0x2402
24432 +       .byte   0x2
24433 +       .byte   0x23
24434 +       .uleb128 0x0
24435 +       .byte   0x0
24436 +       .uleb128 0x25
24437 +       .long   0x2484
24438 +       .long   .LASF473
24439 +       .byte   0x4
24440 +       .byte   0x52
24441 +       .byte   0x7
24442 +       .uleb128 0xe
24443 +       .long   .LASF474
24444 +       .byte   0x52
24445 +       .byte   0x8
24446 +       .long   0x21
24447 +       .uleb128 0xe
24448 +       .long   .LASF475
24449 +       .byte   0x52
24450 +       .byte   0x9
24451 +       .long   0x160b
24452 +       .byte   0x0
24453 +       .uleb128 0x7
24454 +       .long   .LASF476
24455 +       .byte   0x52
24456 +       .byte   0xa
24457 +       .long   0x2461
24458 +       .uleb128 0x9
24459 +       .long   0x24b4
24460 +       .byte   0x8
24461 +       .byte   0x52
24462 +       .byte   0x31
24463 +       .uleb128 0xa
24464 +       .long   .LASF477
24465 +       .byte   0x52
24466 +       .byte   0x32
24467 +       .long   0x1b5
24468 +       .byte   0x2
24469 +       .byte   0x23
24470 +       .uleb128 0x0
24471 +       .uleb128 0xa
24472 +       .long   .LASF478
24473 +       .byte   0x52
24474 +       .byte   0x33
24475 +       .long   0x1dd
24476 +       .byte   0x2
24477 +       .byte   0x23
24478 +       .uleb128 0x4
24479 +       .byte   0x0
24480 +       .uleb128 0x9
24481 +       .long   0x2503
24482 +       .byte   0x10
24483 +       .byte   0x52
24484 +       .byte   0x37
24485 +       .uleb128 0xa
24486 +       .long   .LASF479
24487 +       .byte   0x52
24488 +       .byte   0x38
24489 +       .long   0x1c0
24490 +       .byte   0x2
24491 +       .byte   0x23
24492 +       .uleb128 0x0
24493 +       .uleb128 0xa
24494 +       .long   .LASF480
24495 +       .byte   0x52
24496 +       .byte   0x39
24497 +       .long   0x21
24498 +       .byte   0x2
24499 +       .byte   0x23
24500 +       .uleb128 0x4
24501 +       .uleb128 0xa
24502 +       .long   .LASF481
24503 +       .byte   0x52
24504 +       .byte   0x3a
24505 +       .long   0x2503
24506 +       .byte   0x2
24507 +       .byte   0x23
24508 +       .uleb128 0x8
24509 +       .uleb128 0xa
24510 +       .long   .LASF482
24511 +       .byte   0x52
24512 +       .byte   0x3b
24513 +       .long   0x2484
24514 +       .byte   0x2
24515 +       .byte   0x23
24516 +       .uleb128 0x8
24517 +       .uleb128 0xa
24518 +       .long   .LASF483
24519 +       .byte   0x52
24520 +       .byte   0x3c
24521 +       .long   0x21
24522 +       .byte   0x2
24523 +       .byte   0x23
24524 +       .uleb128 0xc
24525 +       .byte   0x0
24526 +       .uleb128 0x12
24527 +       .long   0x2512
24528 +       .long   0xbb
24529 +       .uleb128 0x23
24530 +       .long   0x28
24531 +       .byte   0x0
24532 +       .uleb128 0x9
24533 +       .long   0x2545
24534 +       .byte   0xc
24535 +       .byte   0x52
24536 +       .byte   0x40
24537 +       .uleb128 0xa
24538 +       .long   .LASF477
24539 +       .byte   0x52
24540 +       .byte   0x41
24541 +       .long   0x1b5
24542 +       .byte   0x2
24543 +       .byte   0x23
24544 +       .uleb128 0x0
24545 +       .uleb128 0xa
24546 +       .long   .LASF478
24547 +       .byte   0x52
24548 +       .byte   0x42
24549 +       .long   0x1dd
24550 +       .byte   0x2
24551 +       .byte   0x23
24552 +       .uleb128 0x4
24553 +       .uleb128 0xa
24554 +       .long   .LASF482
24555 +       .byte   0x52
24556 +       .byte   0x43
24557 +       .long   0x2484
24558 +       .byte   0x2
24559 +       .byte   0x23
24560 +       .uleb128 0x8
24561 +       .byte   0x0
24562 +       .uleb128 0x9
24563 +       .long   0x2594
24564 +       .byte   0x14
24565 +       .byte   0x52
24566 +       .byte   0x47
24567 +       .uleb128 0xa
24568 +       .long   .LASF477
24569 +       .byte   0x52
24570 +       .byte   0x48
24571 +       .long   0x1b5
24572 +       .byte   0x2
24573 +       .byte   0x23
24574 +       .uleb128 0x0
24575 +       .uleb128 0xa
24576 +       .long   .LASF478
24577 +       .byte   0x52
24578 +       .byte   0x49
24579 +       .long   0x1dd
24580 +       .byte   0x2
24581 +       .byte   0x23
24582 +       .uleb128 0x4
24583 +       .uleb128 0xa
24584 +       .long   .LASF484
24585 +       .byte   0x52
24586 +       .byte   0x4a
24587 +       .long   0x21
24588 +       .byte   0x2
24589 +       .byte   0x23
24590 +       .uleb128 0x8
24591 +       .uleb128 0xa
24592 +       .long   .LASF485
24593 +       .byte   0x52
24594 +       .byte   0x4b
24595 +       .long   0x21f
24596 +       .byte   0x2
24597 +       .byte   0x23
24598 +       .uleb128 0xc
24599 +       .uleb128 0xa
24600 +       .long   .LASF486
24601 +       .byte   0x52
24602 +       .byte   0x4c
24603 +       .long   0x21f
24604 +       .byte   0x2
24605 +       .byte   0x23
24606 +       .uleb128 0x10
24607 +       .byte   0x0
24608 +       .uleb128 0x9
24609 +       .long   0x25ab
24610 +       .byte   0x4
24611 +       .byte   0x52
24612 +       .byte   0x50
24613 +       .uleb128 0xa
24614 +       .long   .LASF487
24615 +       .byte   0x52
24616 +       .byte   0x51
24617 +       .long   0x160b
24618 +       .byte   0x2
24619 +       .byte   0x23
24620 +       .uleb128 0x0
24621 +       .byte   0x0
24622 +       .uleb128 0x9
24623 +       .long   0x25d0
24624 +       .byte   0x8
24625 +       .byte   0x52
24626 +       .byte   0x58
24627 +       .uleb128 0xa
24628 +       .long   .LASF488
24629 +       .byte   0x52
24630 +       .byte   0x59
24631 +       .long   0x5a
24632 +       .byte   0x2
24633 +       .byte   0x23
24634 +       .uleb128 0x0
24635 +       .uleb128 0xb
24636 +       .string "_fd"
24637 +       .byte   0x52
24638 +       .byte   0x5a
24639 +       .long   0x21
24640 +       .byte   0x2
24641 +       .byte   0x23
24642 +       .uleb128 0x4
24643 +       .byte   0x0
24644 +       .uleb128 0xc
24645 +       .long   0x2626
24646 +       .byte   0x74
24647 +       .byte   0x52
24648 +       .byte   0x2d
24649 +       .uleb128 0xe
24650 +       .long   .LASF481
24651 +       .byte   0x52
24652 +       .byte   0x2e
24653 +       .long   0x2626
24654 +       .uleb128 0xe
24655 +       .long   .LASF489
24656 +       .byte   0x52
24657 +       .byte   0x34
24658 +       .long   0x248f
24659 +       .uleb128 0xe
24660 +       .long   .LASF490
24661 +       .byte   0x52
24662 +       .byte   0x3d
24663 +       .long   0x24b4
24664 +       .uleb128 0x26
24665 +       .string "_rt"
24666 +       .byte   0x52
24667 +       .byte   0x44
24668 +       .long   0x2512
24669 +       .uleb128 0xe
24670 +       .long   .LASF491
24671 +       .byte   0x52
24672 +       .byte   0x4d
24673 +       .long   0x2545
24674 +       .uleb128 0xe
24675 +       .long   .LASF492
24676 +       .byte   0x52
24677 +       .byte   0x55
24678 +       .long   0x2594
24679 +       .uleb128 0xe
24680 +       .long   .LASF493
24681 +       .byte   0x52
24682 +       .byte   0x5b
24683 +       .long   0x25ab
24684 +       .byte   0x0
24685 +       .uleb128 0x12
24686 +       .long   0x2636
24687 +       .long   0x21
24688 +       .uleb128 0x13
24689 +       .long   0x28
24690 +       .byte   0x1c
24691 +       .byte   0x0
24692 +       .uleb128 0xf
24693 +       .long   0x267b
24694 +       .long   .LASF494
24695 +       .byte   0x80
24696 +       .byte   0x4a
24697 +       .byte   0x9
24698 +       .uleb128 0xa
24699 +       .long   .LASF495
24700 +       .byte   0x52
24701 +       .byte   0x29
24702 +       .long   0x21
24703 +       .byte   0x2
24704 +       .byte   0x23
24705 +       .uleb128 0x0
24706 +       .uleb128 0xa
24707 +       .long   .LASF496
24708 +       .byte   0x52
24709 +       .byte   0x2a
24710 +       .long   0x21
24711 +       .byte   0x2
24712 +       .byte   0x23
24713 +       .uleb128 0x4
24714 +       .uleb128 0xa
24715 +       .long   .LASF497
24716 +       .byte   0x52
24717 +       .byte   0x2b
24718 +       .long   0x21
24719 +       .byte   0x2
24720 +       .byte   0x23
24721 +       .uleb128 0x8
24722 +       .uleb128 0xa
24723 +       .long   .LASF498
24724 +       .byte   0x52
24725 +       .byte   0x5c
24726 +       .long   0x25d0
24727 +       .byte   0x2
24728 +       .byte   0x23
24729 +       .uleb128 0xc
24730 +       .byte   0x0
24731 +       .uleb128 0x7
24732 +       .long   .LASF499
24733 +       .byte   0x52
24734 +       .byte   0x5d
24735 +       .long   0x2636
24736 +       .uleb128 0xf
24737 +       .long   0x2729
24738 +       .long   .LASF500
24739 +       .byte   0x2c
24740 +       .byte   0x46
24741 +       .byte   0x13
24742 +       .uleb128 0x16
24743 +       .long   .LASF501
24744 +       .byte   0xb
24745 +       .value  0x229
24746 +       .long   0x16c4
24747 +       .byte   0x2
24748 +       .byte   0x23
24749 +       .uleb128 0x0
24750 +       .uleb128 0x16
24751 +       .long   .LASF502
24752 +       .byte   0xb
24753 +       .value  0x22a
24754 +       .long   0x16c4
24755 +       .byte   0x2
24756 +       .byte   0x23
24757 +       .uleb128 0x4
24758 +       .uleb128 0x16
24759 +       .long   .LASF245
24760 +       .byte   0xb
24761 +       .value  0x22b
24762 +       .long   0x16c4
24763 +       .byte   0x2
24764 +       .byte   0x23
24765 +       .uleb128 0x8
24766 +       .uleb128 0x16
24767 +       .long   .LASF503
24768 +       .byte   0xb
24769 +       .value  0x22c
24770 +       .long   0x16c4
24771 +       .byte   0x2
24772 +       .byte   0x23
24773 +       .uleb128 0xc
24774 +       .uleb128 0x16
24775 +       .long   .LASF504
24776 +       .byte   0xb
24777 +       .value  0x22e
24778 +       .long   0x16c4
24779 +       .byte   0x2
24780 +       .byte   0x23
24781 +       .uleb128 0x10
24782 +       .uleb128 0x16
24783 +       .long   .LASF505
24784 +       .byte   0xb
24785 +       .value  0x22f
24786 +       .long   0x16c4
24787 +       .byte   0x2
24788 +       .byte   0x23
24789 +       .uleb128 0x14
24790 +       .uleb128 0x16
24791 +       .long   .LASF506
24792 +       .byte   0xb
24793 +       .value  0x232
24794 +       .long   0x2f
24795 +       .byte   0x2
24796 +       .byte   0x23
24797 +       .uleb128 0x18
24798 +       .uleb128 0x16
24799 +       .long   .LASF507
24800 +       .byte   0xb
24801 +       .value  0x233
24802 +       .long   0x2f
24803 +       .byte   0x2
24804 +       .byte   0x23
24805 +       .uleb128 0x1c
24806 +       .uleb128 0x16
24807 +       .long   .LASF508
24808 +       .byte   0xb
24809 +       .value  0x23b
24810 +       .long   0x17bc
24811 +       .byte   0x2
24812 +       .byte   0x23
24813 +       .uleb128 0x20
24814 +       .uleb128 0x17
24815 +       .string "uid"
24816 +       .byte   0xb
24817 +       .value  0x23c
24818 +       .long   0x1dd
24819 +       .byte   0x2
24820 +       .byte   0x23
24821 +       .uleb128 0x28
24822 +       .byte   0x0
24823 +       .uleb128 0x4
24824 +       .byte   0x4
24825 +       .long   0x2686
24826 +       .uleb128 0xf
24827 +       .long   0x2758
24828 +       .long   .LASF503
24829 +       .byte   0x10
24830 +       .byte   0x46
24831 +       .byte   0x19
24832 +       .uleb128 0xa
24833 +       .long   .LASF509
24834 +       .byte   0x46
24835 +       .byte   0x1a
24836 +       .long   0x17bc
24837 +       .byte   0x2
24838 +       .byte   0x23
24839 +       .uleb128 0x0
24840 +       .uleb128 0xa
24841 +       .long   .LASF247
24842 +       .byte   0x46
24843 +       .byte   0x1b
24844 +       .long   0x23bf
24845 +       .byte   0x2
24846 +       .byte   0x23
24847 +       .uleb128 0x8
24848 +       .byte   0x0
24849 +       .uleb128 0xf
24850 +       .long   0x27e3
24851 +       .long   .LASF510
24852 +       .byte   0x24
24853 +       .byte   0x23
24854 +       .byte   0x7
24855 +       .uleb128 0xa
24856 +       .long   .LASF322
24857 +       .byte   0x23
24858 +       .byte   0x8
24859 +       .long   0x16c4
24860 +       .byte   0x2
24861 +       .byte   0x23
24862 +       .uleb128 0x0
24863 +       .uleb128 0xa
24864 +       .long   .LASF285
24865 +       .byte   0x23
24866 +       .byte   0x9
24867 +       .long   0x16a2
24868 +       .byte   0x2
24869 +       .byte   0x23
24870 +       .uleb128 0x4
24871 +       .uleb128 0xa
24872 +       .long   .LASF511
24873 +       .byte   0x23
24874 +       .byte   0xa
24875 +       .long   0x21
24876 +       .byte   0x2
24877 +       .byte   0x23
24878 +       .uleb128 0x8
24879 +       .uleb128 0xa
24880 +       .long   .LASF512
24881 +       .byte   0x23
24882 +       .byte   0xb
24883 +       .long   0x28ec
24884 +       .byte   0x2
24885 +       .byte   0x23
24886 +       .uleb128 0xc
24887 +       .uleb128 0xb
24888 +       .string "pwd"
24889 +       .byte   0x23
24890 +       .byte   0xb
24891 +       .long   0x28ec
24892 +       .byte   0x2
24893 +       .byte   0x23
24894 +       .uleb128 0x10
24895 +       .uleb128 0xa
24896 +       .long   .LASF513
24897 +       .byte   0x23
24898 +       .byte   0xb
24899 +       .long   0x28ec
24900 +       .byte   0x2
24901 +       .byte   0x23
24902 +       .uleb128 0x14
24903 +       .uleb128 0xa
24904 +       .long   .LASF514
24905 +       .byte   0x23
24906 +       .byte   0xc
24907 +       .long   0x28f8
24908 +       .byte   0x2
24909 +       .byte   0x23
24910 +       .uleb128 0x18
24911 +       .uleb128 0xa
24912 +       .long   .LASF515
24913 +       .byte   0x23
24914 +       .byte   0xc
24915 +       .long   0x28f8
24916 +       .byte   0x2
24917 +       .byte   0x23
24918 +       .uleb128 0x1c
24919 +       .uleb128 0xa
24920 +       .long   .LASF516
24921 +       .byte   0x23
24922 +       .byte   0xc
24923 +       .long   0x28f8
24924 +       .byte   0x2
24925 +       .byte   0x23
24926 +       .uleb128 0x20
24927 +       .byte   0x0
24928 +       .uleb128 0xf
24929 +       .long   0x28ec
24930 +       .long   .LASF517
24931 +       .byte   0x84
24932 +       .byte   0x23
24933 +       .byte   0x4
24934 +       .uleb128 0xa
24935 +       .long   .LASF518
24936 +       .byte   0x24
24937 +       .byte   0x53
24938 +       .long   0x16c4
24939 +       .byte   0x2
24940 +       .byte   0x23
24941 +       .uleb128 0x0
24942 +       .uleb128 0xa
24943 +       .long   .LASF519
24944 +       .byte   0x24
24945 +       .byte   0x54
24946 +       .long   0x77
24947 +       .byte   0x2
24948 +       .byte   0x23
24949 +       .uleb128 0x4
24950 +       .uleb128 0xa
24951 +       .long   .LASF520
24952 +       .byte   0x24
24953 +       .byte   0x55
24954 +       .long   0x1680
24955 +       .byte   0x2
24956 +       .byte   0x23
24957 +       .uleb128 0x8
24958 +       .uleb128 0xa
24959 +       .long   .LASF521
24960 +       .byte   0x24
24961 +       .byte   0x56
24962 +       .long   0x3381
24963 +       .byte   0x2
24964 +       .byte   0x23
24965 +       .uleb128 0xc
24966 +       .uleb128 0xa
24967 +       .long   .LASF522
24968 +       .byte   0x24
24969 +       .byte   0x5c
24970 +       .long   0x1808
24971 +       .byte   0x2
24972 +       .byte   0x23
24973 +       .uleb128 0x10
24974 +       .uleb128 0xa
24975 +       .long   .LASF523
24976 +       .byte   0x24
24977 +       .byte   0x5d
24978 +       .long   0x28ec
24979 +       .byte   0x2
24980 +       .byte   0x23
24981 +       .uleb128 0x18
24982 +       .uleb128 0xa
24983 +       .long   .LASF524
24984 +       .byte   0x24
24985 +       .byte   0x5e
24986 +       .long   0x5db4
24987 +       .byte   0x2
24988 +       .byte   0x23
24989 +       .uleb128 0x1c
24990 +       .uleb128 0xa
24991 +       .long   .LASF525
24992 +       .byte   0x24
24993 +       .byte   0x60
24994 +       .long   0x17bc
24995 +       .byte   0x2
24996 +       .byte   0x23
24997 +       .uleb128 0x28
24998 +       .uleb128 0xb
24999 +       .string "d_u"
25000 +       .byte   0x24
25001 +       .byte   0x67
25002 +       .long   0x5df6
25003 +       .byte   0x2
25004 +       .byte   0x23
25005 +       .uleb128 0x30
25006 +       .uleb128 0xa
25007 +       .long   .LASF526
25008 +       .byte   0x24
25009 +       .byte   0x68
25010 +       .long   0x17bc
25011 +       .byte   0x2
25012 +       .byte   0x23
25013 +       .uleb128 0x38
25014 +       .uleb128 0xa
25015 +       .long   .LASF527
25016 +       .byte   0x24
25017 +       .byte   0x69
25018 +       .long   0x17bc
25019 +       .byte   0x2
25020 +       .byte   0x23
25021 +       .uleb128 0x40
25022 +       .uleb128 0xa
25023 +       .long   .LASF528
25024 +       .byte   0x24
25025 +       .byte   0x6a
25026 +       .long   0x2f
25027 +       .byte   0x2
25028 +       .byte   0x23
25029 +       .uleb128 0x48
25030 +       .uleb128 0xa
25031 +       .long   .LASF529
25032 +       .byte   0x24
25033 +       .byte   0x6b
25034 +       .long   0x5e84
25035 +       .byte   0x2
25036 +       .byte   0x23
25037 +       .uleb128 0x4c
25038 +       .uleb128 0xa
25039 +       .long   .LASF530
25040 +       .byte   0x24
25041 +       .byte   0x6c
25042 +       .long   0x60d1
25043 +       .byte   0x2
25044 +       .byte   0x23
25045 +       .uleb128 0x50
25046 +       .uleb128 0xa
25047 +       .long   .LASF531
25048 +       .byte   0x24
25049 +       .byte   0x6d
25050 +       .long   0x160b
25051 +       .byte   0x2
25052 +       .byte   0x23
25053 +       .uleb128 0x54
25054 +       .uleb128 0xa
25055 +       .long   .LASF532
25056 +       .byte   0x24
25057 +       .byte   0x6f
25058 +       .long   0x60dd
25059 +       .byte   0x2
25060 +       .byte   0x23
25061 +       .uleb128 0x58
25062 +       .uleb128 0xa
25063 +       .long   .LASF533
25064 +       .byte   0x24
25065 +       .byte   0x71
25066 +       .long   0x21
25067 +       .byte   0x2
25068 +       .byte   0x23
25069 +       .uleb128 0x5c
25070 +       .uleb128 0xa
25071 +       .long   .LASF534
25072 +       .byte   0x24
25073 +       .byte   0x72
25074 +       .long   0x60e3
25075 +       .byte   0x2
25076 +       .byte   0x23
25077 +       .uleb128 0x60
25078 +       .byte   0x0
25079 +       .uleb128 0x4
25080 +       .byte   0x4
25081 +       .long   0x27e3
25082 +       .uleb128 0x21
25083 +       .long   .LASF535
25084 +       .byte   0x1
25085 +       .uleb128 0x4
25086 +       .byte   0x4
25087 +       .long   0x28f2
25088 +       .uleb128 0xf
25089 +       .long   0x2927
25090 +       .long   .LASF536
25091 +       .byte   0x10
25092 +       .byte   0x38
25093 +       .byte   0x46
25094 +       .uleb128 0xa
25095 +       .long   .LASF537
25096 +       .byte   0x44
25097 +       .byte   0xe
25098 +       .long   0x77
25099 +       .byte   0x2
25100 +       .byte   0x23
25101 +       .uleb128 0x0
25102 +       .uleb128 0xa
25103 +       .long   .LASF327
25104 +       .byte   0x44
25105 +       .byte   0xf
25106 +       .long   0x18ef
25107 +       .byte   0x2
25108 +       .byte   0x23
25109 +       .uleb128 0x4
25110 +       .byte   0x0
25111 +       .uleb128 0xf
25112 +       .long   0x2950
25113 +       .long   .LASF538
25114 +       .byte   0xc
25115 +       .byte   0x18
25116 +       .byte   0x1b
25117 +       .uleb128 0xa
25118 +       .long   .LASF539
25119 +       .byte   0x18
25120 +       .byte   0x1c
25121 +       .long   0x17bc
25122 +       .byte   0x2
25123 +       .byte   0x23
25124 +       .uleb128 0x0
25125 +       .uleb128 0xa
25126 +       .long   .LASF540
25127 +       .byte   0x18
25128 +       .byte   0x1d
25129 +       .long   0x2f
25130 +       .byte   0x2
25131 +       .byte   0x23
25132 +       .uleb128 0x8
25133 +       .byte   0x0
25134 +       .uleb128 0xf
25135 +       .long   0x2969
25136 +       .long   .LASF541
25137 +       .byte   0x0
25138 +       .byte   0x18
25139 +       .byte   0x29
25140 +       .uleb128 0xb
25141 +       .string "x"
25142 +       .byte   0x18
25143 +       .byte   0x2a
25144 +       .long   0x2969
25145 +       .byte   0x2
25146 +       .byte   0x23
25147 +       .uleb128 0x0
25148 +       .byte   0x0
25149 +       .uleb128 0x12
25150 +       .long   0x2978
25151 +       .long   0xbb
25152 +       .uleb128 0x23
25153 +       .long   0x28
25154 +       .byte   0x0
25155 +       .uleb128 0xf
25156 +       .long   0x29bd
25157 +       .long   .LASF542
25158 +       .byte   0x14
25159 +       .byte   0x18
25160 +       .byte   0x4d
25161 +       .uleb128 0xa
25162 +       .long   .LASF322
25163 +       .byte   0x18
25164 +       .byte   0x4e
25165 +       .long   0x21
25166 +       .byte   0x2
25167 +       .byte   0x23
25168 +       .uleb128 0x0
25169 +       .uleb128 0xa
25170 +       .long   .LASF543
25171 +       .byte   0x18
25172 +       .byte   0x4f
25173 +       .long   0x21
25174 +       .byte   0x2
25175 +       .byte   0x23
25176 +       .uleb128 0x4
25177 +       .uleb128 0xa
25178 +       .long   .LASF544
25179 +       .byte   0x18
25180 +       .byte   0x50
25181 +       .long   0x21
25182 +       .byte   0x2
25183 +       .byte   0x23
25184 +       .uleb128 0x8
25185 +       .uleb128 0xa
25186 +       .long   .LASF509
25187 +       .byte   0x18
25188 +       .byte   0x51
25189 +       .long   0x17bc
25190 +       .byte   0x2
25191 +       .byte   0x23
25192 +       .uleb128 0xc
25193 +       .byte   0x0
25194 +       .uleb128 0xf
25195 +       .long   0x29f4
25196 +       .long   .LASF545
25197 +       .byte   0x80
25198 +       .byte   0x18
25199 +       .byte   0x54
25200 +       .uleb128 0xb
25201 +       .string "pcp"
25202 +       .byte   0x18
25203 +       .byte   0x55
25204 +       .long   0x29f4
25205 +       .byte   0x2
25206 +       .byte   0x23
25207 +       .uleb128 0x0
25208 +       .uleb128 0xa
25209 +       .long   .LASF546
25210 +       .byte   0x18
25211 +       .byte   0x5a
25212 +       .long   0x169
25213 +       .byte   0x2
25214 +       .byte   0x23
25215 +       .uleb128 0x28
25216 +       .uleb128 0xa
25217 +       .long   .LASF547
25218 +       .byte   0x18
25219 +       .byte   0x5b
25220 +       .long   0x2a04
25221 +       .byte   0x2
25222 +       .byte   0x23
25223 +       .uleb128 0x29
25224 +       .byte   0x0
25225 +       .uleb128 0x12
25226 +       .long   0x2a04
25227 +       .long   0x2978
25228 +       .uleb128 0x13
25229 +       .long   0x28
25230 +       .byte   0x1
25231 +       .byte   0x0
25232 +       .uleb128 0x12
25233 +       .long   0x2a14
25234 +       .long   0x169
25235 +       .uleb128 0x13
25236 +       .long   0x28
25237 +       .byte   0xd
25238 +       .byte   0x0
25239 +       .uleb128 0x1a
25240 +       .long   0x2bbb
25241 +       .long   .LASF548
25242 +       .value  0x1280
25243 +       .byte   0x18
25244 +       .byte   0xb6
25245 +       .uleb128 0xa
25246 +       .long   .LASF549
25247 +       .byte   0x18
25248 +       .byte   0xb8
25249 +       .long   0x2f
25250 +       .byte   0x2
25251 +       .byte   0x23
25252 +       .uleb128 0x0
25253 +       .uleb128 0xa
25254 +       .long   .LASF550
25255 +       .byte   0x18
25256 +       .byte   0xb8
25257 +       .long   0x2f
25258 +       .byte   0x2
25259 +       .byte   0x23
25260 +       .uleb128 0x4
25261 +       .uleb128 0xa
25262 +       .long   .LASF551
25263 +       .byte   0x18
25264 +       .byte   0xb8
25265 +       .long   0x2f
25266 +       .byte   0x2
25267 +       .byte   0x23
25268 +       .uleb128 0x8
25269 +       .uleb128 0xa
25270 +       .long   .LASF552
25271 +       .byte   0x18
25272 +       .byte   0xc1
25273 +       .long   0x8dc
25274 +       .byte   0x2
25275 +       .byte   0x23
25276 +       .uleb128 0xc
25277 +       .uleb128 0xa
25278 +       .long   .LASF553
25279 +       .byte   0x18
25280 +       .byte   0xcc
25281 +       .long   0x2bbb
25282 +       .byte   0x3
25283 +       .byte   0x23
25284 +       .uleb128 0x80
25285 +       .uleb128 0xa
25286 +       .long   .LASF285
25287 +       .byte   0x18
25288 +       .byte   0xd1
25289 +       .long   0x1680
25290 +       .byte   0x3
25291 +       .byte   0x23
25292 +       .uleb128 0x1080
25293 +       .uleb128 0xa
25294 +       .long   .LASF538
25295 +       .byte   0x18
25296 +       .byte   0xd6
25297 +       .long   0x2bcb
25298 +       .byte   0x3
25299 +       .byte   0x23
25300 +       .uleb128 0x1084
25301 +       .uleb128 0xa
25302 +       .long   .LASF554
25303 +       .byte   0x18
25304 +       .byte   0xd9
25305 +       .long   0x2950
25306 +       .byte   0x3
25307 +       .byte   0x23
25308 +       .uleb128 0x1180
25309 +       .uleb128 0xa
25310 +       .long   .LASF555
25311 +       .byte   0x18
25312 +       .byte   0xdc
25313 +       .long   0x1680
25314 +       .byte   0x3
25315 +       .byte   0x23
25316 +       .uleb128 0x1180
25317 +       .uleb128 0xa
25318 +       .long   .LASF556
25319 +       .byte   0x18
25320 +       .byte   0xdd
25321 +       .long   0x17bc
25322 +       .byte   0x3
25323 +       .byte   0x23
25324 +       .uleb128 0x1184
25325 +       .uleb128 0xa
25326 +       .long   .LASF557
25327 +       .byte   0x18
25328 +       .byte   0xde
25329 +       .long   0x17bc
25330 +       .byte   0x3
25331 +       .byte   0x23
25332 +       .uleb128 0x118c
25333 +       .uleb128 0xa
25334 +       .long   .LASF558
25335 +       .byte   0x18
25336 +       .byte   0xdf
25337 +       .long   0x2f
25338 +       .byte   0x3
25339 +       .byte   0x23
25340 +       .uleb128 0x1194
25341 +       .uleb128 0xa
25342 +       .long   .LASF559
25343 +       .byte   0x18
25344 +       .byte   0xe0
25345 +       .long   0x2f
25346 +       .byte   0x3
25347 +       .byte   0x23
25348 +       .uleb128 0x1198
25349 +       .uleb128 0xa
25350 +       .long   .LASF560
25351 +       .byte   0x18
25352 +       .byte   0xe1
25353 +       .long   0x2f
25354 +       .byte   0x3
25355 +       .byte   0x23
25356 +       .uleb128 0x119c
25357 +       .uleb128 0xa
25358 +       .long   .LASF561
25359 +       .byte   0x18
25360 +       .byte   0xe2
25361 +       .long   0x21
25362 +       .byte   0x3
25363 +       .byte   0x23
25364 +       .uleb128 0x11a0
25365 +       .uleb128 0xa
25366 +       .long   .LASF562
25367 +       .byte   0x18
25368 +       .byte   0xe5
25369 +       .long   0x16c4
25370 +       .byte   0x3
25371 +       .byte   0x23
25372 +       .uleb128 0x11a4
25373 +       .uleb128 0xa
25374 +       .long   .LASF563
25375 +       .byte   0x18
25376 +       .byte   0xe8
25377 +       .long   0x2bdb
25378 +       .byte   0x3
25379 +       .byte   0x23
25380 +       .uleb128 0x11a8
25381 +       .uleb128 0xa
25382 +       .long   .LASF564
25383 +       .byte   0x18
25384 +       .byte   0xf7
25385 +       .long   0x21
25386 +       .byte   0x3
25387 +       .byte   0x23
25388 +       .uleb128 0x11e0
25389 +       .uleb128 0xa
25390 +       .long   .LASF565
25391 +       .byte   0x18
25392 +       .byte   0xfa
25393 +       .long   0x2950
25394 +       .byte   0x3
25395 +       .byte   0x23
25396 +       .uleb128 0x1200
25397 +       .uleb128 0x16
25398 +       .long   .LASF566
25399 +       .byte   0x18
25400 +       .value  0x115
25401 +       .long   0x2beb
25402 +       .byte   0x3
25403 +       .byte   0x23
25404 +       .uleb128 0x1200
25405 +       .uleb128 0x16
25406 +       .long   .LASF567
25407 +       .byte   0x18
25408 +       .value  0x116
25409 +       .long   0x2f
25410 +       .byte   0x3
25411 +       .byte   0x23
25412 +       .uleb128 0x1204
25413 +       .uleb128 0x16
25414 +       .long   .LASF568
25415 +       .byte   0x18
25416 +       .value  0x117
25417 +       .long   0x2f
25418 +       .byte   0x3
25419 +       .byte   0x23
25420 +       .uleb128 0x1208
25421 +       .uleb128 0x16
25422 +       .long   .LASF569
25423 +       .byte   0x18
25424 +       .value  0x11c
25425 +       .long   0x2cbe
25426 +       .byte   0x3
25427 +       .byte   0x23
25428 +       .uleb128 0x120c
25429 +       .uleb128 0x16
25430 +       .long   .LASF570
25431 +       .byte   0x18
25432 +       .value  0x11e
25433 +       .long   0x2f
25434 +       .byte   0x3
25435 +       .byte   0x23
25436 +       .uleb128 0x1210
25437 +       .uleb128 0x16
25438 +       .long   .LASF571
25439 +       .byte   0x18
25440 +       .value  0x12a
25441 +       .long   0x2f
25442 +       .byte   0x3
25443 +       .byte   0x23
25444 +       .uleb128 0x1214
25445 +       .uleb128 0x16
25446 +       .long   .LASF572
25447 +       .byte   0x18
25448 +       .value  0x12b
25449 +       .long   0x2f
25450 +       .byte   0x3
25451 +       .byte   0x23
25452 +       .uleb128 0x1218
25453 +       .uleb128 0x16
25454 +       .long   .LASF414
25455 +       .byte   0x18
25456 +       .value  0x130
25457 +       .long   0x7f2
25458 +       .byte   0x3
25459 +       .byte   0x23
25460 +       .uleb128 0x121c
25461 +       .byte   0x0
25462 +       .uleb128 0x12
25463 +       .long   0x2bcb
25464 +       .long   0x29bd
25465 +       .uleb128 0x13
25466 +       .long   0x28
25467 +       .byte   0x1f
25468 +       .byte   0x0
25469 +       .uleb128 0x12
25470 +       .long   0x2bdb
25471 +       .long   0x2927
25472 +       .uleb128 0x13
25473 +       .long   0x28
25474 +       .byte   0xa
25475 +       .byte   0x0
25476 +       .uleb128 0x12
25477 +       .long   0x2beb
25478 +       .long   0x16cf
25479 +       .uleb128 0x13
25480 +       .long   0x28
25481 +       .byte   0xd
25482 +       .byte   0x0
25483 +       .uleb128 0x4
25484 +       .byte   0x4
25485 +       .long   0x18ef
25486 +       .uleb128 0x1a
25487 +       .long   0x2cbe
25488 +       .long   .LASF573
25489 +       .value  0x3800
25490 +       .byte   0x18
25491 +       .byte   0x20
25492 +       .uleb128 0x16
25493 +       .long   .LASF574
25494 +       .byte   0x18
25495 +       .value  0x1ae
25496 +       .long   0x2d12
25497 +       .byte   0x2
25498 +       .byte   0x23
25499 +       .uleb128 0x0
25500 +       .uleb128 0x16
25501 +       .long   .LASF575
25502 +       .byte   0x18
25503 +       .value  0x1af
25504 +       .long   0x2d22
25505 +       .byte   0x3
25506 +       .byte   0x23
25507 +       .uleb128 0x3780
25508 +       .uleb128 0x16
25509 +       .long   .LASF576
25510 +       .byte   0x18
25511 +       .value  0x1b0
25512 +       .long   0x21
25513 +       .byte   0x3
25514 +       .byte   0x23
25515 +       .uleb128 0x37bc
25516 +       .uleb128 0x16
25517 +       .long   .LASF577
25518 +       .byte   0x18
25519 +       .value  0x1b2
25520 +       .long   0x2d82
25521 +       .byte   0x3
25522 +       .byte   0x23
25523 +       .uleb128 0x37c0
25524 +       .uleb128 0x16
25525 +       .long   .LASF578
25526 +       .byte   0x18
25527 +       .value  0x1b4
25528 +       .long   0x2d8e
25529 +       .byte   0x3
25530 +       .byte   0x23
25531 +       .uleb128 0x37c4
25532 +       .uleb128 0x16
25533 +       .long   .LASF579
25534 +       .byte   0x18
25535 +       .value  0x1bf
25536 +       .long   0x2f
25537 +       .byte   0x3
25538 +       .byte   0x23
25539 +       .uleb128 0x37c8
25540 +       .uleb128 0x16
25541 +       .long   .LASF580
25542 +       .byte   0x18
25543 +       .value  0x1c0
25544 +       .long   0x2f
25545 +       .byte   0x3
25546 +       .byte   0x23
25547 +       .uleb128 0x37cc
25548 +       .uleb128 0x16
25549 +       .long   .LASF581
25550 +       .byte   0x18
25551 +       .value  0x1c1
25552 +       .long   0x2f
25553 +       .byte   0x3
25554 +       .byte   0x23
25555 +       .uleb128 0x37d0
25556 +       .uleb128 0x16
25557 +       .long   .LASF582
25558 +       .byte   0x18
25559 +       .value  0x1c3
25560 +       .long   0x21
25561 +       .byte   0x3
25562 +       .byte   0x23
25563 +       .uleb128 0x37d4
25564 +       .uleb128 0x16
25565 +       .long   .LASF583
25566 +       .byte   0x18
25567 +       .value  0x1c4
25568 +       .long   0x18ef
25569 +       .byte   0x3
25570 +       .byte   0x23
25571 +       .uleb128 0x37d8
25572 +       .uleb128 0x16
25573 +       .long   .LASF584
25574 +       .byte   0x18
25575 +       .value  0x1c5
25576 +       .long   0x15f9
25577 +       .byte   0x3
25578 +       .byte   0x23
25579 +       .uleb128 0x37e4
25580 +       .uleb128 0x16
25581 +       .long   .LASF585
25582 +       .byte   0x18
25583 +       .value  0x1c6
25584 +       .long   0x21
25585 +       .byte   0x3
25586 +       .byte   0x23
25587 +       .uleb128 0x37e8
25588 +       .byte   0x0
25589 +       .uleb128 0x4
25590 +       .byte   0x4
25591 +       .long   0x2bf1
25592 +       .uleb128 0x15
25593 +       .long   0x2cf0
25594 +       .long   .LASF586
25595 +       .byte   0x14
25596 +       .byte   0x18
25597 +       .value  0x18c
25598 +       .uleb128 0x16
25599 +       .long   .LASF587
25600 +       .byte   0x18
25601 +       .value  0x18d
25602 +       .long   0x2cf6
25603 +       .byte   0x2
25604 +       .byte   0x23
25605 +       .uleb128 0x0
25606 +       .uleb128 0x16
25607 +       .long   .LASF588
25608 +       .byte   0x18
25609 +       .value  0x18e
25610 +       .long   0x2cfc
25611 +       .byte   0x2
25612 +       .byte   0x23
25613 +       .uleb128 0x4
25614 +       .byte   0x0
25615 +       .uleb128 0x21
25616 +       .long   .LASF589
25617 +       .byte   0x1
25618 +       .uleb128 0x4
25619 +       .byte   0x4
25620 +       .long   0x2cf0
25621 +       .uleb128 0x12
25622 +       .long   0x2d0c
25623 +       .long   0x2d0c
25624 +       .uleb128 0x13
25625 +       .long   0x28
25626 +       .byte   0x3
25627 +       .byte   0x0
25628 +       .uleb128 0x4
25629 +       .byte   0x4
25630 +       .long   0x2a14
25631 +       .uleb128 0x12
25632 +       .long   0x2d22
25633 +       .long   0x2a14
25634 +       .uleb128 0x13
25635 +       .long   0x28
25636 +       .byte   0x2
25637 +       .byte   0x0
25638 +       .uleb128 0x12
25639 +       .long   0x2d32
25640 +       .long   0x2cc4
25641 +       .uleb128 0x13
25642 +       .long   0x28
25643 +       .byte   0x2
25644 +       .byte   0x0
25645 +       .uleb128 0x15
25646 +       .long   0x2d82
25647 +       .long   .LASF590
25648 +       .byte   0x20
25649 +       .byte   0x18
25650 +       .value  0x19e
25651 +       .uleb128 0xa
25652 +       .long   .LASF53
25653 +       .byte   0x19
25654 +       .byte   0x13
25655 +       .long   0x2f
25656 +       .byte   0x2
25657 +       .byte   0x23
25658 +       .uleb128 0x0
25659 +       .uleb128 0xa
25660 +       .long   .LASF591
25661 +       .byte   0x19
25662 +       .byte   0x15
25663 +       .long   0x16c4
25664 +       .byte   0x2
25665 +       .byte   0x23
25666 +       .uleb128 0x4
25667 +       .uleb128 0x10
25668 +       .long   0x8487
25669 +       .byte   0x2
25670 +       .byte   0x23
25671 +       .uleb128 0x8
25672 +       .uleb128 0x10
25673 +       .long   0x8507
25674 +       .byte   0x2
25675 +       .byte   0x23
25676 +       .uleb128 0xc
25677 +       .uleb128 0x10
25678 +       .long   0x852a
25679 +       .byte   0x2
25680 +       .byte   0x23
25681 +       .uleb128 0x14
25682 +       .uleb128 0xb
25683 +       .string "lru"
25684 +       .byte   0x19
25685 +       .byte   0x40
25686 +       .long   0x17bc
25687 +       .byte   0x2
25688 +       .byte   0x23
25689 +       .uleb128 0x18
25690 +       .byte   0x0
25691 +       .uleb128 0x4
25692 +       .byte   0x4
25693 +       .long   0x2d32
25694 +       .uleb128 0x21
25695 +       .long   .LASF592
25696 +       .byte   0x1
25697 +       .uleb128 0x4
25698 +       .byte   0x4
25699 +       .long   0x2d88
25700 +       .uleb128 0xf
25701 +       .long   0x2dd9
25702 +       .long   .LASF593
25703 +       .byte   0x14
25704 +       .byte   0x20
25705 +       .byte   0x2f
25706 +       .uleb128 0xa
25707 +       .long   .LASF322
25708 +       .byte   0x20
25709 +       .byte   0x31
25710 +       .long   0x16c4
25711 +       .byte   0x2
25712 +       .byte   0x23
25713 +       .uleb128 0x0
25714 +       .uleb128 0xa
25715 +       .long   .LASF323
25716 +       .byte   0x20
25717 +       .byte   0x32
25718 +       .long   0x1680
25719 +       .byte   0x2
25720 +       .byte   0x23
25721 +       .uleb128 0x4
25722 +       .uleb128 0xa
25723 +       .long   .LASF324
25724 +       .byte   0x20
25725 +       .byte   0x33
25726 +       .long   0x17bc
25727 +       .byte   0x2
25728 +       .byte   0x23
25729 +       .uleb128 0x8
25730 +       .uleb128 0xa
25731 +       .long   .LASF594
25732 +       .byte   0x20
25733 +       .byte   0x3a
25734 +       .long   0x2dd9
25735 +       .byte   0x2
25736 +       .byte   0x23
25737 +       .uleb128 0x10
25738 +       .byte   0x0
25739 +       .uleb128 0x4
25740 +       .byte   0x4
25741 +       .long   0xdda
25742 +       .uleb128 0xf
25743 +       .long   0x2e16
25744 +       .long   .LASF595
25745 +       .byte   0xc
25746 +       .byte   0x55
25747 +       .byte   0x32
25748 +       .uleb128 0xa
25749 +       .long   .LASF596
25750 +       .byte   0x55
25751 +       .byte   0x33
25752 +       .long   0x2e36
25753 +       .byte   0x2
25754 +       .byte   0x23
25755 +       .uleb128 0x0
25756 +       .uleb128 0xa
25757 +       .long   .LASF307
25758 +       .byte   0x55
25759 +       .byte   0x34
25760 +       .long   0x2e30
25761 +       .byte   0x2
25762 +       .byte   0x23
25763 +       .uleb128 0x4
25764 +       .uleb128 0xa
25765 +       .long   .LASF597
25766 +       .byte   0x55
25767 +       .byte   0x35
25768 +       .long   0x21
25769 +       .byte   0x2
25770 +       .byte   0x23
25771 +       .uleb128 0x8
25772 +       .byte   0x0
25773 +       .uleb128 0x11
25774 +       .long   0x2e30
25775 +       .byte   0x1
25776 +       .long   0x21
25777 +       .uleb128 0x6
25778 +       .long   0x2e30
25779 +       .uleb128 0x6
25780 +       .long   0x2f
25781 +       .uleb128 0x6
25782 +       .long   0x160b
25783 +       .byte   0x0
25784 +       .uleb128 0x4
25785 +       .byte   0x4
25786 +       .long   0x2ddf
25787 +       .uleb128 0x4
25788 +       .byte   0x4
25789 +       .long   0x2e16
25790 +       .uleb128 0xf
25791 +       .long   0x2e65
25792 +       .long   .LASF598
25793 +       .byte   0x14
25794 +       .byte   0x55
25795 +       .byte   0x3d
25796 +       .uleb128 0xa
25797 +       .long   .LASF599
25798 +       .byte   0x55
25799 +       .byte   0x3e
25800 +       .long   0x18fa
25801 +       .byte   0x2
25802 +       .byte   0x23
25803 +       .uleb128 0x0
25804 +       .uleb128 0xa
25805 +       .long   .LASF600
25806 +       .byte   0x55
25807 +       .byte   0x3f
25808 +       .long   0x2e30
25809 +       .byte   0x2
25810 +       .byte   0x23
25811 +       .uleb128 0x10
25812 +       .byte   0x0
25813 +       .uleb128 0x21
25814 +       .long   .LASF601
25815 +       .byte   0x1
25816 +       .uleb128 0xf
25817 +       .long   0x2ea2
25818 +       .long   .LASF602
25819 +       .byte   0xc
25820 +       .byte   0x3
25821 +       .byte   0x13
25822 +       .uleb128 0xa
25823 +       .long   .LASF603
25824 +       .byte   0x3
25825 +       .byte   0x14
25826 +       .long   0x1fe
25827 +       .byte   0x2
25828 +       .byte   0x23
25829 +       .uleb128 0x0
25830 +       .uleb128 0xa
25831 +       .long   .LASF604
25832 +       .byte   0x3
25833 +       .byte   0x15
25834 +       .long   0x2ea2
25835 +       .byte   0x2
25836 +       .byte   0x23
25837 +       .uleb128 0x4
25838 +       .uleb128 0xa
25839 +       .long   .LASF605
25840 +       .byte   0x3
25841 +       .byte   0x17
25842 +       .long   0x2ea2
25843 +       .byte   0x2
25844 +       .byte   0x23
25845 +       .uleb128 0x8
25846 +       .byte   0x0
25847 +       .uleb128 0x4
25848 +       .byte   0x4
25849 +       .long   0x2e65
25850 +       .uleb128 0xf
25851 +       .long   0x2ed1
25852 +       .long   .LASF606
25853 +       .byte   0x8
25854 +       .byte   0x25
25855 +       .byte   0x32
25856 +       .uleb128 0xa
25857 +       .long   .LASF307
25858 +       .byte   0x25
25859 +       .byte   0x33
25860 +       .long   0x2ed1
25861 +       .byte   0x2
25862 +       .byte   0x23
25863 +       .uleb128 0x0
25864 +       .uleb128 0xa
25865 +       .long   .LASF316
25866 +       .byte   0x25
25867 +       .byte   0x34
25868 +       .long   0x2ee3
25869 +       .byte   0x2
25870 +       .byte   0x23
25871 +       .uleb128 0x4
25872 +       .byte   0x0
25873 +       .uleb128 0x4
25874 +       .byte   0x4
25875 +       .long   0x2ea8
25876 +       .uleb128 0x5
25877 +       .long   0x2ee3
25878 +       .byte   0x1
25879 +       .uleb128 0x6
25880 +       .long   0x2ed1
25881 +       .byte   0x0
25882 +       .uleb128 0x4
25883 +       .byte   0x4
25884 +       .long   0x2ed7
25885 +       .uleb128 0xf
25886 +       .long   0x2fba
25887 +       .long   .LASF607
25888 +       .byte   0x3c
25889 +       .byte   0x25
25890 +       .byte   0x5d
25891 +       .uleb128 0xa
25892 +       .long   .LASF608
25893 +       .byte   0x25
25894 +       .byte   0x5f
25895 +       .long   0x5a
25896 +       .byte   0x2
25897 +       .byte   0x23
25898 +       .uleb128 0x0
25899 +       .uleb128 0xa
25900 +       .long   .LASF609
25901 +       .byte   0x25
25902 +       .byte   0x60
25903 +       .long   0x21
25904 +       .byte   0x2
25905 +       .byte   0x23
25906 +       .uleb128 0x4
25907 +       .uleb128 0xa
25908 +       .long   .LASF610
25909 +       .byte   0x25
25910 +       .byte   0x61
25911 +       .long   0x21
25912 +       .byte   0x2
25913 +       .byte   0x23
25914 +       .uleb128 0x8
25915 +       .uleb128 0xa
25916 +       .long   .LASF544
25917 +       .byte   0x25
25918 +       .byte   0x64
25919 +       .long   0x5a
25920 +       .byte   0x2
25921 +       .byte   0x23
25922 +       .uleb128 0xc
25923 +       .uleb128 0xa
25924 +       .long   .LASF611
25925 +       .byte   0x25
25926 +       .byte   0x65
25927 +       .long   0x2ed1
25928 +       .byte   0x2
25929 +       .byte   0x23
25930 +       .uleb128 0x10
25931 +       .uleb128 0xa
25932 +       .long   .LASF612
25933 +       .byte   0x25
25934 +       .byte   0x66
25935 +       .long   0x2fba
25936 +       .byte   0x2
25937 +       .byte   0x23
25938 +       .uleb128 0x14
25939 +       .uleb128 0xa
25940 +       .long   .LASF613
25941 +       .byte   0x25
25942 +       .byte   0x67
25943 +       .long   0x5a
25944 +       .byte   0x2
25945 +       .byte   0x23
25946 +       .uleb128 0x18
25947 +       .uleb128 0xa
25948 +       .long   .LASF614
25949 +       .byte   0x25
25950 +       .byte   0x68
25951 +       .long   0x2ed1
25952 +       .byte   0x2
25953 +       .byte   0x23
25954 +       .uleb128 0x1c
25955 +       .uleb128 0xa
25956 +       .long   .LASF615
25957 +       .byte   0x25
25958 +       .byte   0x69
25959 +       .long   0x2fba
25960 +       .byte   0x2
25961 +       .byte   0x23
25962 +       .uleb128 0x20
25963 +       .uleb128 0xa
25964 +       .long   .LASF616
25965 +       .byte   0x25
25966 +       .byte   0x6a
25967 +       .long   0x2ed1
25968 +       .byte   0x2
25969 +       .byte   0x23
25970 +       .uleb128 0x24
25971 +       .uleb128 0xa
25972 +       .long   .LASF617
25973 +       .byte   0x25
25974 +       .byte   0x6b
25975 +       .long   0x2fba
25976 +       .byte   0x2
25977 +       .byte   0x23
25978 +       .uleb128 0x28
25979 +       .uleb128 0xa
25980 +       .long   .LASF618
25981 +       .byte   0x25
25982 +       .byte   0x6c
25983 +       .long   0x5a
25984 +       .byte   0x2
25985 +       .byte   0x23
25986 +       .uleb128 0x2c
25987 +       .uleb128 0xb
25988 +       .string "cpu"
25989 +       .byte   0x25
25990 +       .byte   0x6d
25991 +       .long   0x21
25992 +       .byte   0x2
25993 +       .byte   0x23
25994 +       .uleb128 0x30
25995 +       .uleb128 0xa
25996 +       .long   .LASF619
25997 +       .byte   0x25
25998 +       .byte   0x6e
25999 +       .long   0x2ea8
26000 +       .byte   0x2
26001 +       .byte   0x23
26002 +       .uleb128 0x34
26003 +       .byte   0x0
26004 +       .uleb128 0x4
26005 +       .byte   0x4
26006 +       .long   0x2ed1
26007 +       .uleb128 0x27
26008 +       .long   0x2fe5
26009 +       .long   .LASF739
26010 +       .byte   0x4
26011 +       .byte   0x39
26012 +       .byte   0x7
26013 +       .uleb128 0x28
26014 +       .long   .LASF620
26015 +       .sleb128 0
26016 +       .uleb128 0x28
26017 +       .long   .LASF621
26018 +       .sleb128 1
26019 +       .uleb128 0x28
26020 +       .long   .LASF622
26021 +       .sleb128 2
26022 +       .uleb128 0x28
26023 +       .long   .LASF623
26024 +       .sleb128 3
26025 +       .byte   0x0
26026 +       .uleb128 0x29
26027 +       .long   0x3037
26028 +       .string "pid"
26029 +       .byte   0x24
26030 +       .byte   0x38
26031 +       .byte   0x95
26032 +       .uleb128 0xa
26033 +       .long   .LASF322
26034 +       .byte   0x39
26035 +       .byte   0x2d
26036 +       .long   0x16c4
26037 +       .byte   0x2
26038 +       .byte   0x23
26039 +       .uleb128 0x0
26040 +       .uleb128 0xb
26041 +       .string "nr"
26042 +       .byte   0x39
26043 +       .byte   0x2f
26044 +       .long   0x21
26045 +       .byte   0x2
26046 +       .byte   0x23
26047 +       .uleb128 0x4
26048 +       .uleb128 0xa
26049 +       .long   .LASF624
26050 +       .byte   0x39
26051 +       .byte   0x30
26052 +       .long   0x1808
26053 +       .byte   0x2
26054 +       .byte   0x23
26055 +       .uleb128 0x8
26056 +       .uleb128 0xa
26057 +       .long   .LASF192
26058 +       .byte   0x39
26059 +       .byte   0x32
26060 +       .long   0x3037
26061 +       .byte   0x2
26062 +       .byte   0x23
26063 +       .uleb128 0x10
26064 +       .uleb128 0xb
26065 +       .string "rcu"
26066 +       .byte   0x39
26067 +       .byte   0x33
26068 +       .long   0x2ea8
26069 +       .byte   0x2
26070 +       .byte   0x23
26071 +       .uleb128 0x1c
26072 +       .byte   0x0
26073 +       .uleb128 0x12
26074 +       .long   0x3047
26075 +       .long   0x17eb
26076 +       .uleb128 0x13
26077 +       .long   0x28
26078 +       .byte   0x2
26079 +       .byte   0x0
26080 +       .uleb128 0xf
26081 +       .long   0x3070
26082 +       .long   .LASF625
26083 +       .byte   0xc
26084 +       .byte   0x39
26085 +       .byte   0x39
26086 +       .uleb128 0xa
26087 +       .long   .LASF400
26088 +       .byte   0x39
26089 +       .byte   0x3a
26090 +       .long   0x1808
26091 +       .byte   0x2
26092 +       .byte   0x23
26093 +       .uleb128 0x0
26094 +       .uleb128 0xb
26095 +       .string "pid"
26096 +       .byte   0x39
26097 +       .byte   0x3b
26098 +       .long   0x3070
26099 +       .byte   0x2
26100 +       .byte   0x23
26101 +       .uleb128 0x8
26102 +       .byte   0x0
26103 +       .uleb128 0x4
26104 +       .byte   0x4
26105 +       .long   0x2fe5
26106 +       .uleb128 0x9
26107 +       .long   0x308d
26108 +       .byte   0x4
26109 +       .byte   0x4f
26110 +       .byte   0xc
26111 +       .uleb128 0xa
26112 +       .long   .LASF626
26113 +       .byte   0x4f
26114 +       .byte   0xc
26115 +       .long   0x21
26116 +       .byte   0x2
26117 +       .byte   0x23
26118 +       .uleb128 0x0
26119 +       .byte   0x0
26120 +       .uleb128 0x7
26121 +       .long   .LASF627
26122 +       .byte   0x4f
26123 +       .byte   0xc
26124 +       .long   0x3076
26125 +       .uleb128 0xf
26126 +       .long   0x30b3
26127 +       .long   .LASF277
26128 +       .byte   0x4
26129 +       .byte   0x1b
26130 +       .byte   0x2f
26131 +       .uleb128 0xa
26132 +       .long   .LASF307
26133 +       .byte   0x1b
26134 +       .byte   0x30
26135 +       .long   0x30b3
26136 +       .byte   0x2
26137 +       .byte   0x23
26138 +       .uleb128 0x0
26139 +       .byte   0x0
26140 +       .uleb128 0x4
26141 +       .byte   0x4
26142 +       .long   0x3098
26143 +       .uleb128 0xf
26144 +       .long   0x30f0
26145 +       .long   .LASF628
26146 +       .byte   0xc
26147 +       .byte   0x1b
26148 +       .byte   0x3b
26149 +       .uleb128 0xa
26150 +       .long   .LASF509
26151 +       .byte   0x1b
26152 +       .byte   0x3f
26153 +       .long   0x3098
26154 +       .byte   0x2
26155 +       .byte   0x23
26156 +       .uleb128 0x0
26157 +       .uleb128 0xa
26158 +       .long   .LASF629
26159 +       .byte   0x1b
26160 +       .byte   0x47
26161 +       .long   0x5a
26162 +       .byte   0x2
26163 +       .byte   0x23
26164 +       .uleb128 0x4
26165 +       .uleb128 0xa
26166 +       .long   .LASF630
26167 +       .byte   0x1b
26168 +       .byte   0x53
26169 +       .long   0x30b3
26170 +       .byte   0x2
26171 +       .byte   0x23
26172 +       .uleb128 0x8
26173 +       .byte   0x0
26174 +       .uleb128 0x1a
26175 +       .long   0x3381
26176 +       .long   .LASF631
26177 +       .value  0x148
26178 +       .byte   0x1b
26179 +       .byte   0x8a
26180 +       .uleb128 0x16
26181 +       .long   .LASF632
26182 +       .byte   0x1a
26183 +       .value  0x213
26184 +       .long   0x1808
26185 +       .byte   0x2
26186 +       .byte   0x23
26187 +       .uleb128 0x0
26188 +       .uleb128 0x16
26189 +       .long   .LASF633
26190 +       .byte   0x1a
26191 +       .value  0x214
26192 +       .long   0x17bc
26193 +       .byte   0x2
26194 +       .byte   0x23
26195 +       .uleb128 0x8
26196 +       .uleb128 0x16
26197 +       .long   .LASF634
26198 +       .byte   0x1a
26199 +       .value  0x215
26200 +       .long   0x17bc
26201 +       .byte   0x2
26202 +       .byte   0x23
26203 +       .uleb128 0x10
26204 +       .uleb128 0x16
26205 +       .long   .LASF635
26206 +       .byte   0x1a
26207 +       .value  0x216
26208 +       .long   0x17bc
26209 +       .byte   0x2
26210 +       .byte   0x23
26211 +       .uleb128 0x18
26212 +       .uleb128 0x16
26213 +       .long   .LASF636
26214 +       .byte   0x1a
26215 +       .value  0x217
26216 +       .long   0x2f
26217 +       .byte   0x2
26218 +       .byte   0x23
26219 +       .uleb128 0x20
26220 +       .uleb128 0x16
26221 +       .long   .LASF637
26222 +       .byte   0x1a
26223 +       .value  0x218
26224 +       .long   0x16c4
26225 +       .byte   0x2
26226 +       .byte   0x23
26227 +       .uleb128 0x24
26228 +       .uleb128 0x16
26229 +       .long   .LASF638
26230 +       .byte   0x1a
26231 +       .value  0x219
26232 +       .long   0x77
26233 +       .byte   0x2
26234 +       .byte   0x23
26235 +       .uleb128 0x28
26236 +       .uleb128 0x16
26237 +       .long   .LASF639
26238 +       .byte   0x1a
26239 +       .value  0x21a
26240 +       .long   0x1dd
26241 +       .byte   0x2
26242 +       .byte   0x23
26243 +       .uleb128 0x2c
26244 +       .uleb128 0x16
26245 +       .long   .LASF640
26246 +       .byte   0x1a
26247 +       .value  0x21b
26248 +       .long   0x1e8
26249 +       .byte   0x2
26250 +       .byte   0x23
26251 +       .uleb128 0x30
26252 +       .uleb128 0x16
26253 +       .long   .LASF641
26254 +       .byte   0x1a
26255 +       .value  0x21c
26256 +       .long   0x19f
26257 +       .byte   0x2
26258 +       .byte   0x23
26259 +       .uleb128 0x34
26260 +       .uleb128 0x16
26261 +       .long   .LASF642
26262 +       .byte   0x1a
26263 +       .value  0x21d
26264 +       .long   0x2f
26265 +       .byte   0x2
26266 +       .byte   0x23
26267 +       .uleb128 0x38
26268 +       .uleb128 0x16
26269 +       .long   .LASF643
26270 +       .byte   0x1a
26271 +       .value  0x21e
26272 +       .long   0x1f3
26273 +       .byte   0x2
26274 +       .byte   0x23
26275 +       .uleb128 0x3c
26276 +       .uleb128 0x16
26277 +       .long   .LASF644
26278 +       .byte   0x1a
26279 +       .value  0x220
26280 +       .long   0x1730
26281 +       .byte   0x2
26282 +       .byte   0x23
26283 +       .uleb128 0x44
26284 +       .uleb128 0x16
26285 +       .long   .LASF645
26286 +       .byte   0x1a
26287 +       .value  0x222
26288 +       .long   0x173b
26289 +       .byte   0x2
26290 +       .byte   0x23
26291 +       .uleb128 0x48
26292 +       .uleb128 0x16
26293 +       .long   .LASF646
26294 +       .byte   0x1a
26295 +       .value  0x223
26296 +       .long   0x173b
26297 +       .byte   0x2
26298 +       .byte   0x23
26299 +       .uleb128 0x50
26300 +       .uleb128 0x16
26301 +       .long   .LASF647
26302 +       .byte   0x1a
26303 +       .value  0x224
26304 +       .long   0x173b
26305 +       .byte   0x2
26306 +       .byte   0x23
26307 +       .uleb128 0x58
26308 +       .uleb128 0x16
26309 +       .long   .LASF648
26310 +       .byte   0x1a
26311 +       .value  0x225
26312 +       .long   0x77
26313 +       .byte   0x2
26314 +       .byte   0x23
26315 +       .uleb128 0x60
26316 +       .uleb128 0x16
26317 +       .long   .LASF649
26318 +       .byte   0x1a
26319 +       .value  0x226
26320 +       .long   0x235
26321 +       .byte   0x2
26322 +       .byte   0x23
26323 +       .uleb128 0x64
26324 +       .uleb128 0x16
26325 +       .long   .LASF650
26326 +       .byte   0x1a
26327 +       .value  0x227
26328 +       .long   0x53
26329 +       .byte   0x2
26330 +       .byte   0x23
26331 +       .uleb128 0x68
26332 +       .uleb128 0x16
26333 +       .long   .LASF651
26334 +       .byte   0x1a
26335 +       .value  0x228
26336 +       .long   0xea
26337 +       .byte   0x2
26338 +       .byte   0x23
26339 +       .uleb128 0x6a
26340 +       .uleb128 0x16
26341 +       .long   .LASF652
26342 +       .byte   0x1a
26343 +       .value  0x229
26344 +       .long   0x1680
26345 +       .byte   0x2
26346 +       .byte   0x23
26347 +       .uleb128 0x6c
26348 +       .uleb128 0x16
26349 +       .long   .LASF653
26350 +       .byte   0x1a
26351 +       .value  0x22a
26352 +       .long   0x2d94
26353 +       .byte   0x2
26354 +       .byte   0x23
26355 +       .uleb128 0x70
26356 +       .uleb128 0x16
26357 +       .long   .LASF654
26358 +       .byte   0x1a
26359 +       .value  0x22b
26360 +       .long   0x18fa
26361 +       .byte   0x3
26362 +       .byte   0x23
26363 +       .uleb128 0x84
26364 +       .uleb128 0x16
26365 +       .long   .LASF655
26366 +       .byte   0x1a
26367 +       .value  0x22c
26368 +       .long   0x738a
26369 +       .byte   0x3
26370 +       .byte   0x23
26371 +       .uleb128 0x94
26372 +       .uleb128 0x16
26373 +       .long   .LASF656
26374 +       .byte   0x1a
26375 +       .value  0x22d
26376 +       .long   0x7538
26377 +       .byte   0x3
26378 +       .byte   0x23
26379 +       .uleb128 0x98
26380 +       .uleb128 0x16
26381 +       .long   .LASF657
26382 +       .byte   0x1a
26383 +       .value  0x22e
26384 +       .long   0x60d1
26385 +       .byte   0x3
26386 +       .byte   0x23
26387 +       .uleb128 0x9c
26388 +       .uleb128 0x16
26389 +       .long   .LASF658
26390 +       .byte   0x1a
26391 +       .value  0x22f
26392 +       .long   0x7641
26393 +       .byte   0x3
26394 +       .byte   0x23
26395 +       .uleb128 0xa0
26396 +       .uleb128 0x16
26397 +       .long   .LASF659
26398 +       .byte   0x1a
26399 +       .value  0x230
26400 +       .long   0x6e96
26401 +       .byte   0x3
26402 +       .byte   0x23
26403 +       .uleb128 0xa4
26404 +       .uleb128 0x16
26405 +       .long   .LASF660
26406 +       .byte   0x1a
26407 +       .value  0x231
26408 +       .long   0x6e9c
26409 +       .byte   0x3
26410 +       .byte   0x23
26411 +       .uleb128 0xa8
26412 +       .uleb128 0x16
26413 +       .long   .LASF661
26414 +       .byte   0x1a
26415 +       .value  0x235
26416 +       .long   0x17bc
26417 +       .byte   0x3
26418 +       .byte   0x23
26419 +       .uleb128 0xfc
26420 +       .uleb128 0x10
26421 +       .long   0x7207
26422 +       .byte   0x3
26423 +       .byte   0x23
26424 +       .uleb128 0x104
26425 +       .uleb128 0x16
26426 +       .long   .LASF662
26427 +       .byte   0x1a
26428 +       .value  0x23b
26429 +       .long   0x21
26430 +       .byte   0x3
26431 +       .byte   0x23
26432 +       .uleb128 0x108
26433 +       .uleb128 0x16
26434 +       .long   .LASF663
26435 +       .byte   0x1a
26436 +       .value  0x23d
26437 +       .long   0x141
26438 +       .byte   0x3
26439 +       .byte   0x23
26440 +       .uleb128 0x10c
26441 +       .uleb128 0x16
26442 +       .long   .LASF664
26443 +       .byte   0x1a
26444 +       .value  0x240
26445 +       .long   0x2f
26446 +       .byte   0x3
26447 +       .byte   0x23
26448 +       .uleb128 0x110
26449 +       .uleb128 0x16
26450 +       .long   .LASF665
26451 +       .byte   0x1a
26452 +       .value  0x241
26453 +       .long   0x764d
26454 +       .byte   0x3
26455 +       .byte   0x23
26456 +       .uleb128 0x114
26457 +       .uleb128 0x16
26458 +       .long   .LASF504
26459 +       .byte   0x1a
26460 +       .value  0x245
26461 +       .long   0x17bc
26462 +       .byte   0x3
26463 +       .byte   0x23
26464 +       .uleb128 0x118
26465 +       .uleb128 0x16
26466 +       .long   .LASF666
26467 +       .byte   0x1a
26468 +       .value  0x246
26469 +       .long   0x2d94
26470 +       .byte   0x3
26471 +       .byte   0x23
26472 +       .uleb128 0x120
26473 +       .uleb128 0x16
26474 +       .long   .LASF667
26475 +       .byte   0x1a
26476 +       .value  0x249
26477 +       .long   0x2f
26478 +       .byte   0x3
26479 +       .byte   0x23
26480 +       .uleb128 0x134
26481 +       .uleb128 0x16
26482 +       .long   .LASF668
26483 +       .byte   0x1a
26484 +       .value  0x24a
26485 +       .long   0x2f
26486 +       .byte   0x3
26487 +       .byte   0x23
26488 +       .uleb128 0x138
26489 +       .uleb128 0x16
26490 +       .long   .LASF669
26491 +       .byte   0x1a
26492 +       .value  0x24c
26493 +       .long   0x77
26494 +       .byte   0x3
26495 +       .byte   0x23
26496 +       .uleb128 0x13c
26497 +       .uleb128 0x16
26498 +       .long   .LASF670
26499 +       .byte   0x1a
26500 +       .value  0x24e
26501 +       .long   0x16c4
26502 +       .byte   0x3
26503 +       .byte   0x23
26504 +       .uleb128 0x140
26505 +       .uleb128 0x16
26506 +       .long   .LASF671
26507 +       .byte   0x1a
26508 +       .value  0x252
26509 +       .long   0x160b
26510 +       .byte   0x3
26511 +       .byte   0x23
26512 +       .uleb128 0x144
26513 +       .byte   0x0
26514 +       .uleb128 0x4
26515 +       .byte   0x4
26516 +       .long   0x30f0
26517 +       .uleb128 0x1f
26518 +       .long   0x36ad
26519 +       .long   .LASF672
26520 +       .value  0x1b0
26521 +       .byte   0x8
26522 +       .value  0x19d
26523 +       .uleb128 0x16
26524 +       .long   .LASF673
26525 +       .byte   0xb
26526 +       .value  0x144
26527 +       .long   0x3f9c
26528 +       .byte   0x2
26529 +       .byte   0x23
26530 +       .uleb128 0x0
26531 +       .uleb128 0x16
26532 +       .long   .LASF674
26533 +       .byte   0xb
26534 +       .value  0x145
26535 +       .long   0x17a1
26536 +       .byte   0x2
26537 +       .byte   0x23
26538 +       .uleb128 0x4
26539 +       .uleb128 0x16
26540 +       .long   .LASF675
26541 +       .byte   0xb
26542 +       .value  0x146
26543 +       .long   0x3f9c
26544 +       .byte   0x2
26545 +       .byte   0x23
26546 +       .uleb128 0x8
26547 +       .uleb128 0x16
26548 +       .long   .LASF676
26549 +       .byte   0xb
26550 +       .value  0x149
26551 +       .long   0x3fc6
26552 +       .byte   0x2
26553 +       .byte   0x23
26554 +       .uleb128 0xc
26555 +       .uleb128 0x16
26556 +       .long   .LASF677
26557 +       .byte   0xb
26558 +       .value  0x14a
26559 +       .long   0x3fdd
26560 +       .byte   0x2
26561 +       .byte   0x23
26562 +       .uleb128 0x10
26563 +       .uleb128 0x16
26564 +       .long   .LASF678
26565 +       .byte   0xb
26566 +       .value  0x14b
26567 +       .long   0x2f
26568 +       .byte   0x2
26569 +       .byte   0x23
26570 +       .uleb128 0x14
26571 +       .uleb128 0x16
26572 +       .long   .LASF679
26573 +       .byte   0xb
26574 +       .value  0x14c
26575 +       .long   0x2f
26576 +       .byte   0x2
26577 +       .byte   0x23
26578 +       .uleb128 0x18
26579 +       .uleb128 0x16
26580 +       .long   .LASF680
26581 +       .byte   0xb
26582 +       .value  0x14d
26583 +       .long   0x2f
26584 +       .byte   0x2
26585 +       .byte   0x23
26586 +       .uleb128 0x1c
26587 +       .uleb128 0x16
26588 +       .long   .LASF681
26589 +       .byte   0xb
26590 +       .value  0x14e
26591 +       .long   0x2f
26592 +       .byte   0x2
26593 +       .byte   0x23
26594 +       .uleb128 0x20
26595 +       .uleb128 0x17
26596 +       .string "pgd"
26597 +       .byte   0xb
26598 +       .value  0x14f
26599 +       .long   0x3fe3
26600 +       .byte   0x2
26601 +       .byte   0x23
26602 +       .uleb128 0x24
26603 +       .uleb128 0x16
26604 +       .long   .LASF682
26605 +       .byte   0xb
26606 +       .value  0x150
26607 +       .long   0x16c4
26608 +       .byte   0x2
26609 +       .byte   0x23
26610 +       .uleb128 0x28
26611 +       .uleb128 0x16
26612 +       .long   .LASF683
26613 +       .byte   0xb
26614 +       .value  0x151
26615 +       .long   0x16c4
26616 +       .byte   0x2
26617 +       .byte   0x23
26618 +       .uleb128 0x2c
26619 +       .uleb128 0x16
26620 +       .long   .LASF684
26621 +       .byte   0xb
26622 +       .value  0x152
26623 +       .long   0x21
26624 +       .byte   0x2
26625 +       .byte   0x23
26626 +       .uleb128 0x30
26627 +       .uleb128 0x16
26628 +       .long   .LASF685
26629 +       .byte   0xb
26630 +       .value  0x153
26631 +       .long   0x18fa
26632 +       .byte   0x2
26633 +       .byte   0x23
26634 +       .uleb128 0x34
26635 +       .uleb128 0x16
26636 +       .long   .LASF686
26637 +       .byte   0xb
26638 +       .value  0x154
26639 +       .long   0x1680
26640 +       .byte   0x2
26641 +       .byte   0x23
26642 +       .uleb128 0x44
26643 +       .uleb128 0x16
26644 +       .long   .LASF687
26645 +       .byte   0xb
26646 +       .value  0x156
26647 +       .long   0x17bc
26648 +       .byte   0x2
26649 +       .byte   0x23
26650 +       .uleb128 0x48
26651 +       .uleb128 0x16
26652 +       .long   .LASF688
26653 +       .byte   0xb
26654 +       .value  0x15e
26655 +       .long   0x3eb1
26656 +       .byte   0x2
26657 +       .byte   0x23
26658 +       .uleb128 0x50
26659 +       .uleb128 0x16
26660 +       .long   .LASF689
26661 +       .byte   0xb
26662 +       .value  0x15f
26663 +       .long   0x3eb1
26664 +       .byte   0x2
26665 +       .byte   0x23
26666 +       .uleb128 0x54
26667 +       .uleb128 0x16
26668 +       .long   .LASF690
26669 +       .byte   0xb
26670 +       .value  0x161
26671 +       .long   0x2f
26672 +       .byte   0x2
26673 +       .byte   0x23
26674 +       .uleb128 0x58
26675 +       .uleb128 0x16
26676 +       .long   .LASF691
26677 +       .byte   0xb
26678 +       .value  0x162
26679 +       .long   0x2f
26680 +       .byte   0x2
26681 +       .byte   0x23
26682 +       .uleb128 0x5c
26683 +       .uleb128 0x16
26684 +       .long   .LASF692
26685 +       .byte   0xb
26686 +       .value  0x164
26687 +       .long   0x2f
26688 +       .byte   0x2
26689 +       .byte   0x23
26690 +       .uleb128 0x60
26691 +       .uleb128 0x16
26692 +       .long   .LASF693
26693 +       .byte   0xb
26694 +       .value  0x164
26695 +       .long   0x2f
26696 +       .byte   0x2
26697 +       .byte   0x23
26698 +       .uleb128 0x64
26699 +       .uleb128 0x16
26700 +       .long   .LASF694
26701 +       .byte   0xb
26702 +       .value  0x164
26703 +       .long   0x2f
26704 +       .byte   0x2
26705 +       .byte   0x23
26706 +       .uleb128 0x68
26707 +       .uleb128 0x16
26708 +       .long   .LASF695
26709 +       .byte   0xb
26710 +       .value  0x164
26711 +       .long   0x2f
26712 +       .byte   0x2
26713 +       .byte   0x23
26714 +       .uleb128 0x6c
26715 +       .uleb128 0x16
26716 +       .long   .LASF696
26717 +       .byte   0xb
26718 +       .value  0x165
26719 +       .long   0x2f
26720 +       .byte   0x2
26721 +       .byte   0x23
26722 +       .uleb128 0x70
26723 +       .uleb128 0x16
26724 +       .long   .LASF697
26725 +       .byte   0xb
26726 +       .value  0x165
26727 +       .long   0x2f
26728 +       .byte   0x2
26729 +       .byte   0x23
26730 +       .uleb128 0x74
26731 +       .uleb128 0x16
26732 +       .long   .LASF698
26733 +       .byte   0xb
26734 +       .value  0x165
26735 +       .long   0x2f
26736 +       .byte   0x2
26737 +       .byte   0x23
26738 +       .uleb128 0x78
26739 +       .uleb128 0x16
26740 +       .long   .LASF699
26741 +       .byte   0xb
26742 +       .value  0x165
26743 +       .long   0x2f
26744 +       .byte   0x2
26745 +       .byte   0x23
26746 +       .uleb128 0x7c
26747 +       .uleb128 0x16
26748 +       .long   .LASF700
26749 +       .byte   0xb
26750 +       .value  0x166
26751 +       .long   0x2f
26752 +       .byte   0x3
26753 +       .byte   0x23
26754 +       .uleb128 0x80
26755 +       .uleb128 0x16
26756 +       .long   .LASF701
26757 +       .byte   0xb
26758 +       .value  0x166
26759 +       .long   0x2f
26760 +       .byte   0x3
26761 +       .byte   0x23
26762 +       .uleb128 0x84
26763 +       .uleb128 0x16
26764 +       .long   .LASF702
26765 +       .byte   0xb
26766 +       .value  0x166
26767 +       .long   0x2f
26768 +       .byte   0x3
26769 +       .byte   0x23
26770 +       .uleb128 0x88
26771 +       .uleb128 0x16
26772 +       .long   .LASF703
26773 +       .byte   0xb
26774 +       .value  0x166
26775 +       .long   0x2f
26776 +       .byte   0x3
26777 +       .byte   0x23
26778 +       .uleb128 0x8c
26779 +       .uleb128 0x16
26780 +       .long   .LASF704
26781 +       .byte   0xb
26782 +       .value  0x167
26783 +       .long   0x2f
26784 +       .byte   0x3
26785 +       .byte   0x23
26786 +       .uleb128 0x90
26787 +       .uleb128 0x17
26788 +       .string "brk"
26789 +       .byte   0xb
26790 +       .value  0x167
26791 +       .long   0x2f
26792 +       .byte   0x3
26793 +       .byte   0x23
26794 +       .uleb128 0x94
26795 +       .uleb128 0x16
26796 +       .long   .LASF705
26797 +       .byte   0xb
26798 +       .value  0x167
26799 +       .long   0x2f
26800 +       .byte   0x3
26801 +       .byte   0x23
26802 +       .uleb128 0x98
26803 +       .uleb128 0x16
26804 +       .long   .LASF706
26805 +       .byte   0xb
26806 +       .value  0x168
26807 +       .long   0x2f
26808 +       .byte   0x3
26809 +       .byte   0x23
26810 +       .uleb128 0x9c
26811 +       .uleb128 0x16
26812 +       .long   .LASF707
26813 +       .byte   0xb
26814 +       .value  0x168
26815 +       .long   0x2f
26816 +       .byte   0x3
26817 +       .byte   0x23
26818 +       .uleb128 0xa0
26819 +       .uleb128 0x16
26820 +       .long   .LASF708
26821 +       .byte   0xb
26822 +       .value  0x168
26823 +       .long   0x2f
26824 +       .byte   0x3
26825 +       .byte   0x23
26826 +       .uleb128 0xa4
26827 +       .uleb128 0x16
26828 +       .long   .LASF709
26829 +       .byte   0xb
26830 +       .value  0x168
26831 +       .long   0x2f
26832 +       .byte   0x3
26833 +       .byte   0x23
26834 +       .uleb128 0xa8
26835 +       .uleb128 0x16
26836 +       .long   .LASF710
26837 +       .byte   0xb
26838 +       .value  0x16a
26839 +       .long   0x3fe9
26840 +       .byte   0x3
26841 +       .byte   0x23
26842 +       .uleb128 0xac
26843 +       .uleb128 0x16
26844 +       .long   .LASF711
26845 +       .byte   0xb
26846 +       .value  0x16c
26847 +       .long   0x923
26848 +       .byte   0x3
26849 +       .byte   0x23
26850 +       .uleb128 0x15c
26851 +       .uleb128 0x16
26852 +       .long   .LASF712
26853 +       .byte   0xb
26854 +       .value  0x16f
26855 +       .long   0x19a9
26856 +       .byte   0x3
26857 +       .byte   0x23
26858 +       .uleb128 0x160
26859 +       .uleb128 0x16
26860 +       .long   .LASF713
26861 +       .byte   0xb
26862 +       .value  0x178
26863 +       .long   0x77
26864 +       .byte   0x3
26865 +       .byte   0x23
26866 +       .uleb128 0x180
26867 +       .uleb128 0x16
26868 +       .long   .LASF714
26869 +       .byte   0xb
26870 +       .value  0x179
26871 +       .long   0x77
26872 +       .byte   0x3
26873 +       .byte   0x23
26874 +       .uleb128 0x184
26875 +       .uleb128 0x16
26876 +       .long   .LASF715
26877 +       .byte   0xb
26878 +       .value  0x17a
26879 +       .long   0x77
26880 +       .byte   0x3
26881 +       .byte   0x23
26882 +       .uleb128 0x188
26883 +       .uleb128 0x20
26884 +       .long   .LASF716
26885 +       .byte   0xb
26886 +       .value  0x17c
26887 +       .long   0x112
26888 +       .byte   0x1
26889 +       .byte   0x2
26890 +       .byte   0x6
26891 +       .byte   0x3
26892 +       .byte   0x23
26893 +       .uleb128 0x18c
26894 +       .uleb128 0x16
26895 +       .long   .LASF717
26896 +       .byte   0xb
26897 +       .value  0x17f
26898 +       .long   0x21
26899 +       .byte   0x3
26900 +       .byte   0x23
26901 +       .uleb128 0x190
26902 +       .uleb128 0x16
26903 +       .long   .LASF718
26904 +       .byte   0xb
26905 +       .value  0x180
26906 +       .long   0x3ff9
26907 +       .byte   0x3
26908 +       .byte   0x23
26909 +       .uleb128 0x194
26910 +       .uleb128 0x16
26911 +       .long   .LASF719
26912 +       .byte   0xb
26913 +       .value  0x180
26914 +       .long   0x28fe
26915 +       .byte   0x3
26916 +       .byte   0x23
26917 +       .uleb128 0x198
26918 +       .uleb128 0x16
26919 +       .long   .LASF720
26920 +       .byte   0xb
26921 +       .value  0x183
26922 +       .long   0x16a2
26923 +       .byte   0x3
26924 +       .byte   0x23
26925 +       .uleb128 0x1a8
26926 +       .uleb128 0x16
26927 +       .long   .LASF721
26928 +       .byte   0xb
26929 +       .value  0x184
26930 +       .long   0x3dc4
26931 +       .byte   0x3
26932 +       .byte   0x23
26933 +       .uleb128 0x1ac
26934 +       .byte   0x0
26935 +       .uleb128 0x4
26936 +       .byte   0x4
26937 +       .long   0x3387
26938 +       .uleb128 0xf
26939 +       .long   0x36dc
26940 +       .long   .LASF722
26941 +       .byte   0x10
26942 +       .byte   0x50
26943 +       .byte   0x50
26944 +       .uleb128 0xa
26945 +       .long   .LASF723
26946 +       .byte   0x50
26947 +       .byte   0x51
26948 +       .long   0x17bc
26949 +       .byte   0x2
26950 +       .byte   0x23
26951 +       .uleb128 0x0
26952 +       .uleb128 0xa
26953 +       .long   .LASF724
26954 +       .byte   0x50
26955 +       .byte   0x52
26956 +       .long   0x17bc
26957 +       .byte   0x2
26958 +       .byte   0x23
26959 +       .uleb128 0x8
26960 +       .byte   0x0
26961 +       .uleb128 0xf
26962 +       .long   0x3705
26963 +       .long   .LASF725
26964 +       .byte   0x8
26965 +       .byte   0x4d
26966 +       .byte   0x2a
26967 +       .uleb128 0xa
26968 +       .long   .LASF726
26969 +       .byte   0x4d
26970 +       .byte   0x2b
26971 +       .long   0x2f
26972 +       .byte   0x2
26973 +       .byte   0x23
26974 +       .uleb128 0x0
26975 +       .uleb128 0xa
26976 +       .long   .LASF727
26977 +       .byte   0x4d
26978 +       .byte   0x2c
26979 +       .long   0x2f
26980 +       .byte   0x2
26981 +       .byte   0x23
26982 +       .uleb128 0x4
26983 +       .byte   0x0
26984 +       .uleb128 0x25
26985 +       .long   0x371d
26986 +       .long   .LASF728
26987 +       .byte   0x8
26988 +       .byte   0x1b
26989 +       .byte   0x6
26990 +       .uleb128 0xe
26991 +       .long   .LASF729
26992 +       .byte   0x4c
26993 +       .byte   0x2f
26994 +       .long   0x17e
26995 +       .byte   0x0
26996 +       .uleb128 0x7
26997 +       .long   .LASF730
26998 +       .byte   0x4c
26999 +       .byte   0x3b
27000 +       .long   0x3705
27001 +       .uleb128 0xf
27002 +       .long   0x377b
27003 +       .long   .LASF731
27004 +       .byte   0x18
27005 +       .byte   0xa
27006 +       .byte   0xb
27007 +       .uleb128 0xa
27008 +       .long   .LASF376
27009 +       .byte   0xa
27010 +       .byte   0xc
27011 +       .long   0x17bc
27012 +       .byte   0x2
27013 +       .byte   0x23
27014 +       .uleb128 0x0
27015 +       .uleb128 0xa
27016 +       .long   .LASF732
27017 +       .byte   0xa
27018 +       .byte   0xd
27019 +       .long   0x2f
27020 +       .byte   0x2
27021 +       .byte   0x23
27022 +       .uleb128 0x8
27023 +       .uleb128 0xa
27024 +       .long   .LASF733
27025 +       .byte   0xa
27026 +       .byte   0xf
27027 +       .long   0x3787
27028 +       .byte   0x2
27029 +       .byte   0x23
27030 +       .uleb128 0xc
27031 +       .uleb128 0xa
27032 +       .long   .LASF734
27033 +       .byte   0xa
27034 +       .byte   0x10
27035 +       .long   0x2f
27036 +       .byte   0x2
27037 +       .byte   0x23
27038 +       .uleb128 0x10
27039 +       .uleb128 0xa
27040 +       .long   .LASF735
27041 +       .byte   0xa
27042 +       .byte   0x12
27043 +       .long   0x380f
27044 +       .byte   0x2
27045 +       .byte   0x23
27046 +       .uleb128 0x14
27047 +       .byte   0x0
27048 +       .uleb128 0x5
27049 +       .long   0x3787
27050 +       .byte   0x1
27051 +       .uleb128 0x6
27052 +       .long   0x2f
27053 +       .byte   0x0
27054 +       .uleb128 0x4
27055 +       .byte   0x4
27056 +       .long   0x377b
27057 +       .uleb128 0x1a
27058 +       .long   0x380f
27059 +       .long   .LASF736
27060 +       .value  0x1080
27061 +       .byte   0xa
27062 +       .byte   0x9
27063 +       .uleb128 0xa
27064 +       .long   .LASF285
27065 +       .byte   0x1
27066 +       .byte   0x46
27067 +       .long   0x1680
27068 +       .byte   0x2
27069 +       .byte   0x23
27070 +       .uleb128 0x0
27071 +       .uleb128 0xa
27072 +       .long   .LASF737
27073 +       .byte   0x1
27074 +       .byte   0x47
27075 +       .long   0x8791
27076 +       .byte   0x2
27077 +       .byte   0x23
27078 +       .uleb128 0x4
27079 +       .uleb128 0xa
27080 +       .long   .LASF738
27081 +       .byte   0x1
27082 +       .byte   0x48
27083 +       .long   0x2f
27084 +       .byte   0x2
27085 +       .byte   0x23
27086 +       .uleb128 0x8
27087 +       .uleb128 0xb
27088 +       .string "tv1"
27089 +       .byte   0x1
27090 +       .byte   0x49
27091 +       .long   0x8786
27092 +       .byte   0x2
27093 +       .byte   0x23
27094 +       .uleb128 0xc
27095 +       .uleb128 0xb
27096 +       .string "tv2"
27097 +       .byte   0x1
27098 +       .byte   0x4a
27099 +       .long   0x874f
27100 +       .byte   0x3
27101 +       .byte   0x23
27102 +       .uleb128 0x80c
27103 +       .uleb128 0xb
27104 +       .string "tv3"
27105 +       .byte   0x1
27106 +       .byte   0x4b
27107 +       .long   0x874f
27108 +       .byte   0x3
27109 +       .byte   0x23
27110 +       .uleb128 0xa0c
27111 +       .uleb128 0xb
27112 +       .string "tv4"
27113 +       .byte   0x1
27114 +       .byte   0x4c
27115 +       .long   0x874f
27116 +       .byte   0x3
27117 +       .byte   0x23
27118 +       .uleb128 0xc0c
27119 +       .uleb128 0xb
27120 +       .string "tv5"
27121 +       .byte   0x1
27122 +       .byte   0x4d
27123 +       .long   0x874f
27124 +       .byte   0x3
27125 +       .byte   0x23
27126 +       .uleb128 0xe0c
27127 +       .byte   0x0
27128 +       .uleb128 0x4
27129 +       .byte   0x4
27130 +       .long   0x378d
27131 +       .uleb128 0x27
27132 +       .long   0x382e
27133 +       .long   .LASF740
27134 +       .byte   0x4
27135 +       .byte   0xa
27136 +       .byte   0xaa
27137 +       .uleb128 0x28
27138 +       .long   .LASF741
27139 +       .sleb128 0
27140 +       .uleb128 0x28
27141 +       .long   .LASF742
27142 +       .sleb128 1
27143 +       .byte   0x0
27144 +       .uleb128 0xf
27145 +       .long   0x3881
27146 +       .long   .LASF743
27147 +       .byte   0x20
27148 +       .byte   0xa
27149 +       .byte   0xa9
27150 +       .uleb128 0xa
27151 +       .long   .LASF400
27152 +       .byte   0x4b
27153 +       .byte   0x71
27154 +       .long   0x1764
27155 +       .byte   0x2
27156 +       .byte   0x23
27157 +       .uleb128 0x0
27158 +       .uleb128 0xa
27159 +       .long   .LASF732
27160 +       .byte   0x4b
27161 +       .byte   0x72
27162 +       .long   0x371d
27163 +       .byte   0x2
27164 +       .byte   0x23
27165 +       .uleb128 0xc
27166 +       .uleb128 0xa
27167 +       .long   .LASF733
27168 +       .byte   0x4b
27169 +       .byte   0x73
27170 +       .long   0x3897
27171 +       .byte   0x2
27172 +       .byte   0x23
27173 +       .uleb128 0x14
27174 +       .uleb128 0xa
27175 +       .long   .LASF735
27176 +       .byte   0x4b
27177 +       .byte   0x74
27178 +       .long   0x391a
27179 +       .byte   0x2
27180 +       .byte   0x23
27181 +       .uleb128 0x18
27182 +       .uleb128 0xa
27183 +       .long   .LASF169
27184 +       .byte   0x4b
27185 +       .byte   0x75
27186 +       .long   0x2f
27187 +       .byte   0x2
27188 +       .byte   0x23
27189 +       .uleb128 0x1c
27190 +       .byte   0x0
27191 +       .uleb128 0x11
27192 +       .long   0x3891
27193 +       .byte   0x1
27194 +       .long   0x3815
27195 +       .uleb128 0x6
27196 +       .long   0x3891
27197 +       .byte   0x0
27198 +       .uleb128 0x4
27199 +       .byte   0x4
27200 +       .long   0x382e
27201 +       .uleb128 0x4
27202 +       .byte   0x4
27203 +       .long   0x3881
27204 +       .uleb128 0xf
27205 +       .long   0x391a
27206 +       .long   .LASF744
27207 +       .byte   0x28
27208 +       .byte   0x4b
27209 +       .byte   0x18
27210 +       .uleb128 0xa
27211 +       .long   .LASF745
27212 +       .byte   0x4b
27213 +       .byte   0x9d
27214 +       .long   0x3957
27215 +       .byte   0x2
27216 +       .byte   0x23
27217 +       .uleb128 0x0
27218 +       .uleb128 0xa
27219 +       .long   .LASF746
27220 +       .byte   0x4b
27221 +       .byte   0x9e
27222 +       .long   0x1cb
27223 +       .byte   0x2
27224 +       .byte   0x23
27225 +       .uleb128 0x4
27226 +       .uleb128 0xa
27227 +       .long   .LASF747
27228 +       .byte   0x4b
27229 +       .byte   0x9f
27230 +       .long   0x17a1
27231 +       .byte   0x2
27232 +       .byte   0x23
27233 +       .uleb128 0x8
27234 +       .uleb128 0xa
27235 +       .long   .LASF310
27236 +       .byte   0x4b
27237 +       .byte   0xa0
27238 +       .long   0x179b
27239 +       .byte   0x2
27240 +       .byte   0x23
27241 +       .uleb128 0xc
27242 +       .uleb128 0xa
27243 +       .long   .LASF748
27244 +       .byte   0x4b
27245 +       .byte   0xa1
27246 +       .long   0x371d
27247 +       .byte   0x2
27248 +       .byte   0x23
27249 +       .uleb128 0x10
27250 +       .uleb128 0xa
27251 +       .long   .LASF749
27252 +       .byte   0x4b
27253 +       .byte   0xa2
27254 +       .long   0x3963
27255 +       .byte   0x2
27256 +       .byte   0x23
27257 +       .uleb128 0x18
27258 +       .uleb128 0xa
27259 +       .long   .LASF750
27260 +       .byte   0x4b
27261 +       .byte   0xa3
27262 +       .long   0x3963
27263 +       .byte   0x2
27264 +       .byte   0x23
27265 +       .uleb128 0x1c
27266 +       .uleb128 0xa
27267 +       .long   .LASF751
27268 +       .byte   0x4b
27269 +       .byte   0xa4
27270 +       .long   0x371d
27271 +       .byte   0x2
27272 +       .byte   0x23
27273 +       .uleb128 0x20
27274 +       .byte   0x0
27275 +       .uleb128 0x4
27276 +       .byte   0x4
27277 +       .long   0x389d
27278 +       .uleb128 0xf
27279 +       .long   0x3957
27280 +       .long   .LASF752
27281 +       .byte   0x54
27282 +       .byte   0x4b
27283 +       .byte   0x19
27284 +       .uleb128 0xa
27285 +       .long   .LASF285
27286 +       .byte   0x4b
27287 +       .byte   0xc2
27288 +       .long   0x1680
27289 +       .byte   0x2
27290 +       .byte   0x23
27291 +       .uleb128 0x0
27292 +       .uleb128 0xa
27293 +       .long   .LASF753
27294 +       .byte   0x4b
27295 +       .byte   0xc3
27296 +       .long   0x161c
27297 +       .byte   0x2
27298 +       .byte   0x23
27299 +       .uleb128 0x4
27300 +       .uleb128 0xa
27301 +       .long   .LASF754
27302 +       .byte   0x4b
27303 +       .byte   0xc4
27304 +       .long   0x3969
27305 +       .byte   0x2
27306 +       .byte   0x23
27307 +       .uleb128 0x4
27308 +       .byte   0x0
27309 +       .uleb128 0x4
27310 +       .byte   0x4
27311 +       .long   0x3920
27312 +       .uleb128 0x18
27313 +       .byte   0x1
27314 +       .long   0x371d
27315 +       .uleb128 0x4
27316 +       .byte   0x4
27317 +       .long   0x395d
27318 +       .uleb128 0x12
27319 +       .long   0x3979
27320 +       .long   0x389d
27321 +       .uleb128 0x13
27322 +       .long   0x28
27323 +       .byte   0x1
27324 +       .byte   0x0
27325 +       .uleb128 0x2a
27326 +       .long   .LASF755
27327 +       .byte   0x0
27328 +       .byte   0x53
27329 +       .byte   0x23
27330 +       .uleb128 0x7
27331 +       .long   .LASF756
27332 +       .byte   0x34
27333 +       .byte   0x10
27334 +       .long   0x398c
27335 +       .uleb128 0x4
27336 +       .byte   0x4
27337 +       .long   0x3992
27338 +       .uleb128 0x5
27339 +       .long   0x399e
27340 +       .byte   0x1
27341 +       .uleb128 0x6
27342 +       .long   0x399e
27343 +       .byte   0x0
27344 +       .uleb128 0x4
27345 +       .byte   0x4
27346 +       .long   0x39a4
27347 +       .uleb128 0xf
27348 +       .long   0x39db
27349 +       .long   .LASF757
27350 +       .byte   0x10
27351 +       .byte   0x34
27352 +       .byte   0xf
27353 +       .uleb128 0xa
27354 +       .long   .LASF734
27355 +       .byte   0x34
27356 +       .byte   0x19
27357 +       .long   0x16cf
27358 +       .byte   0x2
27359 +       .byte   0x23
27360 +       .uleb128 0x0
27361 +       .uleb128 0xa
27362 +       .long   .LASF376
27363 +       .byte   0x34
27364 +       .byte   0x1d
27365 +       .long   0x17bc
27366 +       .byte   0x2
27367 +       .byte   0x23
27368 +       .uleb128 0x4
27369 +       .uleb128 0xa
27370 +       .long   .LASF316
27371 +       .byte   0x34
27372 +       .byte   0x1e
27373 +       .long   0x3981
27374 +       .byte   0x2
27375 +       .byte   0x23
27376 +       .uleb128 0xc
27377 +       .byte   0x0
27378 +       .uleb128 0xf
27379 +       .long   0x3a04
27380 +       .long   .LASF758
27381 +       .byte   0x28
27382 +       .byte   0x34
27383 +       .byte   0x23
27384 +       .uleb128 0xa
27385 +       .long   .LASF759
27386 +       .byte   0x34
27387 +       .byte   0x24
27388 +       .long   0x39a4
27389 +       .byte   0x2
27390 +       .byte   0x23
27391 +       .uleb128 0x0
27392 +       .uleb128 0xa
27393 +       .long   .LASF760
27394 +       .byte   0x34
27395 +       .byte   0x25
27396 +       .long   0x3728
27397 +       .byte   0x2
27398 +       .byte   0x23
27399 +       .uleb128 0x10
27400 +       .byte   0x0
27401 +       .uleb128 0xf
27402 +       .long   0x3a49
27403 +       .long   .LASF761
27404 +       .byte   0x20
27405 +       .byte   0x35
27406 +       .byte   0x39
27407 +       .uleb128 0xa
27408 +       .long   .LASF734
27409 +       .byte   0x35
27410 +       .byte   0x3a
27411 +       .long   0x157
27412 +       .byte   0x2
27413 +       .byte   0x23
27414 +       .uleb128 0x0
27415 +       .uleb128 0xb
27416 +       .string "obj"
27417 +       .byte   0x35
27418 +       .byte   0x3b
27419 +       .long   0x157
27420 +       .byte   0x2
27421 +       .byte   0x23
27422 +       .uleb128 0x8
27423 +       .uleb128 0xb
27424 +       .string "res"
27425 +       .byte   0x35
27426 +       .byte   0x3c
27427 +       .long   0x14c
27428 +       .byte   0x2
27429 +       .byte   0x23
27430 +       .uleb128 0x10
27431 +       .uleb128 0xa
27432 +       .long   .LASF762
27433 +       .byte   0x35
27434 +       .byte   0x3d
27435 +       .long   0x14c
27436 +       .byte   0x2
27437 +       .byte   0x23
27438 +       .uleb128 0x18
27439 +       .byte   0x0
27440 +       .uleb128 0xf
27441 +       .long   0x3a72
27442 +       .long   .LASF763
27443 +       .byte   0x8
27444 +       .byte   0x36
27445 +       .byte   0x15
27446 +       .uleb128 0xa
27447 +       .long   .LASF764
27448 +       .byte   0x36
27449 +       .byte   0x16
27450 +       .long   0x160b
27451 +       .byte   0x2
27452 +       .byte   0x23
27453 +       .uleb128 0x0
27454 +       .uleb128 0xa
27455 +       .long   .LASF765
27456 +       .byte   0x36
27457 +       .byte   0x17
27458 +       .long   0x6c
27459 +       .byte   0x2
27460 +       .byte   0x23
27461 +       .uleb128 0x4
27462 +       .byte   0x0
27463 +       .uleb128 0xc
27464 +       .long   0x3a91
27465 +       .byte   0x4
27466 +       .byte   0x33
27467 +       .byte   0x63
27468 +       .uleb128 0xe
27469 +       .long   .LASF237
27470 +       .byte   0x33
27471 +       .byte   0x64
27472 +       .long   0x160b
27473 +       .uleb128 0x26
27474 +       .string "tsk"
27475 +       .byte   0x33
27476 +       .byte   0x65
27477 +       .long   0x15f9
27478 +       .byte   0x0
27479 +       .uleb128 0xf
27480 +       .long   0x3bfd
27481 +       .long   .LASF766
27482 +       .byte   0x88
27483 +       .byte   0x33
27484 +       .byte   0x57
27485 +       .uleb128 0xa
27486 +       .long   .LASF767
27487 +       .byte   0x33
27488 +       .byte   0x58
27489 +       .long   0x17bc
27490 +       .byte   0x2
27491 +       .byte   0x23
27492 +       .uleb128 0x0
27493 +       .uleb128 0xa
27494 +       .long   .LASF768
27495 +       .byte   0x33
27496 +       .byte   0x59
27497 +       .long   0x5a
27498 +       .byte   0x2
27499 +       .byte   0x23
27500 +       .uleb128 0x8
27501 +       .uleb128 0xa
27502 +       .long   .LASF769
27503 +       .byte   0x33
27504 +       .byte   0x5a
27505 +       .long   0x21
27506 +       .byte   0x2
27507 +       .byte   0x23
27508 +       .uleb128 0xc
27509 +       .uleb128 0xa
27510 +       .long   .LASF770
27511 +       .byte   0x33
27512 +       .byte   0x5b
27513 +       .long   0x77
27514 +       .byte   0x2
27515 +       .byte   0x23
27516 +       .uleb128 0x10
27517 +       .uleb128 0xa
27518 +       .long   .LASF771
27519 +       .byte   0x33
27520 +       .byte   0x5d
27521 +       .long   0x3cfd
27522 +       .byte   0x2
27523 +       .byte   0x23
27524 +       .uleb128 0x14
27525 +       .uleb128 0xa
27526 +       .long   .LASF772
27527 +       .byte   0x33
27528 +       .byte   0x5e
27529 +       .long   0x3dc4
27530 +       .byte   0x2
27531 +       .byte   0x23
27532 +       .uleb128 0x18
27533 +       .uleb128 0xa
27534 +       .long   .LASF773
27535 +       .byte   0x33
27536 +       .byte   0x5f
27537 +       .long   0x3deb
27538 +       .byte   0x2
27539 +       .byte   0x23
27540 +       .uleb128 0x1c
27541 +       .uleb128 0xa
27542 +       .long   .LASF774
27543 +       .byte   0x33
27544 +       .byte   0x60
27545 +       .long   0x3e01
27546 +       .byte   0x2
27547 +       .byte   0x23
27548 +       .uleb128 0x20
27549 +       .uleb128 0xa
27550 +       .long   .LASF775
27551 +       .byte   0x33
27552 +       .byte   0x61
27553 +       .long   0x3e13
27554 +       .byte   0x2
27555 +       .byte   0x23
27556 +       .uleb128 0x24
27557 +       .uleb128 0xa
27558 +       .long   .LASF776
27559 +       .byte   0x33
27560 +       .byte   0x66
27561 +       .long   0x3a72
27562 +       .byte   0x2
27563 +       .byte   0x23
27564 +       .uleb128 0x28
27565 +       .uleb128 0xa
27566 +       .long   .LASF777
27567 +       .byte   0x33
27568 +       .byte   0x68
27569 +       .long   0x157
27570 +       .byte   0x2
27571 +       .byte   0x23
27572 +       .uleb128 0x2c
27573 +       .uleb128 0xa
27574 +       .long   .LASF778
27575 +       .byte   0x33
27576 +       .byte   0x69
27577 +       .long   0x1840
27578 +       .byte   0x2
27579 +       .byte   0x23
27580 +       .uleb128 0x34
27581 +       .uleb128 0xa
27582 +       .long   .LASF779
27583 +       .byte   0x33
27584 +       .byte   0x6a
27585 +       .long   0x1f3
27586 +       .byte   0x2
27587 +       .byte   0x23
27588 +       .uleb128 0x48
27589 +       .uleb128 0xa
27590 +       .long   .LASF780
27591 +       .byte   0x33
27592 +       .byte   0x6c
27593 +       .long   0x16c4
27594 +       .byte   0x2
27595 +       .byte   0x23
27596 +       .uleb128 0x50
27597 +       .uleb128 0xa
27598 +       .long   .LASF315
27599 +       .byte   0x33
27600 +       .byte   0x6d
27601 +       .long   0x160b
27602 +       .byte   0x2
27603 +       .byte   0x23
27604 +       .uleb128 0x54
27605 +       .uleb128 0xa
27606 +       .long   .LASF781
27607 +       .byte   0x33
27608 +       .byte   0x6f
27609 +       .long   0x53
27610 +       .byte   0x2
27611 +       .byte   0x23
27612 +       .uleb128 0x58
27613 +       .uleb128 0xa
27614 +       .long   .LASF782
27615 +       .byte   0x33
27616 +       .byte   0x70
27617 +       .long   0x1fe
27618 +       .byte   0x2
27619 +       .byte   0x23
27620 +       .uleb128 0x5c
27621 +       .uleb128 0xa
27622 +       .long   .LASF783
27623 +       .byte   0x33
27624 +       .byte   0x71
27625 +       .long   0xb5
27626 +       .byte   0x2
27627 +       .byte   0x23
27628 +       .uleb128 0x60
27629 +       .uleb128 0xa
27630 +       .long   .LASF784
27631 +       .byte   0x33
27632 +       .byte   0x72
27633 +       .long   0x1fe
27634 +       .byte   0x2
27635 +       .byte   0x23
27636 +       .uleb128 0x64
27637 +       .uleb128 0xa
27638 +       .long   .LASF785
27639 +       .byte   0x33
27640 +       .byte   0x73
27641 +       .long   0x3a49
27642 +       .byte   0x2
27643 +       .byte   0x23
27644 +       .uleb128 0x68
27645 +       .uleb128 0xa
27646 +       .long   .LASF786
27647 +       .byte   0x33
27648 +       .byte   0x74
27649 +       .long   0x3e19
27650 +       .byte   0x2
27651 +       .byte   0x23
27652 +       .uleb128 0x70
27653 +       .uleb128 0xa
27654 +       .long   .LASF787
27655 +       .byte   0x33
27656 +       .byte   0x75
27657 +       .long   0x2f
27658 +       .byte   0x2
27659 +       .byte   0x23
27660 +       .uleb128 0x74
27661 +       .uleb128 0xa
27662 +       .long   .LASF788
27663 +       .byte   0x33
27664 +       .byte   0x76
27665 +       .long   0x2f
27666 +       .byte   0x2
27667 +       .byte   0x23
27668 +       .uleb128 0x78
27669 +       .uleb128 0xa
27670 +       .long   .LASF789
27671 +       .byte   0x33
27672 +       .byte   0x78
27673 +       .long   0x17bc
27674 +       .byte   0x2
27675 +       .byte   0x23
27676 +       .uleb128 0x7c
27677 +       .uleb128 0xa
27678 +       .long   .LASF790
27679 +       .byte   0x33
27680 +       .byte   0x7f
27681 +       .long   0x3cfd
27682 +       .byte   0x3
27683 +       .byte   0x23
27684 +       .uleb128 0x84
27685 +       .byte   0x0
27686 +       .uleb128 0x15
27687 +       .long   0x3cfd
27688 +       .long   .LASF106
27689 +       .byte   0x8c
27690 +       .byte   0x18
27691 +       .value  0x22c
27692 +       .uleb128 0x17
27693 +       .string "f_u"
27694 +       .byte   0x1a
27695 +       .value  0x2d0
27696 +       .long   0x776e
27697 +       .byte   0x2
27698 +       .byte   0x23
27699 +       .uleb128 0x0
27700 +       .uleb128 0x16
27701 +       .long   .LASF791
27702 +       .byte   0x1a
27703 +       .value  0x2d1
27704 +       .long   0x628c
27705 +       .byte   0x2
27706 +       .byte   0x23
27707 +       .uleb128 0x8
27708 +       .uleb128 0x16
27709 +       .long   .LASF792
27710 +       .byte   0x1a
27711 +       .value  0x2d4
27712 +       .long   0x7538
27713 +       .byte   0x2
27714 +       .byte   0x23
27715 +       .uleb128 0x10
27716 +       .uleb128 0x16
27717 +       .long   .LASF793
27718 +       .byte   0x1a
27719 +       .value  0x2d5
27720 +       .long   0x16c4
27721 +       .byte   0x2
27722 +       .byte   0x23
27723 +       .uleb128 0x14
27724 +       .uleb128 0x16
27725 +       .long   .LASF794
27726 +       .byte   0x1a
27727 +       .value  0x2d6
27728 +       .long   0x77
27729 +       .byte   0x2
27730 +       .byte   0x23
27731 +       .uleb128 0x18
27732 +       .uleb128 0x16
27733 +       .long   .LASF795
27734 +       .byte   0x1a
27735 +       .value  0x2d7
27736 +       .long   0x1aa
27737 +       .byte   0x2
27738 +       .byte   0x23
27739 +       .uleb128 0x1c
27740 +       .uleb128 0x16
27741 +       .long   .LASF796
27742 +       .byte   0x1a
27743 +       .value  0x2d8
27744 +       .long   0x1f3
27745 +       .byte   0x2
27746 +       .byte   0x23
27747 +       .uleb128 0x20
27748 +       .uleb128 0x16
27749 +       .long   .LASF797
27750 +       .byte   0x1a
27751 +       .value  0x2d9
27752 +       .long   0x7653
27753 +       .byte   0x2
27754 +       .byte   0x23
27755 +       .uleb128 0x28
27756 +       .uleb128 0x16
27757 +       .long   .LASF798
27758 +       .byte   0x1a
27759 +       .value  0x2da
27760 +       .long   0x77
27761 +       .byte   0x2
27762 +       .byte   0x23
27763 +       .uleb128 0x40
27764 +       .uleb128 0x16
27765 +       .long   .LASF799
27766 +       .byte   0x1a
27767 +       .value  0x2da
27768 +       .long   0x77
27769 +       .byte   0x2
27770 +       .byte   0x23
27771 +       .uleb128 0x44
27772 +       .uleb128 0x16
27773 +       .long   .LASF800
27774 +       .byte   0x1a
27775 +       .value  0x2db
27776 +       .long   0x76bb
27777 +       .byte   0x2
27778 +       .byte   0x23
27779 +       .uleb128 0x48
27780 +       .uleb128 0x16
27781 +       .long   .LASF801
27782 +       .byte   0x1a
27783 +       .value  0x2dd
27784 +       .long   0x2f
27785 +       .byte   0x2
27786 +       .byte   0x23
27787 +       .uleb128 0x74
27788 +       .uleb128 0x16
27789 +       .long   .LASF802
27790 +       .byte   0x1a
27791 +       .value  0x2e2
27792 +       .long   0x160b
27793 +       .byte   0x2
27794 +       .byte   0x23
27795 +       .uleb128 0x78
27796 +       .uleb128 0x16
27797 +       .long   .LASF803
27798 +       .byte   0x1a
27799 +       .value  0x2e6
27800 +       .long   0x17bc
27801 +       .byte   0x2
27802 +       .byte   0x23
27803 +       .uleb128 0x7c
27804 +       .uleb128 0x16
27805 +       .long   .LASF804
27806 +       .byte   0x1a
27807 +       .value  0x2e7
27808 +       .long   0x1680
27809 +       .byte   0x3
27810 +       .byte   0x23
27811 +       .uleb128 0x84
27812 +       .uleb128 0x16
27813 +       .long   .LASF805
27814 +       .byte   0x1a
27815 +       .value  0x2e9
27816 +       .long   0x6e96
27817 +       .byte   0x3
27818 +       .byte   0x23
27819 +       .uleb128 0x88
27820 +       .byte   0x0
27821 +       .uleb128 0x4
27822 +       .byte   0x4
27823 +       .long   0x3bfd
27824 +       .uleb128 0xf
27825 +       .long   0x3dc4
27826 +       .long   .LASF806
27827 +       .byte   0xa0
27828 +       .byte   0x33
27829 +       .byte   0xf
27830 +       .uleb128 0xa
27831 +       .long   .LASF807
27832 +       .byte   0x33
27833 +       .byte   0xb6
27834 +       .long   0x16c4
27835 +       .byte   0x2
27836 +       .byte   0x23
27837 +       .uleb128 0x0
27838 +       .uleb128 0xa
27839 +       .long   .LASF808
27840 +       .byte   0x33
27841 +       .byte   0xb7
27842 +       .long   0x21
27843 +       .byte   0x2
27844 +       .byte   0x23
27845 +       .uleb128 0x4
27846 +       .uleb128 0xb
27847 +       .string "mm"
27848 +       .byte   0x33
27849 +       .byte   0xb8
27850 +       .long   0x36ad
27851 +       .byte   0x2
27852 +       .byte   0x23
27853 +       .uleb128 0x8
27854 +       .uleb128 0xa
27855 +       .long   .LASF809
27856 +       .byte   0x33
27857 +       .byte   0xbb
27858 +       .long   0x2f
27859 +       .byte   0x2
27860 +       .byte   0x23
27861 +       .uleb128 0xc
27862 +       .uleb128 0xa
27863 +       .long   .LASF307
27864 +       .byte   0x33
27865 +       .byte   0xbc
27866 +       .long   0x3dc4
27867 +       .byte   0x2
27868 +       .byte   0x23
27869 +       .uleb128 0x10
27870 +       .uleb128 0xa
27871 +       .long   .LASF327
27872 +       .byte   0x33
27873 +       .byte   0xbe
27874 +       .long   0x18ef
27875 +       .byte   0x2
27876 +       .byte   0x23
27877 +       .uleb128 0x14
27878 +       .uleb128 0xa
27879 +       .long   .LASF810
27880 +       .byte   0x33
27881 +       .byte   0xc0
27882 +       .long   0x1680
27883 +       .byte   0x2
27884 +       .byte   0x23
27885 +       .uleb128 0x20
27886 +       .uleb128 0xa
27887 +       .long   .LASF811
27888 +       .byte   0x33
27889 +       .byte   0xc2
27890 +       .long   0x21
27891 +       .byte   0x2
27892 +       .byte   0x23
27893 +       .uleb128 0x24
27894 +       .uleb128 0xa
27895 +       .long   .LASF812
27896 +       .byte   0x33
27897 +       .byte   0xc3
27898 +       .long   0x17bc
27899 +       .byte   0x2
27900 +       .byte   0x23
27901 +       .uleb128 0x28
27902 +       .uleb128 0xa
27903 +       .long   .LASF178
27904 +       .byte   0x33
27905 +       .byte   0xc4
27906 +       .long   0x17bc
27907 +       .byte   0x2
27908 +       .byte   0x23
27909 +       .uleb128 0x30
27910 +       .uleb128 0xa
27911 +       .long   .LASF813
27912 +       .byte   0x33
27913 +       .byte   0xc7
27914 +       .long   0x77
27915 +       .byte   0x2
27916 +       .byte   0x23
27917 +       .uleb128 0x38
27918 +       .uleb128 0xa
27919 +       .long   .LASF814
27920 +       .byte   0x33
27921 +       .byte   0xc9
27922 +       .long   0x3e1f
27923 +       .byte   0x2
27924 +       .byte   0x23
27925 +       .uleb128 0x3c
27926 +       .uleb128 0xb
27927 +       .string "wq"
27928 +       .byte   0x33
27929 +       .byte   0xcb
27930 +       .long   0x39db
27931 +       .byte   0x2
27932 +       .byte   0x23
27933 +       .uleb128 0x78
27934 +       .byte   0x0
27935 +       .uleb128 0x4
27936 +       .byte   0x4
27937 +       .long   0x3d03
27938 +       .uleb128 0x11
27939 +       .long   0x3ddf
27940 +       .byte   0x1
27941 +       .long   0x21
27942 +       .uleb128 0x6
27943 +       .long   0x3ddf
27944 +       .uleb128 0x6
27945 +       .long   0x3de5
27946 +       .byte   0x0
27947 +       .uleb128 0x4
27948 +       .byte   0x4
27949 +       .long   0x3a91
27950 +       .uleb128 0x4
27951 +       .byte   0x4
27952 +       .long   0x3a04
27953 +       .uleb128 0x4
27954 +       .byte   0x4
27955 +       .long   0x3dca
27956 +       .uleb128 0x11
27957 +       .long   0x3e01
27958 +       .byte   0x1
27959 +       .long   0x209
27960 +       .uleb128 0x6
27961 +       .long   0x3ddf
27962 +       .byte   0x0
27963 +       .uleb128 0x4
27964 +       .byte   0x4
27965 +       .long   0x3df1
27966 +       .uleb128 0x5
27967 +       .long   0x3e13
27968 +       .byte   0x1
27969 +       .uleb128 0x6
27970 +       .long   0x3ddf
27971 +       .byte   0x0
27972 +       .uleb128 0x4
27973 +       .byte   0x4
27974 +       .long   0x3e07
27975 +       .uleb128 0x4
27976 +       .byte   0x4
27977 +       .long   0x3a49
27978 +       .uleb128 0xf
27979 +       .long   0x3e9b
27980 +       .long   .LASF815
27981 +       .byte   0x3c
27982 +       .byte   0x33
27983 +       .byte   0xa8
27984 +       .uleb128 0xa
27985 +       .long   .LASF678
27986 +       .byte   0x33
27987 +       .byte   0xa9
27988 +       .long   0x2f
27989 +       .byte   0x2
27990 +       .byte   0x23
27991 +       .uleb128 0x0
27992 +       .uleb128 0xa
27993 +       .long   .LASF816
27994 +       .byte   0x33
27995 +       .byte   0xaa
27996 +       .long   0x2f
27997 +       .byte   0x2
27998 +       .byte   0x23
27999 +       .uleb128 0x4
28000 +       .uleb128 0xa
28001 +       .long   .LASF817
28002 +       .byte   0x33
28003 +       .byte   0xac
28004 +       .long   0x3e9b
28005 +       .byte   0x2
28006 +       .byte   0x23
28007 +       .uleb128 0x8
28008 +       .uleb128 0xa
28009 +       .long   .LASF818
28010 +       .byte   0x33
28011 +       .byte   0xad
28012 +       .long   0x1680
28013 +       .byte   0x2
28014 +       .byte   0x23
28015 +       .uleb128 0xc
28016 +       .uleb128 0xa
28017 +       .long   .LASF819
28018 +       .byte   0x33
28019 +       .byte   0xae
28020 +       .long   0x5a
28021 +       .byte   0x2
28022 +       .byte   0x23
28023 +       .uleb128 0x10
28024 +       .uleb128 0xb
28025 +       .string "nr"
28026 +       .byte   0x33
28027 +       .byte   0xb0
28028 +       .long   0x77
28029 +       .byte   0x2
28030 +       .byte   0x23
28031 +       .uleb128 0x14
28032 +       .uleb128 0xa
28033 +       .long   .LASF820
28034 +       .byte   0x33
28035 +       .byte   0xb0
28036 +       .long   0x77
28037 +       .byte   0x2
28038 +       .byte   0x23
28039 +       .uleb128 0x18
28040 +       .uleb128 0xa
28041 +       .long   .LASF821
28042 +       .byte   0x33
28043 +       .byte   0xb2
28044 +       .long   0x3ea1
28045 +       .byte   0x2
28046 +       .byte   0x23
28047 +       .uleb128 0x1c
28048 +       .byte   0x0
28049 +       .uleb128 0x4
28050 +       .byte   0x4
28051 +       .long   0x2d82
28052 +       .uleb128 0x12
28053 +       .long   0x3eb1
28054 +       .long   0x2d82
28055 +       .uleb128 0x13
28056 +       .long   0x28
28057 +       .byte   0x7
28058 +       .byte   0x0
28059 +       .uleb128 0x1e
28060 +       .long   .LASF822
28061 +       .byte   0xb
28062 +       .value  0x127
28063 +       .long   0x16cf
28064 +       .uleb128 0xf
28065 +       .long   0x3f9c
28066 +       .long   .LASF823
28067 +       .byte   0x54
28068 +       .byte   0x14
28069 +       .byte   0x9e
28070 +       .uleb128 0xa
28071 +       .long   .LASF824
28072 +       .byte   0x15
28073 +       .byte   0x3d
28074 +       .long   0x36ad
28075 +       .byte   0x2
28076 +       .byte   0x23
28077 +       .uleb128 0x0
28078 +       .uleb128 0xa
28079 +       .long   .LASF825
28080 +       .byte   0x15
28081 +       .byte   0x3e
28082 +       .long   0x2f
28083 +       .byte   0x2
28084 +       .byte   0x23
28085 +       .uleb128 0x4
28086 +       .uleb128 0xa
28087 +       .long   .LASF826
28088 +       .byte   0x15
28089 +       .byte   0x3f
28090 +       .long   0x2f
28091 +       .byte   0x2
28092 +       .byte   0x23
28093 +       .uleb128 0x8
28094 +       .uleb128 0xa
28095 +       .long   .LASF827
28096 +       .byte   0x15
28097 +       .byte   0x43
28098 +       .long   0x3f9c
28099 +       .byte   0x2
28100 +       .byte   0x23
28101 +       .uleb128 0xc
28102 +       .uleb128 0xa
28103 +       .long   .LASF828
28104 +       .byte   0x15
28105 +       .byte   0x45
28106 +       .long   0x36e
28107 +       .byte   0x2
28108 +       .byte   0x23
28109 +       .uleb128 0x10
28110 +       .uleb128 0xa
28111 +       .long   .LASF829
28112 +       .byte   0x15
28113 +       .byte   0x46
28114 +       .long   0x2f
28115 +       .byte   0x2
28116 +       .byte   0x23
28117 +       .uleb128 0x14
28118 +       .uleb128 0xa
28119 +       .long   .LASF830
28120 +       .byte   0x15
28121 +       .byte   0x48
28122 +       .long   0x1764
28123 +       .byte   0x2
28124 +       .byte   0x23
28125 +       .uleb128 0x18
28126 +       .uleb128 0xa
28127 +       .long   .LASF831
28128 +       .byte   0x15
28129 +       .byte   0x58
28130 +       .long   0x857c
28131 +       .byte   0x2
28132 +       .byte   0x23
28133 +       .uleb128 0x24
28134 +       .uleb128 0xa
28135 +       .long   .LASF832
28136 +       .byte   0x15
28137 +       .byte   0x60
28138 +       .long   0x17bc
28139 +       .byte   0x2
28140 +       .byte   0x23
28141 +       .uleb128 0x34
28142 +       .uleb128 0xa
28143 +       .long   .LASF833
28144 +       .byte   0x15
28145 +       .byte   0x61
28146 +       .long   0x85a1
28147 +       .byte   0x2
28148 +       .byte   0x23
28149 +       .uleb128 0x3c
28150 +       .uleb128 0xa
28151 +       .long   .LASF834
28152 +       .byte   0x15
28153 +       .byte   0x64
28154 +       .long   0x8608
28155 +       .byte   0x2
28156 +       .byte   0x23
28157 +       .uleb128 0x40
28158 +       .uleb128 0xa
28159 +       .long   .LASF835
28160 +       .byte   0x15
28161 +       .byte   0x67
28162 +       .long   0x2f
28163 +       .byte   0x2
28164 +       .byte   0x23
28165 +       .uleb128 0x44
28166 +       .uleb128 0xa
28167 +       .long   .LASF836
28168 +       .byte   0x15
28169 +       .byte   0x69
28170 +       .long   0x3cfd
28171 +       .byte   0x2
28172 +       .byte   0x23
28173 +       .uleb128 0x48
28174 +       .uleb128 0xa
28175 +       .long   .LASF837
28176 +       .byte   0x15
28177 +       .byte   0x6a
28178 +       .long   0x160b
28179 +       .byte   0x2
28180 +       .byte   0x23
28181 +       .uleb128 0x4c
28182 +       .uleb128 0xa
28183 +       .long   .LASF838
28184 +       .byte   0x15
28185 +       .byte   0x6b
28186 +       .long   0x2f
28187 +       .byte   0x2
28188 +       .byte   0x23
28189 +       .uleb128 0x50
28190 +       .byte   0x0
28191 +       .uleb128 0x4
28192 +       .byte   0x4
28193 +       .long   0x3ebd
28194 +       .uleb128 0x11
28195 +       .long   0x3fc6
28196 +       .byte   0x1
28197 +       .long   0x2f
28198 +       .uleb128 0x6
28199 +       .long   0x3cfd
28200 +       .uleb128 0x6
28201 +       .long   0x2f
28202 +       .uleb128 0x6
28203 +       .long   0x2f
28204 +       .uleb128 0x6
28205 +       .long   0x2f
28206 +       .uleb128 0x6
28207 +       .long   0x2f
28208 +       .byte   0x0
28209 +       .uleb128 0x4
28210 +       .byte   0x4
28211 +       .long   0x3fa2
28212 +       .uleb128 0x5
28213 +       .long   0x3fdd
28214 +       .byte   0x1
28215 +       .uleb128 0x6
28216 +       .long   0x36ad
28217 +       .uleb128 0x6
28218 +       .long   0x2f
28219 +       .byte   0x0
28220 +       .uleb128 0x4
28221 +       .byte   0x4
28222 +       .long   0x3fcc
28223 +       .uleb128 0x4
28224 +       .byte   0x4
28225 +       .long   0x34c
28226 +       .uleb128 0x12
28227 +       .long   0x3ff9
28228 +       .long   0x2f
28229 +       .uleb128 0x13
28230 +       .long   0x28
28231 +       .byte   0x2b
28232 +       .byte   0x0
28233 +       .uleb128 0x4
28234 +       .byte   0x4
28235 +       .long   0x28fe
28236 +       .uleb128 0x1f
28237 +       .long   0x404c
28238 +       .long   .LASF839
28239 +       .value  0x510
28240 +       .byte   0xb
28241 +       .value  0x187
28242 +       .uleb128 0x16
28243 +       .long   .LASF322
28244 +       .byte   0xb
28245 +       .value  0x188
28246 +       .long   0x16c4
28247 +       .byte   0x2
28248 +       .byte   0x23
28249 +       .uleb128 0x0
28250 +       .uleb128 0x16
28251 +       .long   .LASF840
28252 +       .byte   0xb
28253 +       .value  0x189
28254 +       .long   0x404c
28255 +       .byte   0x2
28256 +       .byte   0x23
28257 +       .uleb128 0x4
28258 +       .uleb128 0x16
28259 +       .long   .LASF841
28260 +       .byte   0xb
28261 +       .value  0x18a
28262 +       .long   0x1680
28263 +       .byte   0x3
28264 +       .byte   0x23
28265 +       .uleb128 0x504
28266 +       .uleb128 0x16
28267 +       .long   .LASF842
28268 +       .byte   0xb
28269 +       .value  0x18b
28270 +       .long   0x17bc
28271 +       .byte   0x3
28272 +       .byte   0x23
28273 +       .uleb128 0x508
28274 +       .byte   0x0
28275 +       .uleb128 0x12
28276 +       .long   0x405c
28277 +       .long   0x2447
28278 +       .uleb128 0x13
28279 +       .long   0x28
28280 +       .byte   0x3f
28281 +       .byte   0x0
28282 +       .uleb128 0x2b
28283 +       .long   0x407e
28284 +       .byte   0x4
28285 +       .byte   0xb
28286 +       .value  0x1c7
28287 +       .uleb128 0x1c
28288 +       .long   .LASF843
28289 +       .byte   0xb
28290 +       .value  0x1c8
28291 +       .long   0x1b5
28292 +       .uleb128 0x1c
28293 +       .long   .LASF844
28294 +       .byte   0xb
28295 +       .value  0x1c9
28296 +       .long   0x1b5
28297 +       .byte   0x0
28298 +       .uleb128 0x1f
28299 +       .long   0x4314
28300 +       .long   .LASF845
28301 +       .value  0x16c
28302 +       .byte   0xb
28303 +       .value  0x19d
28304 +       .uleb128 0x16
28305 +       .long   .LASF322
28306 +       .byte   0xb
28307 +       .value  0x19e
28308 +       .long   0x16c4
28309 +       .byte   0x2
28310 +       .byte   0x23
28311 +       .uleb128 0x0
28312 +       .uleb128 0x16
28313 +       .long   .LASF846
28314 +       .byte   0xb
28315 +       .value  0x19f
28316 +       .long   0x16c4
28317 +       .byte   0x2
28318 +       .byte   0x23
28319 +       .uleb128 0x4
28320 +       .uleb128 0x16
28321 +       .long   .LASF847
28322 +       .byte   0xb
28323 +       .value  0x1a1
28324 +       .long   0x18ef
28325 +       .byte   0x2
28326 +       .byte   0x23
28327 +       .uleb128 0x8
28328 +       .uleb128 0x16
28329 +       .long   .LASF848
28330 +       .byte   0xb
28331 +       .value  0x1a4
28332 +       .long   0x15f9
28333 +       .byte   0x2
28334 +       .byte   0x23
28335 +       .uleb128 0x14
28336 +       .uleb128 0x16
28337 +       .long   .LASF849
28338 +       .byte   0xb
28339 +       .value  0x1a7
28340 +       .long   0x272f
28341 +       .byte   0x2
28342 +       .byte   0x23
28343 +       .uleb128 0x18
28344 +       .uleb128 0x16
28345 +       .long   .LASF850
28346 +       .byte   0xb
28347 +       .value  0x1aa
28348 +       .long   0x21
28349 +       .byte   0x2
28350 +       .byte   0x23
28351 +       .uleb128 0x28
28352 +       .uleb128 0x16
28353 +       .long   .LASF851
28354 +       .byte   0xb
28355 +       .value  0x1b0
28356 +       .long   0x15f9
28357 +       .byte   0x2
28358 +       .byte   0x23
28359 +       .uleb128 0x2c
28360 +       .uleb128 0x16
28361 +       .long   .LASF852
28362 +       .byte   0xb
28363 +       .value  0x1b1
28364 +       .long   0x21
28365 +       .byte   0x2
28366 +       .byte   0x23
28367 +       .uleb128 0x30
28368 +       .uleb128 0x16
28369 +       .long   .LASF853
28370 +       .byte   0xb
28371 +       .value  0x1b4
28372 +       .long   0x21
28373 +       .byte   0x2
28374 +       .byte   0x23
28375 +       .uleb128 0x34
28376 +       .uleb128 0x16
28377 +       .long   .LASF53
28378 +       .byte   0xb
28379 +       .value  0x1b5
28380 +       .long   0x77
28381 +       .byte   0x2
28382 +       .byte   0x23
28383 +       .uleb128 0x38
28384 +       .uleb128 0x16
28385 +       .long   .LASF854
28386 +       .byte   0xb
28387 +       .value  0x1b8
28388 +       .long   0x17bc
28389 +       .byte   0x2
28390 +       .byte   0x23
28391 +       .uleb128 0x3c
28392 +       .uleb128 0x16
28393 +       .long   .LASF855
28394 +       .byte   0xb
28395 +       .value  0x1bb
28396 +       .long   0x382e
28397 +       .byte   0x2
28398 +       .byte   0x23
28399 +       .uleb128 0x44
28400 +       .uleb128 0x17
28401 +       .string "tsk"
28402 +       .byte   0xb
28403 +       .value  0x1bc
28404 +       .long   0x15f9
28405 +       .byte   0x2
28406 +       .byte   0x23
28407 +       .uleb128 0x64
28408 +       .uleb128 0x16
28409 +       .long   .LASF856
28410 +       .byte   0xb
28411 +       .value  0x1bd
28412 +       .long   0x371d
28413 +       .byte   0x2
28414 +       .byte   0x23
28415 +       .uleb128 0x68
28416 +       .uleb128 0x16
28417 +       .long   .LASF222
28418 +       .byte   0xb
28419 +       .value  0x1c0
28420 +       .long   0x19b4
28421 +       .byte   0x2
28422 +       .byte   0x23
28423 +       .uleb128 0x70
28424 +       .uleb128 0x16
28425 +       .long   .LASF223
28426 +       .byte   0xb
28427 +       .value  0x1c0
28428 +       .long   0x19b4
28429 +       .byte   0x2
28430 +       .byte   0x23
28431 +       .uleb128 0x74
28432 +       .uleb128 0x16
28433 +       .long   .LASF857
28434 +       .byte   0xb
28435 +       .value  0x1c1
28436 +       .long   0x19b4
28437 +       .byte   0x2
28438 +       .byte   0x23
28439 +       .uleb128 0x78
28440 +       .uleb128 0x16
28441 +       .long   .LASF858
28442 +       .byte   0xb
28443 +       .value  0x1c1
28444 +       .long   0x19b4
28445 +       .byte   0x2
28446 +       .byte   0x23
28447 +       .uleb128 0x7c
28448 +       .uleb128 0x16
28449 +       .long   .LASF859
28450 +       .byte   0xb
28451 +       .value  0x1c4
28452 +       .long   0x1b5
28453 +       .byte   0x3
28454 +       .byte   0x23
28455 +       .uleb128 0x80
28456 +       .uleb128 0x16
28457 +       .long   .LASF860
28458 +       .byte   0xb
28459 +       .value  0x1c5
28460 +       .long   0x3070
28461 +       .byte   0x3
28462 +       .byte   0x23
28463 +       .uleb128 0x84
28464 +       .uleb128 0x10
28465 +       .long   0x405c
28466 +       .byte   0x3
28467 +       .byte   0x23
28468 +       .uleb128 0x88
28469 +       .uleb128 0x16
28470 +       .long   .LASF861
28471 +       .byte   0xb
28472 +       .value  0x1cd
28473 +       .long   0x21
28474 +       .byte   0x3
28475 +       .byte   0x23
28476 +       .uleb128 0x8c
28477 +       .uleb128 0x17
28478 +       .string "tty"
28479 +       .byte   0xb
28480 +       .value  0x1cf
28481 +       .long   0x431a
28482 +       .byte   0x3
28483 +       .byte   0x23
28484 +       .uleb128 0x90
28485 +       .uleb128 0x16
28486 +       .long   .LASF215
28487 +       .byte   0xb
28488 +       .value  0x1d7
28489 +       .long   0x19b4
28490 +       .byte   0x3
28491 +       .byte   0x23
28492 +       .uleb128 0x94
28493 +       .uleb128 0x16
28494 +       .long   .LASF216
28495 +       .byte   0xb
28496 +       .value  0x1d7
28497 +       .long   0x19b4
28498 +       .byte   0x3
28499 +       .byte   0x23
28500 +       .uleb128 0x98
28501 +       .uleb128 0x16
28502 +       .long   .LASF862
28503 +       .byte   0xb
28504 +       .value  0x1d7
28505 +       .long   0x19b4
28506 +       .byte   0x3
28507 +       .byte   0x23
28508 +       .uleb128 0x9c
28509 +       .uleb128 0x16
28510 +       .long   .LASF863
28511 +       .byte   0xb
28512 +       .value  0x1d7
28513 +       .long   0x19b4
28514 +       .byte   0x3
28515 +       .byte   0x23
28516 +       .uleb128 0xa0
28517 +       .uleb128 0x16
28518 +       .long   .LASF217
28519 +       .byte   0xb
28520 +       .value  0x1d8
28521 +       .long   0x2f
28522 +       .byte   0x3
28523 +       .byte   0x23
28524 +       .uleb128 0xa4
28525 +       .uleb128 0x16
28526 +       .long   .LASF218
28527 +       .byte   0xb
28528 +       .value  0x1d8
28529 +       .long   0x2f
28530 +       .byte   0x3
28531 +       .byte   0x23
28532 +       .uleb128 0xa8
28533 +       .uleb128 0x16
28534 +       .long   .LASF864
28535 +       .byte   0xb
28536 +       .value  0x1d8
28537 +       .long   0x2f
28538 +       .byte   0x3
28539 +       .byte   0x23
28540 +       .uleb128 0xac
28541 +       .uleb128 0x16
28542 +       .long   .LASF865
28543 +       .byte   0xb
28544 +       .value  0x1d8
28545 +       .long   0x2f
28546 +       .byte   0x3
28547 +       .byte   0x23
28548 +       .uleb128 0xb0
28549 +       .uleb128 0x16
28550 +       .long   .LASF220
28551 +       .byte   0xb
28552 +       .value  0x1d9
28553 +       .long   0x2f
28554 +       .byte   0x3
28555 +       .byte   0x23
28556 +       .uleb128 0xb4
28557 +       .uleb128 0x16
28558 +       .long   .LASF221
28559 +       .byte   0xb
28560 +       .value  0x1d9
28561 +       .long   0x2f
28562 +       .byte   0x3
28563 +       .byte   0x23
28564 +       .uleb128 0xb8
28565 +       .uleb128 0x16
28566 +       .long   .LASF866
28567 +       .byte   0xb
28568 +       .value  0x1d9
28569 +       .long   0x2f
28570 +       .byte   0x3
28571 +       .byte   0x23
28572 +       .uleb128 0xbc
28573 +       .uleb128 0x16
28574 +       .long   .LASF867
28575 +       .byte   0xb
28576 +       .value  0x1d9
28577 +       .long   0x2f
28578 +       .byte   0x3
28579 +       .byte   0x23
28580 +       .uleb128 0xc0
28581 +       .uleb128 0x16
28582 +       .long   .LASF868
28583 +       .byte   0xb
28584 +       .value  0x1da
28585 +       .long   0x2f
28586 +       .byte   0x3
28587 +       .byte   0x23
28588 +       .uleb128 0xc4
28589 +       .uleb128 0x16
28590 +       .long   .LASF869
28591 +       .byte   0xb
28592 +       .value  0x1da
28593 +       .long   0x2f
28594 +       .byte   0x3
28595 +       .byte   0x23
28596 +       .uleb128 0xc8
28597 +       .uleb128 0x16
28598 +       .long   .LASF870
28599 +       .byte   0xb
28600 +       .value  0x1da
28601 +       .long   0x2f
28602 +       .byte   0x3
28603 +       .byte   0x23
28604 +       .uleb128 0xcc
28605 +       .uleb128 0x16
28606 +       .long   .LASF871
28607 +       .byte   0xb
28608 +       .value  0x1da
28609 +       .long   0x2f
28610 +       .byte   0x3
28611 +       .byte   0x23
28612 +       .uleb128 0xd0
28613 +       .uleb128 0x16
28614 +       .long   .LASF186
28615 +       .byte   0xb
28616 +       .value  0x1e2
28617 +       .long   0x162
28618 +       .byte   0x3
28619 +       .byte   0x23
28620 +       .uleb128 0xd4
28621 +       .uleb128 0x16
28622 +       .long   .LASF872
28623 +       .byte   0xb
28624 +       .value  0x1ed
28625 +       .long   0x4320
28626 +       .byte   0x3
28627 +       .byte   0x23
28628 +       .uleb128 0xdc
28629 +       .uleb128 0x16
28630 +       .long   .LASF225
28631 +       .byte   0xb
28632 +       .value  0x1ef
28633 +       .long   0x4330
28634 +       .byte   0x3
28635 +       .byte   0x23
28636 +       .uleb128 0x154
28637 +       .byte   0x0
28638 +       .uleb128 0x21
28639 +       .long   .LASF873
28640 +       .byte   0x1
28641 +       .uleb128 0x4
28642 +       .byte   0x4
28643 +       .long   0x4314
28644 +       .uleb128 0x12
28645 +       .long   0x4330
28646 +       .long   0x36dc
28647 +       .uleb128 0x13
28648 +       .long   0x28
28649 +       .byte   0xe
28650 +       .byte   0x0
28651 +       .uleb128 0x12
28652 +       .long   0x4340
28653 +       .long   0x17bc
28654 +       .uleb128 0x13
28655 +       .long   0x28
28656 +       .byte   0x2
28657 +       .byte   0x0
28658 +       .uleb128 0x15
28659 +       .long   0x439b
28660 +       .long   .LASF232
28661 +       .byte   0x8c
28662 +       .byte   0xb
28663 +       .value  0x302
28664 +       .uleb128 0x16
28665 +       .long   .LASF874
28666 +       .byte   0xb
28667 +       .value  0x303
28668 +       .long   0x21
28669 +       .byte   0x2
28670 +       .byte   0x23
28671 +       .uleb128 0x0
28672 +       .uleb128 0x16
28673 +       .long   .LASF171
28674 +       .byte   0xb
28675 +       .value  0x304
28676 +       .long   0x16c4
28677 +       .byte   0x2
28678 +       .byte   0x23
28679 +       .uleb128 0x4
28680 +       .uleb128 0x16
28681 +       .long   .LASF875
28682 +       .byte   0xb
28683 +       .value  0x305
28684 +       .long   0x439b
28685 +       .byte   0x2
28686 +       .byte   0x23
28687 +       .uleb128 0x8
28688 +       .uleb128 0x16
28689 +       .long   .LASF876
28690 +       .byte   0xb
28691 +       .value  0x306
28692 +       .long   0x21
28693 +       .byte   0x3
28694 +       .byte   0x23
28695 +       .uleb128 0x88
28696 +       .uleb128 0x16
28697 +       .long   .LASF877
28698 +       .byte   0xb
28699 +       .value  0x307
28700 +       .long   0x43ab
28701 +       .byte   0x3
28702 +       .byte   0x23
28703 +       .uleb128 0x8c
28704 +       .byte   0x0
28705 +       .uleb128 0x12
28706 +       .long   0x43ab
28707 +       .long   0x1e8
28708 +       .uleb128 0x13
28709 +       .long   0x28
28710 +       .byte   0x1f
28711 +       .byte   0x0
28712 +       .uleb128 0x12
28713 +       .long   0x43ba
28714 +       .long   0x43ba
28715 +       .uleb128 0x23
28716 +       .long   0x28
28717 +       .byte   0x0
28718 +       .uleb128 0x4
28719 +       .byte   0x4
28720 +       .long   0x1e8
28721 +       .uleb128 0x2c
28722 +       .long   0x43e6
28723 +       .long   .LASF187
28724 +       .byte   0x4
28725 +       .byte   0xb
28726 +       .value  0x32c
28727 +       .uleb128 0x28
28728 +       .long   .LASF878
28729 +       .sleb128 0
28730 +       .uleb128 0x28
28731 +       .long   .LASF879
28732 +       .sleb128 1
28733 +       .uleb128 0x28
28734 +       .long   .LASF880
28735 +       .sleb128 2
28736 +       .uleb128 0x28
28737 +       .long   .LASF881
28738 +       .sleb128 3
28739 +       .byte   0x0
28740 +       .uleb128 0x2d
28741 +       .long   0x5a
28742 +       .uleb128 0x21
28743 +       .long   .LASF882
28744 +       .byte   0x1
28745 +       .uleb128 0x4
28746 +       .byte   0x4
28747 +       .long   0x43eb
28748 +       .uleb128 0x21
28749 +       .long   .LASF883
28750 +       .byte   0x1
28751 +       .uleb128 0x4
28752 +       .byte   0x4
28753 +       .long   0x43f7
28754 +       .uleb128 0x12
28755 +       .long   0x4413
28756 +       .long   0x3047
28757 +       .uleb128 0x13
28758 +       .long   0x28
28759 +       .byte   0x2
28760 +       .byte   0x0
28761 +       .uleb128 0x4
28762 +       .byte   0x4
28763 +       .long   0x21
28764 +       .uleb128 0x4
28765 +       .byte   0x4
28766 +       .long   0x4340
28767 +       .uleb128 0x4
28768 +       .byte   0x4
28769 +       .long   0x2758
28770 +       .uleb128 0x21
28771 +       .long   .LASF884
28772 +       .byte   0x1
28773 +       .uleb128 0x4
28774 +       .byte   0x4
28775 +       .long   0x4425
28776 +       .uleb128 0x21
28777 +       .long   .LASF246
28778 +       .byte   0x1
28779 +       .uleb128 0x4
28780 +       .byte   0x4
28781 +       .long   0x4431
28782 +       .uleb128 0x4
28783 +       .byte   0x4
28784 +       .long   0x407e
28785 +       .uleb128 0x4
28786 +       .byte   0x4
28787 +       .long   0x3fff
28788 +       .uleb128 0x11
28789 +       .long   0x4459
28790 +       .byte   0x1
28791 +       .long   0x21
28792 +       .uleb128 0x6
28793 +       .long   0x160b
28794 +       .byte   0x0
28795 +       .uleb128 0x4
28796 +       .byte   0x4
28797 +       .long   0x4449
28798 +       .uleb128 0x4
28799 +       .byte   0x4
28800 +       .long   0x23bf
28801 +       .uleb128 0x21
28802 +       .long   .LASF259
28803 +       .byte   0x1
28804 +       .uleb128 0x4
28805 +       .byte   0x4
28806 +       .long   0x4465
28807 +       .uleb128 0x21
28808 +       .long   .LASF885
28809 +       .byte   0x1
28810 +       .uleb128 0x4
28811 +       .byte   0x4
28812 +       .long   0x4471
28813 +       .uleb128 0x2e
28814 +       .string "bio"
28815 +       .byte   0x1
28816 +       .uleb128 0x4
28817 +       .byte   0x4
28818 +       .long   0x447d
28819 +       .uleb128 0x4
28820 +       .byte   0x4
28821 +       .long   0x4483
28822 +       .uleb128 0x15
28823 +       .long   0x44ab
28824 +       .long   .LASF270
28825 +       .byte   0x4
28826 +       .byte   0xb
28827 +       .value  0x245
28828 +       .uleb128 0xa
28829 +       .long   .LASF886
28830 +       .byte   0x51
28831 +       .byte   0x56
28832 +       .long   0x2f
28833 +       .byte   0x2
28834 +       .byte   0x23
28835 +       .uleb128 0x0
28836 +       .byte   0x0
28837 +       .uleb128 0x4
28838 +       .byte   0x4
28839 +       .long   0x448f
28840 +       .uleb128 0x15
28841 +       .long   0x4521
28842 +       .long   .LASF271
28843 +       .byte   0x1c
28844 +       .byte   0xb
28845 +       .value  0x244
28846 +       .uleb128 0xa
28847 +       .long   .LASF887
28848 +       .byte   0x3e
28849 +       .byte   0x1c
28850 +       .long   0x2f
28851 +       .byte   0x2
28852 +       .byte   0x23
28853 +       .uleb128 0x0
28854 +       .uleb128 0xa
28855 +       .long   .LASF169
28856 +       .byte   0x3e
28857 +       .byte   0x1d
28858 +       .long   0x2f
28859 +       .byte   0x2
28860 +       .byte   0x23
28861 +       .uleb128 0x4
28862 +       .uleb128 0xa
28863 +       .long   .LASF888
28864 +       .byte   0x3e
28865 +       .byte   0x1e
28866 +       .long   0x77
28867 +       .byte   0x2
28868 +       .byte   0x23
28869 +       .uleb128 0x8
28870 +       .uleb128 0xa
28871 +       .long   .LASF889
28872 +       .byte   0x3e
28873 +       .byte   0x1f
28874 +       .long   0x8445
28875 +       .byte   0x2
28876 +       .byte   0x23
28877 +       .uleb128 0xc
28878 +       .uleb128 0xa
28879 +       .long   .LASF890
28880 +       .byte   0x3e
28881 +       .byte   0x20
28882 +       .long   0x160b
28883 +       .byte   0x2
28884 +       .byte   0x23
28885 +       .uleb128 0x10
28886 +       .uleb128 0xa
28887 +       .long   .LASF891
28888 +       .byte   0x3e
28889 +       .byte   0x21
28890 +       .long   0x845c
28891 +       .byte   0x2
28892 +       .byte   0x23
28893 +       .uleb128 0x14
28894 +       .uleb128 0xa
28895 +       .long   .LASF892
28896 +       .byte   0x3e
28897 +       .byte   0x22
28898 +       .long   0x160b
28899 +       .byte   0x2
28900 +       .byte   0x23
28901 +       .uleb128 0x18
28902 +       .byte   0x0
28903 +       .uleb128 0x4
28904 +       .byte   0x4
28905 +       .long   0x44b1
28906 +       .uleb128 0x21
28907 +       .long   .LASF272
28908 +       .byte   0x1
28909 +       .uleb128 0x4
28910 +       .byte   0x4
28911 +       .long   0x4527
28912 +       .uleb128 0x4
28913 +       .byte   0x4
28914 +       .long   0x267b
28915 +       .uleb128 0x4
28916 +       .byte   0x4
28917 +       .long   0x30b9
28918 +       .uleb128 0x21
28919 +       .long   .LASF893
28920 +       .byte   0x1
28921 +       .uleb128 0x4
28922 +       .byte   0x4
28923 +       .long   0x453f
28924 +       .uleb128 0x21
28925 +       .long   .LASF894
28926 +       .byte   0x1
28927 +       .uleb128 0x4
28928 +       .byte   0x4
28929 +       .long   0x454b
28930 +       .uleb128 0xf
28931 +       .long   0x45d4
28932 +       .long   .LASF895
28933 +       .byte   0x40
28934 +       .byte   0x66
28935 +       .byte   0x11
28936 +       .uleb128 0xa
28937 +       .long   .LASF237
28938 +       .byte   0x66
28939 +       .byte   0x12
28940 +       .long   0x19bf
28941 +       .byte   0x2
28942 +       .byte   0x23
28943 +       .uleb128 0x0
28944 +       .uleb128 0xa
28945 +       .long   .LASF896
28946 +       .byte   0x66
28947 +       .byte   0x13
28948 +       .long   0x19bf
28949 +       .byte   0x2
28950 +       .byte   0x23
28951 +       .uleb128 0x8
28952 +       .uleb128 0xa
28953 +       .long   .LASF897
28954 +       .byte   0x66
28955 +       .byte   0x14
28956 +       .long   0x19bf
28957 +       .byte   0x2
28958 +       .byte   0x23
28959 +       .uleb128 0x10
28960 +       .uleb128 0xa
28961 +       .long   .LASF898
28962 +       .byte   0x66
28963 +       .byte   0x15
28964 +       .long   0x19bf
28965 +       .byte   0x2
28966 +       .byte   0x23
28967 +       .uleb128 0x18
28968 +       .uleb128 0xb
28969 +       .string "irq"
28970 +       .byte   0x66
28971 +       .byte   0x16
28972 +       .long   0x19bf
28973 +       .byte   0x2
28974 +       .byte   0x23
28975 +       .uleb128 0x20
28976 +       .uleb128 0xa
28977 +       .long   .LASF899
28978 +       .byte   0x66
28979 +       .byte   0x17
28980 +       .long   0x19bf
28981 +       .byte   0x2
28982 +       .byte   0x23
28983 +       .uleb128 0x28
28984 +       .uleb128 0xa
28985 +       .long   .LASF900
28986 +       .byte   0x66
28987 +       .byte   0x18
28988 +       .long   0x19bf
28989 +       .byte   0x2
28990 +       .byte   0x23
28991 +       .uleb128 0x30
28992 +       .uleb128 0xa
28993 +       .long   .LASF901
28994 +       .byte   0x66
28995 +       .byte   0x19
28996 +       .long   0x19bf
28997 +       .byte   0x2
28998 +       .byte   0x23
28999 +       .uleb128 0x38
29000 +       .byte   0x0
29001 +       .uleb128 0x1a
29002 +       .long   0x45fe
29003 +       .long   .LASF902
29004 +       .value  0x3c0
29005 +       .byte   0x66
29006 +       .byte   0x1c
29007 +       .uleb128 0xa
29008 +       .long   .LASF903
29009 +       .byte   0x66
29010 +       .byte   0x1d
29011 +       .long   0x4557
29012 +       .byte   0x2
29013 +       .byte   0x23
29014 +       .uleb128 0x0
29015 +       .uleb128 0xa
29016 +       .long   .LASF904
29017 +       .byte   0x66
29018 +       .byte   0x1e
29019 +       .long   0x45fe
29020 +       .byte   0x2
29021 +       .byte   0x23
29022 +       .uleb128 0x40
29023 +       .byte   0x0
29024 +       .uleb128 0x12
29025 +       .long   0x460e
29026 +       .long   0x77
29027 +       .uleb128 0x13
29028 +       .long   0x28
29029 +       .byte   0xdf
29030 +       .byte   0x0
29031 +       .uleb128 0xf
29032 +       .long   0x46d1
29033 +       .long   .LASF905
29034 +       .byte   0x4c
29035 +       .byte   0x41
29036 +       .byte   0x3e
29037 +       .uleb128 0xb
29038 +       .string "ino"
29039 +       .byte   0x41
29040 +       .byte   0x3f
29041 +       .long   0x189
29042 +       .byte   0x2
29043 +       .byte   0x23
29044 +       .uleb128 0x0
29045 +       .uleb128 0xb
29046 +       .string "dev"
29047 +       .byte   0x41
29048 +       .byte   0x40
29049 +       .long   0x19f
29050 +       .byte   0x2
29051 +       .byte   0x23
29052 +       .uleb128 0x8
29053 +       .uleb128 0xa
29054 +       .long   .LASF626
29055 +       .byte   0x41
29056 +       .byte   0x41
29057 +       .long   0xea
29058 +       .byte   0x2
29059 +       .byte   0x23
29060 +       .uleb128 0xc
29061 +       .uleb128 0xa
29062 +       .long   .LASF906
29063 +       .byte   0x41
29064 +       .byte   0x42
29065 +       .long   0x77
29066 +       .byte   0x2
29067 +       .byte   0x23
29068 +       .uleb128 0x10
29069 +       .uleb128 0xb
29070 +       .string "uid"
29071 +       .byte   0x41
29072 +       .byte   0x43
29073 +       .long   0x1dd
29074 +       .byte   0x2
29075 +       .byte   0x23
29076 +       .uleb128 0x14
29077 +       .uleb128 0xb
29078 +       .string "gid"
29079 +       .byte   0x41
29080 +       .byte   0x44
29081 +       .long   0x1e8
29082 +       .byte   0x2
29083 +       .byte   0x23
29084 +       .uleb128 0x18
29085 +       .uleb128 0xa
29086 +       .long   .LASF907
29087 +       .byte   0x41
29088 +       .byte   0x45
29089 +       .long   0x19f
29090 +       .byte   0x2
29091 +       .byte   0x23
29092 +       .uleb128 0x1c
29093 +       .uleb128 0xa
29094 +       .long   .LASF328
29095 +       .byte   0x41
29096 +       .byte   0x46
29097 +       .long   0x1f3
29098 +       .byte   0x2
29099 +       .byte   0x23
29100 +       .uleb128 0x20
29101 +       .uleb128 0xa
29102 +       .long   .LASF908
29103 +       .byte   0x41
29104 +       .byte   0x47
29105 +       .long   0x173b
29106 +       .byte   0x2
29107 +       .byte   0x23
29108 +       .uleb128 0x28
29109 +       .uleb128 0xa
29110 +       .long   .LASF909
29111 +       .byte   0x41
29112 +       .byte   0x48
29113 +       .long   0x173b
29114 +       .byte   0x2
29115 +       .byte   0x23
29116 +       .uleb128 0x30
29117 +       .uleb128 0xa
29118 +       .long   .LASF910
29119 +       .byte   0x41
29120 +       .byte   0x49
29121 +       .long   0x173b
29122 +       .byte   0x2
29123 +       .byte   0x23
29124 +       .uleb128 0x38
29125 +       .uleb128 0xa
29126 +       .long   .LASF911
29127 +       .byte   0x41
29128 +       .byte   0x4a
29129 +       .long   0x2f
29130 +       .byte   0x2
29131 +       .byte   0x23
29132 +       .uleb128 0x40
29133 +       .uleb128 0xa
29134 +       .long   .LASF877
29135 +       .byte   0x41
29136 +       .byte   0x4b
29137 +       .long   0x162
29138 +       .byte   0x2
29139 +       .byte   0x23
29140 +       .uleb128 0x44
29141 +       .byte   0x0
29142 +       .uleb128 0x12
29143 +       .long   0x46e1
29144 +       .long   0xbb
29145 +       .uleb128 0x13
29146 +       .long   0x28
29147 +       .byte   0x1f
29148 +       .byte   0x0
29149 +       .uleb128 0x1a
29150 +       .long   0x46fd
29151 +       .long   .LASF912
29152 +       .value  0x1000
29153 +       .byte   0x67
29154 +       .byte   0x16
29155 +       .uleb128 0xb
29156 +       .string "gdt"
29157 +       .byte   0x67
29158 +       .byte   0x17
29159 +       .long   0x46fd
29160 +       .byte   0x2
29161 +       .byte   0x23
29162 +       .uleb128 0x0
29163 +       .byte   0x0
29164 +       .uleb128 0x12
29165 +       .long   0x470d
29166 +       .long   0x942
29167 +       .uleb128 0x13
29168 +       .long   0x28
29169 +       .byte   0x1f
29170 +       .byte   0x0
29171 +       .uleb128 0x7
29172 +       .long   .LASF913
29173 +       .byte   0x32
29174 +       .byte   0x13
29175 +       .long   0x141
29176 +       .uleb128 0x7
29177 +       .long   .LASF914
29178 +       .byte   0x32
29179 +       .byte   0x14
29180 +       .long   0x12b
29181 +       .uleb128 0x7
29182 +       .long   .LASF915
29183 +       .byte   0x32
29184 +       .byte   0x17
29185 +       .long   0x141
29186 +       .uleb128 0xf
29187 +       .long   0x478f
29188 +       .long   .LASF916
29189 +       .byte   0x10
29190 +       .byte   0x32
29191 +       .byte   0xab
29192 +       .uleb128 0xa
29193 +       .long   .LASF917
29194 +       .byte   0x32
29195 +       .byte   0xac
29196 +       .long   0x4723
29197 +       .byte   0x2
29198 +       .byte   0x23
29199 +       .uleb128 0x0
29200 +       .uleb128 0xa
29201 +       .long   .LASF918
29202 +       .byte   0x32
29203 +       .byte   0xad
29204 +       .long   0x470d
29205 +       .byte   0x2
29206 +       .byte   0x23
29207 +       .uleb128 0x4
29208 +       .uleb128 0xa
29209 +       .long   .LASF919
29210 +       .byte   0x32
29211 +       .byte   0xae
29212 +       .long   0x4723
29213 +       .byte   0x2
29214 +       .byte   0x23
29215 +       .uleb128 0x8
29216 +       .uleb128 0xa
29217 +       .long   .LASF920
29218 +       .byte   0x32
29219 +       .byte   0xaf
29220 +       .long   0x112
29221 +       .byte   0x2
29222 +       .byte   0x23
29223 +       .uleb128 0xc
29224 +       .uleb128 0xa
29225 +       .long   .LASF921
29226 +       .byte   0x32
29227 +       .byte   0xb0
29228 +       .long   0x112
29229 +       .byte   0x2
29230 +       .byte   0x23
29231 +       .uleb128 0xd
29232 +       .uleb128 0xa
29233 +       .long   .LASF922
29234 +       .byte   0x32
29235 +       .byte   0xb1
29236 +       .long   0x4718
29237 +       .byte   0x2
29238 +       .byte   0x23
29239 +       .uleb128 0xe
29240 +       .byte   0x0
29241 +       .uleb128 0x7
29242 +       .long   .LASF923
29243 +       .byte   0x32
29244 +       .byte   0xb2
29245 +       .long   0x472e
29246 +       .uleb128 0xf
29247 +       .long   0x47d1
29248 +       .long   .LASF924
29249 +       .byte   0xc
29250 +       .byte   0x26
29251 +       .byte   0x17
29252 +       .uleb128 0xa
29253 +       .long   .LASF414
29254 +       .byte   0x26
29255 +       .byte   0x18
29256 +       .long   0x7f2
29257 +       .byte   0x2
29258 +       .byte   0x23
29259 +       .uleb128 0x0
29260 +       .uleb128 0xa
29261 +       .long   .LASF594
29262 +       .byte   0x26
29263 +       .byte   0x19
29264 +       .long   0x4af9
29265 +       .byte   0x2
29266 +       .byte   0x23
29267 +       .uleb128 0x4
29268 +       .uleb128 0xa
29269 +       .long   .LASF626
29270 +       .byte   0x26
29271 +       .byte   0x1a
29272 +       .long   0x1aa
29273 +       .byte   0x2
29274 +       .byte   0x23
29275 +       .uleb128 0x8
29276 +       .byte   0x0
29277 +       .uleb128 0x1a
29278 +       .long   0x4af9
29279 +       .long   .LASF925
29280 +       .value  0x1200
29281 +       .byte   0x28
29282 +       .byte   0x14
29283 +       .uleb128 0xa
29284 +       .long   .LASF169
29285 +       .byte   0x29
29286 +       .byte   0xfb
29287 +       .long   0x4f37
29288 +       .byte   0x2
29289 +       .byte   0x23
29290 +       .uleb128 0x0
29291 +       .uleb128 0xa
29292 +       .long   .LASF509
29293 +       .byte   0x29
29294 +       .byte   0xfe
29295 +       .long   0x17bc
29296 +       .byte   0x2
29297 +       .byte   0x23
29298 +       .uleb128 0x4
29299 +       .uleb128 0x16
29300 +       .long   .LASF414
29301 +       .byte   0x29
29302 +       .value  0x101
29303 +       .long   0x4dea
29304 +       .byte   0x2
29305 +       .byte   0x23
29306 +       .uleb128 0xc
29307 +       .uleb128 0x16
29308 +       .long   .LASF926
29309 +       .byte   0x29
29310 +       .value  0x104
29311 +       .long   0x4ee5
29312 +       .byte   0x2
29313 +       .byte   0x23
29314 +       .uleb128 0x48
29315 +       .uleb128 0x16
29316 +       .long   .LASF927
29317 +       .byte   0x29
29318 +       .value  0x105
29319 +       .long   0x4fd9
29320 +       .byte   0x3
29321 +       .byte   0x23
29322 +       .uleb128 0x90
29323 +       .uleb128 0x16
29324 +       .long   .LASF928
29325 +       .byte   0x29
29326 +       .value  0x106
29327 +       .long   0x4e75
29328 +       .byte   0x3
29329 +       .byte   0x23
29330 +       .uleb128 0x94
29331 +       .uleb128 0x16
29332 +       .long   .LASF929
29333 +       .byte   0x29
29334 +       .value  0x107
29335 +       .long   0x7f2
29336 +       .byte   0x3
29337 +       .byte   0x23
29338 +       .uleb128 0x98
29339 +       .uleb128 0x16
29340 +       .long   .LASF930
29341 +       .byte   0x29
29342 +       .value  0x108
29343 +       .long   0x7f2
29344 +       .byte   0x3
29345 +       .byte   0x23
29346 +       .uleb128 0x9c
29347 +       .uleb128 0x16
29348 +       .long   .LASF931
29349 +       .byte   0x29
29350 +       .value  0x109
29351 +       .long   0x4b34
29352 +       .byte   0x3
29353 +       .byte   0x23
29354 +       .uleb128 0xa0
29355 +       .uleb128 0x16
29356 +       .long   .LASF932
29357 +       .byte   0x29
29358 +       .value  0x10c
29359 +       .long   0x4fdf
29360 +       .byte   0x3
29361 +       .byte   0x23
29362 +       .uleb128 0xa4
29363 +       .uleb128 0x16
29364 +       .long   .LASF933
29365 +       .byte   0x29
29366 +       .value  0x10d
29367 +       .long   0x77
29368 +       .byte   0x3
29369 +       .byte   0x23
29370 +       .uleb128 0xa8
29371 +       .uleb128 0x16
29372 +       .long   .LASF934
29373 +       .byte   0x29
29374 +       .value  0x10e
29375 +       .long   0x4fea
29376 +       .byte   0x3
29377 +       .byte   0x23
29378 +       .uleb128 0xac
29379 +       .uleb128 0x16
29380 +       .long   .LASF935
29381 +       .byte   0x29
29382 +       .value  0x111
29383 +       .long   0x4fdf
29384 +       .byte   0x3
29385 +       .byte   0x23
29386 +       .uleb128 0xb0
29387 +       .uleb128 0x16
29388 +       .long   .LASF936
29389 +       .byte   0x29
29390 +       .value  0x112
29391 +       .long   0x77
29392 +       .byte   0x3
29393 +       .byte   0x23
29394 +       .uleb128 0xb4
29395 +       .uleb128 0x16
29396 +       .long   .LASF937
29397 +       .byte   0x29
29398 +       .value  0x113
29399 +       .long   0x4fea
29400 +       .byte   0x3
29401 +       .byte   0x23
29402 +       .uleb128 0xb8
29403 +       .uleb128 0x16
29404 +       .long   .LASF938
29405 +       .byte   0x29
29406 +       .value  0x116
29407 +       .long   0x4fdf
29408 +       .byte   0x3
29409 +       .byte   0x23
29410 +       .uleb128 0xbc
29411 +       .uleb128 0x16
29412 +       .long   .LASF939
29413 +       .byte   0x29
29414 +       .value  0x117
29415 +       .long   0x77
29416 +       .byte   0x3
29417 +       .byte   0x23
29418 +       .uleb128 0xc0
29419 +       .uleb128 0x16
29420 +       .long   .LASF940
29421 +       .byte   0x29
29422 +       .value  0x118
29423 +       .long   0x4fea
29424 +       .byte   0x3
29425 +       .byte   0x23
29426 +       .uleb128 0xc4
29427 +       .uleb128 0x16
29428 +       .long   .LASF941
29429 +       .byte   0x29
29430 +       .value  0x11a
29431 +       .long   0x4fdf
29432 +       .byte   0x3
29433 +       .byte   0x23
29434 +       .uleb128 0xc8
29435 +       .uleb128 0x16
29436 +       .long   .LASF942
29437 +       .byte   0x29
29438 +       .value  0x11b
29439 +       .long   0x77
29440 +       .byte   0x3
29441 +       .byte   0x23
29442 +       .uleb128 0xcc
29443 +       .uleb128 0x16
29444 +       .long   .LASF943
29445 +       .byte   0x29
29446 +       .value  0x11c
29447 +       .long   0x4fea
29448 +       .byte   0x3
29449 +       .byte   0x23
29450 +       .uleb128 0xd0
29451 +       .uleb128 0x16
29452 +       .long   .LASF944
29453 +       .byte   0x29
29454 +       .value  0x11f
29455 +       .long   0x4fdf
29456 +       .byte   0x3
29457 +       .byte   0x23
29458 +       .uleb128 0xd4
29459 +       .uleb128 0x16
29460 +       .long   .LASF945
29461 +       .byte   0x29
29462 +       .value  0x120
29463 +       .long   0x77
29464 +       .byte   0x3
29465 +       .byte   0x23
29466 +       .uleb128 0xd8
29467 +       .uleb128 0x16
29468 +       .long   .LASF946
29469 +       .byte   0x29
29470 +       .value  0x121
29471 +       .long   0x4fea
29472 +       .byte   0x3
29473 +       .byte   0x23
29474 +       .uleb128 0xdc
29475 +       .uleb128 0x16
29476 +       .long   .LASF947
29477 +       .byte   0x29
29478 +       .value  0x124
29479 +       .long   0x77
29480 +       .byte   0x3
29481 +       .byte   0x23
29482 +       .uleb128 0xe0
29483 +       .uleb128 0x16
29484 +       .long   .LASF948
29485 +       .byte   0x29
29486 +       .value  0x125
29487 +       .long   0x501e
29488 +       .byte   0x3
29489 +       .byte   0x23
29490 +       .uleb128 0xe4
29491 +       .uleb128 0x16
29492 +       .long   .LASF949
29493 +       .byte   0x29
29494 +       .value  0x128
29495 +       .long   0x92e
29496 +       .byte   0x3
29497 +       .byte   0x23
29498 +       .uleb128 0xe8
29499 +       .uleb128 0x16
29500 +       .long   .LASF950
29501 +       .byte   0x29
29502 +       .value  0x12b
29503 +       .long   0x160b
29504 +       .byte   0x3
29505 +       .byte   0x23
29506 +       .uleb128 0xec
29507 +       .uleb128 0x16
29508 +       .long   .LASF951
29509 +       .byte   0x29
29510 +       .value  0x12e
29511 +       .long   0x160b
29512 +       .byte   0x3
29513 +       .byte   0x23
29514 +       .uleb128 0xf0
29515 +       .uleb128 0x16
29516 +       .long   .LASF952
29517 +       .byte   0x29
29518 +       .value  0x131
29519 +       .long   0x2f
29520 +       .byte   0x3
29521 +       .byte   0x23
29522 +       .uleb128 0xf4
29523 +       .uleb128 0x16
29524 +       .long   .LASF953
29525 +       .byte   0x29
29526 +       .value  0x131
29527 +       .long   0x2f
29528 +       .byte   0x3
29529 +       .byte   0x23
29530 +       .uleb128 0xf8
29531 +       .uleb128 0x16
29532 +       .long   .LASF954
29533 +       .byte   0x29
29534 +       .value  0x134
29535 +       .long   0x2f
29536 +       .byte   0x3
29537 +       .byte   0x23
29538 +       .uleb128 0xfc
29539 +       .uleb128 0x16
29540 +       .long   .LASF955
29541 +       .byte   0x29
29542 +       .value  0x134
29543 +       .long   0x2f
29544 +       .byte   0x3
29545 +       .byte   0x23
29546 +       .uleb128 0x100
29547 +       .uleb128 0x16
29548 +       .long   .LASF956
29549 +       .byte   0x29
29550 +       .value  0x137
29551 +       .long   0x160b
29552 +       .byte   0x3
29553 +       .byte   0x23
29554 +       .uleb128 0x104
29555 +       .uleb128 0x16
29556 +       .long   .LASF957
29557 +       .byte   0x29
29558 +       .value  0x13a
29559 +       .long   0x4db9
29560 +       .byte   0x3
29561 +       .byte   0x23
29562 +       .uleb128 0x108
29563 +       .uleb128 0x16
29564 +       .long   .LASF958
29565 +       .byte   0x29
29566 +       .value  0x13d
29567 +       .long   0x21
29568 +       .byte   0x3
29569 +       .byte   0x23
29570 +       .uleb128 0x108
29571 +       .uleb128 0x16
29572 +       .long   .LASF959
29573 +       .byte   0x29
29574 +       .value  0x13f
29575 +       .long   0x77
29576 +       .byte   0x3
29577 +       .byte   0x23
29578 +       .uleb128 0x10c
29579 +       .uleb128 0x16
29580 +       .long   .LASF960
29581 +       .byte   0x29
29582 +       .value  0x143
29583 +       .long   0x17bc
29584 +       .byte   0x3
29585 +       .byte   0x23
29586 +       .uleb128 0x110
29587 +       .uleb128 0x16
29588 +       .long   .LASF961
29589 +       .byte   0x29
29590 +       .value  0x144
29591 +       .long   0x5029
29592 +       .byte   0x3
29593 +       .byte   0x23
29594 +       .uleb128 0x118
29595 +       .uleb128 0x16
29596 +       .long   .LASF962
29597 +       .byte   0x29
29598 +       .value  0x145
29599 +       .long   0x77
29600 +       .byte   0x3
29601 +       .byte   0x23
29602 +       .uleb128 0x11c
29603 +       .uleb128 0x17
29604 +       .string "ref"
29605 +       .byte   0x29
29606 +       .value  0x14a
29607 +       .long   0x502f
29608 +       .byte   0x3
29609 +       .byte   0x23
29610 +       .uleb128 0x180
29611 +       .uleb128 0x16
29612 +       .long   .LASF963
29613 +       .byte   0x29
29614 +       .value  0x14d
29615 +       .long   0x17bc
29616 +       .byte   0x3
29617 +       .byte   0x23
29618 +       .uleb128 0x1180
29619 +       .uleb128 0x16
29620 +       .long   .LASF964
29621 +       .byte   0x29
29622 +       .value  0x150
29623 +       .long   0x15f9
29624 +       .byte   0x3
29625 +       .byte   0x23
29626 +       .uleb128 0x1188
29627 +       .uleb128 0x16
29628 +       .long   .LASF965
29629 +       .byte   0x29
29630 +       .value  0x153
29631 +       .long   0x93a
29632 +       .byte   0x3
29633 +       .byte   0x23
29634 +       .uleb128 0x118c
29635 +       .uleb128 0x16
29636 +       .long   .LASF966
29637 +       .byte   0x29
29638 +       .value  0x158
29639 +       .long   0x503f
29640 +       .byte   0x3
29641 +       .byte   0x23
29642 +       .uleb128 0x1190
29643 +       .uleb128 0x16
29644 +       .long   .LASF967
29645 +       .byte   0x29
29646 +       .value  0x159
29647 +       .long   0x2f
29648 +       .byte   0x3
29649 +       .byte   0x23
29650 +       .uleb128 0x1194
29651 +       .uleb128 0x16
29652 +       .long   .LASF968
29653 +       .byte   0x29
29654 +       .value  0x15a
29655 +       .long   0xb5
29656 +       .byte   0x3
29657 +       .byte   0x23
29658 +       .uleb128 0x1198
29659 +       .uleb128 0x16
29660 +       .long   .LASF969
29661 +       .byte   0x29
29662 +       .value  0x15d
29663 +       .long   0x5045
29664 +       .byte   0x3
29665 +       .byte   0x23
29666 +       .uleb128 0x119c
29667 +       .uleb128 0x16
29668 +       .long   .LASF970
29669 +       .byte   0x29
29670 +       .value  0x161
29671 +       .long   0x160b
29672 +       .byte   0x3
29673 +       .byte   0x23
29674 +       .uleb128 0x11a0
29675 +       .uleb128 0x16
29676 +       .long   .LASF971
29677 +       .byte   0x29
29678 +       .value  0x165
29679 +       .long   0xb5
29680 +       .byte   0x3
29681 +       .byte   0x23
29682 +       .uleb128 0x11a4
29683 +       .byte   0x0
29684 +       .uleb128 0x4
29685 +       .byte   0x4
29686 +       .long   0x47d1
29687 +       .uleb128 0xf
29688 +       .long   0x4b28
29689 +       .long   .LASF972
29690 +       .byte   0x8
29691 +       .byte   0x26
29692 +       .byte   0x1d
29693 +       .uleb128 0xa
29694 +       .long   .LASF414
29695 +       .byte   0x26
29696 +       .byte   0x1e
29697 +       .long   0x7f2
29698 +       .byte   0x2
29699 +       .byte   0x23
29700 +       .uleb128 0x0
29701 +       .uleb128 0xa
29702 +       .long   .LASF973
29703 +       .byte   0x26
29704 +       .byte   0x1f
29705 +       .long   0x4b28
29706 +       .byte   0x2
29707 +       .byte   0x23
29708 +       .uleb128 0x4
29709 +       .byte   0x0
29710 +       .uleb128 0x4
29711 +       .byte   0x4
29712 +       .long   0x4b2e
29713 +       .uleb128 0x4
29714 +       .byte   0x4
29715 +       .long   0x479a
29716 +       .uleb128 0x4
29717 +       .byte   0x4
29718 +       .long   0x4b3a
29719 +       .uleb128 0xf
29720 +       .long   0x4bc5
29721 +       .long   .LASF974
29722 +       .byte   0x40
29723 +       .byte   0x26
29724 +       .byte   0x12
29725 +       .uleb128 0xa
29726 +       .long   .LASF975
29727 +       .byte   0x2a
29728 +       .byte   0x33
29729 +       .long   0x7f2
29730 +       .byte   0x2
29731 +       .byte   0x23
29732 +       .uleb128 0x0
29733 +       .uleb128 0xa
29734 +       .long   .LASF414
29735 +       .byte   0x2a
29736 +       .byte   0x34
29737 +       .long   0x4c33
29738 +       .byte   0x2
29739 +       .byte   0x23
29740 +       .uleb128 0x4
29741 +       .uleb128 0xa
29742 +       .long   .LASF449
29743 +       .byte   0x2a
29744 +       .byte   0x35
29745 +       .long   0x22e4
29746 +       .byte   0x2
29747 +       .byte   0x23
29748 +       .uleb128 0x18
29749 +       .uleb128 0xa
29750 +       .long   .LASF376
29751 +       .byte   0x2a
29752 +       .byte   0x36
29753 +       .long   0x17bc
29754 +       .byte   0x2
29755 +       .byte   0x23
29756 +       .uleb128 0x1c
29757 +       .uleb128 0xa
29758 +       .long   .LASF205
29759 +       .byte   0x2a
29760 +       .byte   0x37
29761 +       .long   0x4b34
29762 +       .byte   0x2
29763 +       .byte   0x23
29764 +       .uleb128 0x24
29765 +       .uleb128 0xa
29766 +       .long   .LASF976
29767 +       .byte   0x2a
29768 +       .byte   0x38
29769 +       .long   0x4c96
29770 +       .byte   0x2
29771 +       .byte   0x23
29772 +       .uleb128 0x28
29773 +       .uleb128 0xa
29774 +       .long   .LASF977
29775 +       .byte   0x2a
29776 +       .byte   0x39
29777 +       .long   0x4cd3
29778 +       .byte   0x2
29779 +       .byte   0x23
29780 +       .uleb128 0x2c
29781 +       .uleb128 0xa
29782 +       .long   .LASF517
29783 +       .byte   0x2a
29784 +       .byte   0x3a
29785 +       .long   0x28ec
29786 +       .byte   0x2
29787 +       .byte   0x23
29788 +       .uleb128 0x30
29789 +       .uleb128 0xa
29790 +       .long   .LASF978
29791 +       .byte   0x2a
29792 +       .byte   0x3b
29793 +       .long   0x18ef
29794 +       .byte   0x2
29795 +       .byte   0x23
29796 +       .uleb128 0x34
29797 +       .byte   0x0
29798 +       .uleb128 0xf
29799 +       .long   0x4bee
29800 +       .long   .LASF979
29801 +       .byte   0x8
29802 +       .byte   0x26
29803 +       .byte   0x44
29804 +       .uleb128 0xa
29805 +       .long   .LASF980
29806 +       .byte   0x26
29807 +       .byte   0x45
29808 +       .long   0x4c08
29809 +       .byte   0x2
29810 +       .byte   0x23
29811 +       .uleb128 0x0
29812 +       .uleb128 0xa
29813 +       .long   .LASF981
29814 +       .byte   0x26
29815 +       .byte   0x46
29816 +       .long   0x4c2d
29817 +       .byte   0x2
29818 +       .byte   0x23
29819 +       .uleb128 0x4
29820 +       .byte   0x0
29821 +       .uleb128 0x11
29822 +       .long   0x4c08
29823 +       .byte   0x1
29824 +       .long   0x209
29825 +       .uleb128 0x6
29826 +       .long   0x4b34
29827 +       .uleb128 0x6
29828 +       .long   0x4b2e
29829 +       .uleb128 0x6
29830 +       .long   0xb5
29831 +       .byte   0x0
29832 +       .uleb128 0x4
29833 +       .byte   0x4
29834 +       .long   0x4bee
29835 +       .uleb128 0x11
29836 +       .long   0x4c2d
29837 +       .byte   0x1
29838 +       .long   0x209
29839 +       .uleb128 0x6
29840 +       .long   0x4b34
29841 +       .uleb128 0x6
29842 +       .long   0x4b2e
29843 +       .uleb128 0x6
29844 +       .long   0x7f2
29845 +       .uleb128 0x6
29846 +       .long   0x1fe
29847 +       .byte   0x0
29848 +       .uleb128 0x4
29849 +       .byte   0x4
29850 +       .long   0x4c0e
29851 +       .uleb128 0x12
29852 +       .long   0x4c43
29853 +       .long   0xbb
29854 +       .uleb128 0x13
29855 +       .long   0x28
29856 +       .byte   0x13
29857 +       .byte   0x0
29858 +       .uleb128 0xf
29859 +       .long   0x4c96
29860 +       .long   .LASF976
29861 +       .byte   0x54
29862 +       .byte   0x2a
29863 +       .byte   0x38
29864 +       .uleb128 0xa
29865 +       .long   .LASF977
29866 +       .byte   0x2a
29867 +       .byte   0x7f
29868 +       .long   0x4cd3
29869 +       .byte   0x2
29870 +       .byte   0x23
29871 +       .uleb128 0x0
29872 +       .uleb128 0xa
29873 +       .long   .LASF509
29874 +       .byte   0x2a
29875 +       .byte   0x80
29876 +       .long   0x17bc
29877 +       .byte   0x2
29878 +       .byte   0x23
29879 +       .uleb128 0x4
29880 +       .uleb128 0xa
29881 +       .long   .LASF982
29882 +       .byte   0x2a
29883 +       .byte   0x81
29884 +       .long   0x1680
29885 +       .byte   0x2
29886 +       .byte   0x23
29887 +       .uleb128 0xc
29888 +       .uleb128 0xa
29889 +       .long   .LASF382
29890 +       .byte   0x2a
29891 +       .byte   0x82
29892 +       .long   0x4b3a
29893 +       .byte   0x2
29894 +       .byte   0x23
29895 +       .uleb128 0x10
29896 +       .uleb128 0xa
29897 +       .long   .LASF983
29898 +       .byte   0x2a
29899 +       .byte   0x83
29900 +       .long   0x4d93
29901 +       .byte   0x2
29902 +       .byte   0x23
29903 +       .uleb128 0x50
29904 +       .byte   0x0
29905 +       .uleb128 0x4
29906 +       .byte   0x4
29907 +       .long   0x4c43
29908 +       .uleb128 0xf
29909 +       .long   0x4cd3
29910 +       .long   .LASF984
29911 +       .byte   0xc
29912 +       .byte   0x2a
29913 +       .byte   0x39
29914 +       .uleb128 0xa
29915 +       .long   .LASF404
29916 +       .byte   0x2a
29917 +       .byte   0x60
29918 +       .long   0x4ce5
29919 +       .byte   0x2
29920 +       .byte   0x23
29921 +       .uleb128 0x0
29922 +       .uleb128 0xa
29923 +       .long   .LASF979
29924 +       .byte   0x2a
29925 +       .byte   0x61
29926 +       .long   0x4ceb
29927 +       .byte   0x2
29928 +       .byte   0x23
29929 +       .uleb128 0x4
29930 +       .uleb128 0xa
29931 +       .long   .LASF985
29932 +       .byte   0x2a
29933 +       .byte   0x62
29934 +       .long   0x4b28
29935 +       .byte   0x2
29936 +       .byte   0x23
29937 +       .uleb128 0x8
29938 +       .byte   0x0
29939 +       .uleb128 0x4
29940 +       .byte   0x4
29941 +       .long   0x4c9c
29942 +       .uleb128 0x5
29943 +       .long   0x4ce5
29944 +       .byte   0x1
29945 +       .uleb128 0x6
29946 +       .long   0x4b34
29947 +       .byte   0x0
29948 +       .uleb128 0x4
29949 +       .byte   0x4
29950 +       .long   0x4cd9
29951 +       .uleb128 0x4
29952 +       .byte   0x4
29953 +       .long   0x4bc5
29954 +       .uleb128 0xf
29955 +       .long   0x4d28
29956 +       .long   .LASF986
29957 +       .byte   0xc
29958 +       .byte   0x2a
29959 +       .byte   0x77
29960 +       .uleb128 0xa
29961 +       .long   .LASF987
29962 +       .byte   0x2a
29963 +       .byte   0x78
29964 +       .long   0x4d3d
29965 +       .byte   0x2
29966 +       .byte   0x23
29967 +       .uleb128 0x0
29968 +       .uleb128 0xa
29969 +       .long   .LASF414
29970 +       .byte   0x2a
29971 +       .byte   0x79
29972 +       .long   0x4d58
29973 +       .byte   0x2
29974 +       .byte   0x23
29975 +       .uleb128 0x4
29976 +       .uleb128 0xa
29977 +       .long   .LASF988
29978 +       .byte   0x2a
29979 +       .byte   0x7b
29980 +       .long   0x4d8d
29981 +       .byte   0x2
29982 +       .byte   0x23
29983 +       .uleb128 0x8
29984 +       .byte   0x0
29985 +       .uleb128 0x11
29986 +       .long   0x4d3d
29987 +       .byte   0x1
29988 +       .long   0x21
29989 +       .uleb128 0x6
29990 +       .long   0x4c96
29991 +       .uleb128 0x6
29992 +       .long   0x4b34
29993 +       .byte   0x0
29994 +       .uleb128 0x4
29995 +       .byte   0x4
29996 +       .long   0x4d28
29997 +       .uleb128 0x11
29998 +       .long   0x4d58
29999 +       .byte   0x1
30000 +       .long   0x7f2
30001 +       .uleb128 0x6
30002 +       .long   0x4c96
30003 +       .uleb128 0x6
30004 +       .long   0x4b34
30005 +       .byte   0x0
30006 +       .uleb128 0x4
30007 +       .byte   0x4
30008 +       .long   0x4d43
30009 +       .uleb128 0x11
30010 +       .long   0x4d87
30011 +       .byte   0x1
30012 +       .long   0x21
30013 +       .uleb128 0x6
30014 +       .long   0x4c96
30015 +       .uleb128 0x6
30016 +       .long   0x4b34
30017 +       .uleb128 0x6
30018 +       .long   0x4d87
30019 +       .uleb128 0x6
30020 +       .long   0x21
30021 +       .uleb128 0x6
30022 +       .long   0xb5
30023 +       .uleb128 0x6
30024 +       .long   0x21
30025 +       .byte   0x0
30026 +       .uleb128 0x4
30027 +       .byte   0x4
30028 +       .long   0xb5
30029 +       .uleb128 0x4
30030 +       .byte   0x4
30031 +       .long   0x4d5e
30032 +       .uleb128 0x4
30033 +       .byte   0x4
30034 +       .long   0x4cf1
30035 +       .uleb128 0x9
30036 +       .long   0x4dae
30037 +       .byte   0x4
30038 +       .byte   0x30
30039 +       .byte   0x9
30040 +       .uleb128 0xb
30041 +       .string "a"
30042 +       .byte   0x30
30043 +       .byte   0xa
30044 +       .long   0x16cf
30045 +       .byte   0x2
30046 +       .byte   0x23
30047 +       .uleb128 0x0
30048 +       .byte   0x0
30049 +       .uleb128 0x7
30050 +       .long   .LASF989
30051 +       .byte   0x30
30052 +       .byte   0xb
30053 +       .long   0x4d99
30054 +       .uleb128 0x2a
30055 +       .long   .LASF990
30056 +       .byte   0x0
30057 +       .byte   0x2e
30058 +       .byte   0x6
30059 +       .uleb128 0xf
30060 +       .long   0x4dea
30061 +       .long   .LASF991
30062 +       .byte   0x8
30063 +       .byte   0x29
30064 +       .byte   0x22
30065 +       .uleb128 0xa
30066 +       .long   .LASF992
30067 +       .byte   0x29
30068 +       .byte   0x23
30069 +       .long   0x2f
30070 +       .byte   0x2
30071 +       .byte   0x23
30072 +       .uleb128 0x0
30073 +       .uleb128 0xa
30074 +       .long   .LASF414
30075 +       .byte   0x29
30076 +       .byte   0x24
30077 +       .long   0x7f2
30078 +       .byte   0x2
30079 +       .byte   0x23
30080 +       .uleb128 0x4
30081 +       .byte   0x0
30082 +       .uleb128 0x12
30083 +       .long   0x4dfa
30084 +       .long   0xbb
30085 +       .uleb128 0x13
30086 +       .long   0x28
30087 +       .byte   0x3b
30088 +       .byte   0x0
30089 +       .uleb128 0xf
30090 +       .long   0x4e5b
30091 +       .long   .LASF993
30092 +       .byte   0x20
30093 +       .byte   0x29
30094 +       .byte   0x2f
30095 +       .uleb128 0xa
30096 +       .long   .LASF994
30097 +       .byte   0x29
30098 +       .byte   0x30
30099 +       .long   0x479a
30100 +       .byte   0x2
30101 +       .byte   0x23
30102 +       .uleb128 0x0
30103 +       .uleb128 0xa
30104 +       .long   .LASF980
30105 +       .byte   0x29
30106 +       .byte   0x31
30107 +       .long   0x4e7b
30108 +       .byte   0x2
30109 +       .byte   0x23
30110 +       .uleb128 0xc
30111 +       .uleb128 0xa
30112 +       .long   .LASF981
30113 +       .byte   0x29
30114 +       .byte   0x33
30115 +       .long   0x4ea0
30116 +       .byte   0x2
30117 +       .byte   0x23
30118 +       .uleb128 0x10
30119 +       .uleb128 0xa
30120 +       .long   .LASF995
30121 +       .byte   0x29
30122 +       .byte   0x34
30123 +       .long   0x4eb7
30124 +       .byte   0x2
30125 +       .byte   0x23
30126 +       .uleb128 0x14
30127 +       .uleb128 0xa
30128 +       .long   .LASF996
30129 +       .byte   0x29
30130 +       .byte   0x35
30131 +       .long   0x4ecd
30132 +       .byte   0x2
30133 +       .byte   0x23
30134 +       .uleb128 0x18
30135 +       .uleb128 0xa
30136 +       .long   .LASF997
30137 +       .byte   0x29
30138 +       .byte   0x36
30139 +       .long   0x4edf
30140 +       .byte   0x2
30141 +       .byte   0x23
30142 +       .uleb128 0x1c
30143 +       .byte   0x0
30144 +       .uleb128 0x11
30145 +       .long   0x4e75
30146 +       .byte   0x1
30147 +       .long   0x209
30148 +       .uleb128 0x6
30149 +       .long   0x4e75
30150 +       .uleb128 0x6
30151 +       .long   0x4af9
30152 +       .uleb128 0x6
30153 +       .long   0xb5
30154 +       .byte   0x0
30155 +       .uleb128 0x4
30156 +       .byte   0x4
30157 +       .long   0x4dfa
30158 +       .uleb128 0x4
30159 +       .byte   0x4
30160 +       .long   0x4e5b
30161 +       .uleb128 0x11
30162 +       .long   0x4ea0
30163 +       .byte   0x1
30164 +       .long   0x209
30165 +       .uleb128 0x6
30166 +       .long   0x4e75
30167 +       .uleb128 0x6
30168 +       .long   0x4af9
30169 +       .uleb128 0x6
30170 +       .long   0x7f2
30171 +       .uleb128 0x6
30172 +       .long   0x1fe
30173 +       .byte   0x0
30174 +       .uleb128 0x4
30175 +       .byte   0x4
30176 +       .long   0x4e81
30177 +       .uleb128 0x5
30178 +       .long   0x4eb7
30179 +       .byte   0x1
30180 +       .uleb128 0x6
30181 +       .long   0x4af9
30182 +       .uleb128 0x6
30183 +       .long   0x7f2
30184 +       .byte   0x0
30185 +       .uleb128 0x4
30186 +       .byte   0x4
30187 +       .long   0x4ea6
30188 +       .uleb128 0x11
30189 +       .long   0x4ecd
30190 +       .byte   0x1
30191 +       .long   0x21
30192 +       .uleb128 0x6
30193 +       .long   0x4af9
30194 +       .byte   0x0
30195 +       .uleb128 0x4
30196 +       .byte   0x4
30197 +       .long   0x4ebd
30198 +       .uleb128 0x5
30199 +       .long   0x4edf
30200 +       .byte   0x1
30201 +       .uleb128 0x6
30202 +       .long   0x4af9
30203 +       .byte   0x0
30204 +       .uleb128 0x4
30205 +       .byte   0x4
30206 +       .long   0x4ed3
30207 +       .uleb128 0xf
30208 +       .long   0x4f1c
30209 +       .long   .LASF998
30210 +       .byte   0x48
30211 +       .byte   0x29
30212 +       .byte   0x3a
30213 +       .uleb128 0xa
30214 +       .long   .LASF382
30215 +       .byte   0x29
30216 +       .byte   0x3b
30217 +       .long   0x4b3a
30218 +       .byte   0x2
30219 +       .byte   0x23
30220 +       .uleb128 0x0
30221 +       .uleb128 0xb
30222 +       .string "mod"
30223 +       .byte   0x29
30224 +       .byte   0x3c
30225 +       .long   0x4af9
30226 +       .byte   0x2
30227 +       .byte   0x23
30228 +       .uleb128 0x40
30229 +       .uleb128 0xa
30230 +       .long   .LASF999
30231 +       .byte   0x29
30232 +       .byte   0x3d
30233 +       .long   0x4b34
30234 +       .byte   0x2
30235 +       .byte   0x23
30236 +       .uleb128 0x44
30237 +       .byte   0x0
30238 +       .uleb128 0xf
30239 +       .long   0x4f37
30240 +       .long   .LASF1000
30241 +       .byte   0x80
30242 +       .byte   0x29
30243 +       .byte   0xdd
30244 +       .uleb128 0xa
30245 +       .long   .LASF322
30246 +       .byte   0x29
30247 +       .byte   0xde
30248 +       .long   0x4dae
30249 +       .byte   0x2
30250 +       .byte   0x23
30251 +       .uleb128 0x0
30252 +       .byte   0x0
30253 +       .uleb128 0x27
30254 +       .long   0x4f56
30255 +       .long   .LASF1001
30256 +       .byte   0x4
30257 +       .byte   0x29
30258 +       .byte   0xe2
30259 +       .uleb128 0x28
30260 +       .long   .LASF1002
30261 +       .sleb128 0
30262 +       .uleb128 0x28
30263 +       .long   .LASF1003
30264 +       .sleb128 1
30265 +       .uleb128 0x28
30266 +       .long   .LASF1004
30267 +       .sleb128 2
30268 +       .byte   0x0
30269 +       .uleb128 0xf
30270 +       .long   0x4f8d
30271 +       .long   .LASF1005
30272 +       .byte   0x28
30273 +       .byte   0x29
30274 +       .byte   0xea
30275 +       .uleb128 0xa
30276 +       .long   .LASF1006
30277 +       .byte   0x29
30278 +       .byte   0xeb
30279 +       .long   0x4dfa
30280 +       .byte   0x2
30281 +       .byte   0x23
30282 +       .uleb128 0x0
30283 +       .uleb128 0xa
30284 +       .long   .LASF414
30285 +       .byte   0x29
30286 +       .byte   0xec
30287 +       .long   0xb5
30288 +       .byte   0x2
30289 +       .byte   0x23
30290 +       .uleb128 0x20
30291 +       .uleb128 0xa
30292 +       .long   .LASF1007
30293 +       .byte   0x29
30294 +       .byte   0xed
30295 +       .long   0x2f
30296 +       .byte   0x2
30297 +       .byte   0x23
30298 +       .uleb128 0x24
30299 +       .byte   0x0
30300 +       .uleb128 0xf
30301 +       .long   0x4fc4
30302 +       .long   .LASF1008
30303 +       .byte   0xc
30304 +       .byte   0x29
30305 +       .byte   0xf1
30306 +       .uleb128 0xb
30307 +       .string "grp"
30308 +       .byte   0x29
30309 +       .byte   0xf2
30310 +       .long   0x4aff
30311 +       .byte   0x2
30312 +       .byte   0x23
30313 +       .uleb128 0x0
30314 +       .uleb128 0xa
30315 +       .long   .LASF1009
30316 +       .byte   0x29
30317 +       .byte   0xf3
30318 +       .long   0x21
30319 +       .byte   0x2
30320 +       .byte   0x23
30321 +       .uleb128 0x8
30322 +       .uleb128 0xa
30323 +       .long   .LASF973
30324 +       .byte   0x29
30325 +       .byte   0xf4
30326 +       .long   0x4fc4
30327 +       .byte   0x2
30328 +       .byte   0x23
30329 +       .uleb128 0xc
30330 +       .byte   0x0
30331 +       .uleb128 0x12
30332 +       .long   0x4fd3
30333 +       .long   0x4f56
30334 +       .uleb128 0x23
30335 +       .long   0x28
30336 +       .byte   0x0
30337 +       .uleb128 0x21
30338 +       .long   .LASF1010
30339 +       .byte   0x1
30340 +       .uleb128 0x4
30341 +       .byte   0x4
30342 +       .long   0x4fd3
30343 +       .uleb128 0x4
30344 +       .byte   0x4
30345 +       .long   0x4fe5
30346 +       .uleb128 0x14
30347 +       .long   0x4dc1
30348 +       .uleb128 0x4
30349 +       .byte   0x4
30350 +       .long   0x4ff0
30351 +       .uleb128 0x14
30352 +       .long   0x2f
30353 +       .uleb128 0xf
30354 +       .long   0x501e
30355 +       .long   .LASF1011
30356 +       .byte   0x8
30357 +       .byte   0x29
30358 +       .byte   0x45
30359 +       .uleb128 0xa
30360 +       .long   .LASF1012
30361 +       .byte   0x2d
30362 +       .byte   0x64
30363 +       .long   0x2f
30364 +       .byte   0x2
30365 +       .byte   0x23
30366 +       .uleb128 0x0
30367 +       .uleb128 0xa
30368 +       .long   .LASF1013
30369 +       .byte   0x2d
30370 +       .byte   0x64
30371 +       .long   0x2f
30372 +       .byte   0x2
30373 +       .byte   0x23
30374 +       .uleb128 0x4
30375 +       .byte   0x0
30376 +       .uleb128 0x4
30377 +       .byte   0x4
30378 +       .long   0x5024
30379 +       .uleb128 0x14
30380 +       .long   0x4ff5
30381 +       .uleb128 0x4
30382 +       .byte   0x4
30383 +       .long   0x7ad
30384 +       .uleb128 0x12
30385 +       .long   0x503f
30386 +       .long   0x4f1c
30387 +       .uleb128 0x13
30388 +       .long   0x28
30389 +       .byte   0x1f
30390 +       .byte   0x0
30391 +       .uleb128 0x4
30392 +       .byte   0x4
30393 +       .long   0x478f
30394 +       .uleb128 0x4
30395 +       .byte   0x4
30396 +       .long   0x4f8d
30397 +       .uleb128 0x7
30398 +       .long   .LASF1014
30399 +       .byte   0x6b
30400 +       .byte   0x13
30401 +       .long   0x21
30402 +       .uleb128 0x7
30403 +       .long   .LASF1015
30404 +       .byte   0x6a
30405 +       .byte   0x1d
30406 +       .long   0x5061
30407 +       .uleb128 0x4
30408 +       .byte   0x4
30409 +       .long   0x5067
30410 +       .uleb128 0x5
30411 +       .long   0x5078
30412 +       .byte   0x1
30413 +       .uleb128 0x6
30414 +       .long   0x77
30415 +       .uleb128 0x6
30416 +       .long   0x5078
30417 +       .byte   0x0
30418 +       .uleb128 0x4
30419 +       .byte   0x4
30420 +       .long   0x507e
30421 +       .uleb128 0xf
30422 +       .long   0x5179
30423 +       .long   .LASF1016
30424 +       .byte   0x80
30425 +       .byte   0x6a
30426 +       .byte   0x1b
30427 +       .uleb128 0xa
30428 +       .long   .LASF1017
30429 +       .byte   0x6a
30430 +       .byte   0x98
30431 +       .long   0x5056
30432 +       .byte   0x2
30433 +       .byte   0x23
30434 +       .uleb128 0x0
30435 +       .uleb128 0xa
30436 +       .long   .LASF1018
30437 +       .byte   0x6a
30438 +       .byte   0x99
30439 +       .long   0x52ae
30440 +       .byte   0x2
30441 +       .byte   0x23
30442 +       .uleb128 0x4
30443 +       .uleb128 0xa
30444 +       .long   .LASF1019
30445 +       .byte   0x6a
30446 +       .byte   0x9a
30447 +       .long   0x52ba
30448 +       .byte   0x2
30449 +       .byte   0x23
30450 +       .uleb128 0x8
30451 +       .uleb128 0xa
30452 +       .long   .LASF1020
30453 +       .byte   0x6a
30454 +       .byte   0x9b
30455 +       .long   0x160b
30456 +       .byte   0x2
30457 +       .byte   0x23
30458 +       .uleb128 0xc
30459 +       .uleb128 0xa
30460 +       .long   .LASF1021
30461 +       .byte   0x6a
30462 +       .byte   0x9c
30463 +       .long   0x160b
30464 +       .byte   0x2
30465 +       .byte   0x23
30466 +       .uleb128 0x10
30467 +       .uleb128 0xa
30468 +       .long   .LASF840
30469 +       .byte   0x6a
30470 +       .byte   0x9d
30471 +       .long   0x533d
30472 +       .byte   0x2
30473 +       .byte   0x23
30474 +       .uleb128 0x14
30475 +       .uleb128 0xa
30476 +       .long   .LASF126
30477 +       .byte   0x6a
30478 +       .byte   0x9e
30479 +       .long   0x77
30480 +       .byte   0x2
30481 +       .byte   0x23
30482 +       .uleb128 0x18
30483 +       .uleb128 0xa
30484 +       .long   .LASF1022
30485 +       .byte   0x6a
30486 +       .byte   0xa0
30487 +       .long   0x77
30488 +       .byte   0x2
30489 +       .byte   0x23
30490 +       .uleb128 0x1c
30491 +       .uleb128 0xa
30492 +       .long   .LASF1023
30493 +       .byte   0x6a
30494 +       .byte   0xa1
30495 +       .long   0x77
30496 +       .byte   0x2
30497 +       .byte   0x23
30498 +       .uleb128 0x20
30499 +       .uleb128 0xa
30500 +       .long   .LASF1024
30501 +       .byte   0x6a
30502 +       .byte   0xa2
30503 +       .long   0x77
30504 +       .byte   0x2
30505 +       .byte   0x23
30506 +       .uleb128 0x24
30507 +       .uleb128 0xa
30508 +       .long   .LASF1025
30509 +       .byte   0x6a
30510 +       .byte   0xa3
30511 +       .long   0x77
30512 +       .byte   0x2
30513 +       .byte   0x23
30514 +       .uleb128 0x28
30515 +       .uleb128 0xa
30516 +       .long   .LASF285
30517 +       .byte   0x6a
30518 +       .byte   0xa4
30519 +       .long   0x1680
30520 +       .byte   0x2
30521 +       .byte   0x23
30522 +       .uleb128 0x2c
30523 +       .uleb128 0xa
30524 +       .long   .LASF1026
30525 +       .byte   0x6a
30526 +       .byte   0xa6
30527 +       .long   0x923
30528 +       .byte   0x2
30529 +       .byte   0x23
30530 +       .uleb128 0x30
30531 +       .uleb128 0xb
30532 +       .string "cpu"
30533 +       .byte   0x6a
30534 +       .byte   0xa7
30535 +       .long   0x77
30536 +       .byte   0x2
30537 +       .byte   0x23
30538 +       .uleb128 0x34
30539 +       .uleb128 0xa
30540 +       .long   .LASF1027
30541 +       .byte   0x6a
30542 +       .byte   0xaa
30543 +       .long   0x923
30544 +       .byte   0x2
30545 +       .byte   0x23
30546 +       .uleb128 0x38
30547 +       .uleb128 0xb
30548 +       .string "dir"
30549 +       .byte   0x6a
30550 +       .byte   0xad
30551 +       .long   0x5349
30552 +       .byte   0x2
30553 +       .byte   0x23
30554 +       .uleb128 0x3c
30555 +       .uleb128 0xa
30556 +       .long   .LASF414
30557 +       .byte   0x6a
30558 +       .byte   0xaf
30559 +       .long   0x7f2
30560 +       .byte   0x2
30561 +       .byte   0x23
30562 +       .uleb128 0x40
30563 +       .byte   0x0
30564 +       .uleb128 0xf
30565 +       .long   0x5266
30566 +       .long   .LASF1028
30567 +       .byte   0x40
30568 +       .byte   0x6a
30569 +       .byte   0x62
30570 +       .uleb128 0xa
30571 +       .long   .LASF414
30572 +       .byte   0x6a
30573 +       .byte   0x63
30574 +       .long   0x7f2
30575 +       .byte   0x2
30576 +       .byte   0x23
30577 +       .uleb128 0x0
30578 +       .uleb128 0xa
30579 +       .long   .LASF1029
30580 +       .byte   0x6a
30581 +       .byte   0x64
30582 +       .long   0x5276
30583 +       .byte   0x2
30584 +       .byte   0x23
30585 +       .uleb128 0x4
30586 +       .uleb128 0xa
30587 +       .long   .LASF1030
30588 +       .byte   0x6a
30589 +       .byte   0x65
30590 +       .long   0x1efe
30591 +       .byte   0x2
30592 +       .byte   0x23
30593 +       .uleb128 0x8
30594 +       .uleb128 0xa
30595 +       .long   .LASF1031
30596 +       .byte   0x6a
30597 +       .byte   0x66
30598 +       .long   0x1efe
30599 +       .byte   0x2
30600 +       .byte   0x23
30601 +       .uleb128 0xc
30602 +       .uleb128 0xa
30603 +       .long   .LASF1032
30604 +       .byte   0x6a
30605 +       .byte   0x67
30606 +       .long   0x1efe
30607 +       .byte   0x2
30608 +       .byte   0x23
30609 +       .uleb128 0x10
30610 +       .uleb128 0xb
30611 +       .string "ack"
30612 +       .byte   0x6a
30613 +       .byte   0x69
30614 +       .long   0x1efe
30615 +       .byte   0x2
30616 +       .byte   0x23
30617 +       .uleb128 0x14
30618 +       .uleb128 0xa
30619 +       .long   .LASF364
30620 +       .byte   0x6a
30621 +       .byte   0x6a
30622 +       .long   0x1efe
30623 +       .byte   0x2
30624 +       .byte   0x23
30625 +       .uleb128 0x18
30626 +       .uleb128 0xa
30627 +       .long   .LASF1033
30628 +       .byte   0x6a
30629 +       .byte   0x6b
30630 +       .long   0x1efe
30631 +       .byte   0x2
30632 +       .byte   0x23
30633 +       .uleb128 0x1c
30634 +       .uleb128 0xa
30635 +       .long   .LASF1034
30636 +       .byte   0x6a
30637 +       .byte   0x6c
30638 +       .long   0x1efe
30639 +       .byte   0x2
30640 +       .byte   0x23
30641 +       .uleb128 0x20
30642 +       .uleb128 0xb
30643 +       .string "eoi"
30644 +       .byte   0x6a
30645 +       .byte   0x6d
30646 +       .long   0x1efe
30647 +       .byte   0x2
30648 +       .byte   0x23
30649 +       .uleb128 0x24
30650 +       .uleb128 0xb
30651 +       .string "end"
30652 +       .byte   0x6a
30653 +       .byte   0x6f
30654 +       .long   0x1efe
30655 +       .byte   0x2
30656 +       .byte   0x23
30657 +       .uleb128 0x28
30658 +       .uleb128 0xa
30659 +       .long   .LASF1035
30660 +       .byte   0x6a
30661 +       .byte   0x70
30662 +       .long   0x528d
30663 +       .byte   0x2
30664 +       .byte   0x23
30665 +       .uleb128 0x2c
30666 +       .uleb128 0xa
30667 +       .long   .LASF1036
30668 +       .byte   0x6a
30669 +       .byte   0x71
30670 +       .long   0x1f14
30671 +       .byte   0x2
30672 +       .byte   0x23
30673 +       .uleb128 0x30
30674 +       .uleb128 0xa
30675 +       .long   .LASF1037
30676 +       .byte   0x6a
30677 +       .byte   0x72
30678 +       .long   0x52a8
30679 +       .byte   0x2
30680 +       .byte   0x23
30681 +       .uleb128 0x34
30682 +       .uleb128 0xa
30683 +       .long   .LASF1038
30684 +       .byte   0x6a
30685 +       .byte   0x73
30686 +       .long   0x52a8
30687 +       .byte   0x2
30688 +       .byte   0x23
30689 +       .uleb128 0x38
30690 +       .uleb128 0xa
30691 +       .long   .LASF1039
30692 +       .byte   0x6a
30693 +       .byte   0x7d
30694 +       .long   0x7f2
30695 +       .byte   0x2
30696 +       .byte   0x23
30697 +       .uleb128 0x3c
30698 +       .byte   0x0
30699 +       .uleb128 0x11
30700 +       .long   0x5276
30701 +       .byte   0x1
30702 +       .long   0x77
30703 +       .uleb128 0x6
30704 +       .long   0x77
30705 +       .byte   0x0
30706 +       .uleb128 0x4
30707 +       .byte   0x4
30708 +       .long   0x5266
30709 +       .uleb128 0x5
30710 +       .long   0x528d
30711 +       .byte   0x1
30712 +       .uleb128 0x6
30713 +       .long   0x77
30714 +       .uleb128 0x6
30715 +       .long   0x923
30716 +       .byte   0x0
30717 +       .uleb128 0x4
30718 +       .byte   0x4
30719 +       .long   0x527c
30720 +       .uleb128 0x11
30721 +       .long   0x52a8
30722 +       .byte   0x1
30723 +       .long   0x21
30724 +       .uleb128 0x6
30725 +       .long   0x77
30726 +       .uleb128 0x6
30727 +       .long   0x77
30728 +       .byte   0x0
30729 +       .uleb128 0x4
30730 +       .byte   0x4
30731 +       .long   0x5293
30732 +       .uleb128 0x4
30733 +       .byte   0x4
30734 +       .long   0x5179
30735 +       .uleb128 0x21
30736 +       .long   .LASF1019
30737 +       .byte   0x1
30738 +       .uleb128 0x4
30739 +       .byte   0x4
30740 +       .long   0x52b4
30741 +       .uleb128 0xf
30742 +       .long   0x533d
30743 +       .long   .LASF1040
30744 +       .byte   0x20
30745 +       .byte   0x6a
30746 +       .byte   0x9d
30747 +       .uleb128 0xa
30748 +       .long   .LASF1041
30749 +       .byte   0x56
30750 +       .byte   0x55
30751 +       .long   0x5c83
30752 +       .byte   0x2
30753 +       .byte   0x23
30754 +       .uleb128 0x0
30755 +       .uleb128 0xa
30756 +       .long   .LASF53
30757 +       .byte   0x56
30758 +       .byte   0x56
30759 +       .long   0x2f
30760 +       .byte   0x2
30761 +       .byte   0x23
30762 +       .uleb128 0x4
30763 +       .uleb128 0xa
30764 +       .long   .LASF364
30765 +       .byte   0x56
30766 +       .byte   0x57
30767 +       .long   0x923
30768 +       .byte   0x2
30769 +       .byte   0x23
30770 +       .uleb128 0x8
30771 +       .uleb128 0xa
30772 +       .long   .LASF414
30773 +       .byte   0x56
30774 +       .byte   0x58
30775 +       .long   0x7f2
30776 +       .byte   0x2
30777 +       .byte   0x23
30778 +       .uleb128 0xc
30779 +       .uleb128 0xa
30780 +       .long   .LASF1042
30781 +       .byte   0x56
30782 +       .byte   0x59
30783 +       .long   0x160b
30784 +       .byte   0x2
30785 +       .byte   0x23
30786 +       .uleb128 0x10
30787 +       .uleb128 0xa
30788 +       .long   .LASF307
30789 +       .byte   0x56
30790 +       .byte   0x5a
30791 +       .long   0x533d
30792 +       .byte   0x2
30793 +       .byte   0x23
30794 +       .uleb128 0x14
30795 +       .uleb128 0xb
30796 +       .string "irq"
30797 +       .byte   0x56
30798 +       .byte   0x5b
30799 +       .long   0x21
30800 +       .byte   0x2
30801 +       .byte   0x23
30802 +       .uleb128 0x18
30803 +       .uleb128 0xb
30804 +       .string "dir"
30805 +       .byte   0x56
30806 +       .byte   0x5c
30807 +       .long   0x5349
30808 +       .byte   0x2
30809 +       .byte   0x23
30810 +       .uleb128 0x1c
30811 +       .byte   0x0
30812 +       .uleb128 0x4
30813 +       .byte   0x4
30814 +       .long   0x52c0
30815 +       .uleb128 0x21
30816 +       .long   .LASF1043
30817 +       .byte   0x1
30818 +       .uleb128 0x4
30819 +       .byte   0x4
30820 +       .long   0x5343
30821 +       .uleb128 0xf
30822 +       .long   0x53be
30823 +       .long   .LASF1044
30824 +       .byte   0x24
30825 +       .byte   0x6d
30826 +       .byte   0x11
30827 +       .uleb128 0xa
30828 +       .long   .LASF1045
30829 +       .byte   0x6d
30830 +       .byte   0x12
30831 +       .long   0x24b
30832 +       .byte   0x2
30833 +       .byte   0x23
30834 +       .uleb128 0x0
30835 +       .uleb128 0xb
30836 +       .string "end"
30837 +       .byte   0x6d
30838 +       .byte   0x13
30839 +       .long   0x24b
30840 +       .byte   0x2
30841 +       .byte   0x23
30842 +       .uleb128 0x8
30843 +       .uleb128 0xa
30844 +       .long   .LASF414
30845 +       .byte   0x6d
30846 +       .byte   0x14
30847 +       .long   0x7f2
30848 +       .byte   0x2
30849 +       .byte   0x23
30850 +       .uleb128 0x10
30851 +       .uleb128 0xa
30852 +       .long   .LASF53
30853 +       .byte   0x6d
30854 +       .byte   0x15
30855 +       .long   0x2f
30856 +       .byte   0x2
30857 +       .byte   0x23
30858 +       .uleb128 0x14
30859 +       .uleb128 0xa
30860 +       .long   .LASF205
30861 +       .byte   0x6d
30862 +       .byte   0x16
30863 +       .long   0x53be
30864 +       .byte   0x2
30865 +       .byte   0x23
30866 +       .uleb128 0x18
30867 +       .uleb128 0xa
30868 +       .long   .LASF207
30869 +       .byte   0x6d
30870 +       .byte   0x16
30871 +       .long   0x53be
30872 +       .byte   0x2
30873 +       .byte   0x23
30874 +       .uleb128 0x1c
30875 +       .uleb128 0xa
30876 +       .long   .LASF1046
30877 +       .byte   0x6d
30878 +       .byte   0x16
30879 +       .long   0x53be
30880 +       .byte   0x2
30881 +       .byte   0x23
30882 +       .uleb128 0x20
30883 +       .byte   0x0
30884 +       .uleb128 0x4
30885 +       .byte   0x4
30886 +       .long   0x534f
30887 +       .uleb128 0xf
30888 +       .long   0x5409
30889 +       .long   .LASF1047
30890 +       .byte   0x14
30891 +       .byte   0x5e
30892 +       .byte   0x15
30893 +       .uleb128 0xa
30894 +       .long   .LASF1048
30895 +       .byte   0x5e
30896 +       .byte   0x16
30897 +       .long   0x1680
30898 +       .byte   0x2
30899 +       .byte   0x23
30900 +       .uleb128 0x0
30901 +       .uleb128 0xa
30902 +       .long   .LASF1049
30903 +       .byte   0x5e
30904 +       .byte   0x17
30905 +       .long   0x17bc
30906 +       .byte   0x2
30907 +       .byte   0x23
30908 +       .uleb128 0x4
30909 +       .uleb128 0xb
30910 +       .string "get"
30911 +       .byte   0x5e
30912 +       .byte   0x18
30913 +       .long   0x5460
30914 +       .byte   0x2
30915 +       .byte   0x23
30916 +       .uleb128 0xc
30917 +       .uleb128 0xb
30918 +       .string "put"
30919 +       .byte   0x5e
30920 +       .byte   0x19
30921 +       .long   0x5460
30922 +       .byte   0x2
30923 +       .byte   0x23
30924 +       .uleb128 0x10
30925 +       .byte   0x0
30926 +       .uleb128 0x5
30927 +       .long   0x5415
30928 +       .byte   0x1
30929 +       .uleb128 0x6
30930 +       .long   0x5415
30931 +       .byte   0x0
30932 +       .uleb128 0x4
30933 +       .byte   0x4
30934 +       .long   0x541b
30935 +       .uleb128 0xf
30936 +       .long   0x5460
30937 +       .long   .LASF1050
30938 +       .byte   0x20
30939 +       .byte   0x5e
30940 +       .byte   0x14
30941 +       .uleb128 0xa
30942 +       .long   .LASF1051
30943 +       .byte   0x5e
30944 +       .byte   0x21
30945 +       .long   0x5466
30946 +       .byte   0x2
30947 +       .byte   0x23
30948 +       .uleb128 0x0
30949 +       .uleb128 0xa
30950 +       .long   .LASF1052
30951 +       .byte   0x5e
30952 +       .byte   0x22
30953 +       .long   0x17bc
30954 +       .byte   0x2
30955 +       .byte   0x23
30956 +       .uleb128 0x4
30957 +       .uleb128 0xa
30958 +       .long   .LASF1053
30959 +       .byte   0x5e
30960 +       .byte   0x23
30961 +       .long   0x22e4
30962 +       .byte   0x2
30963 +       .byte   0x23
30964 +       .uleb128 0xc
30965 +       .uleb128 0xa
30966 +       .long   .LASF1054
30967 +       .byte   0x5e
30968 +       .byte   0x24
30969 +       .long   0x28fe
30970 +       .byte   0x2
30971 +       .byte   0x23
30972 +       .uleb128 0x10
30973 +       .byte   0x0
30974 +       .uleb128 0x4
30975 +       .byte   0x4
30976 +       .long   0x5409
30977 +       .uleb128 0x4
30978 +       .byte   0x4
30979 +       .long   0x53c4
30980 +       .uleb128 0xf
30981 +       .long   0x5487
30982 +       .long   .LASF1055
30983 +       .byte   0x4
30984 +       .byte   0x5f
30985 +       .byte   0x9
30986 +       .uleb128 0xa
30987 +       .long   .LASF1056
30988 +       .byte   0x5f
30989 +       .byte   0xb
30990 +       .long   0x160b
30991 +       .byte   0x2
30992 +       .byte   0x23
30993 +       .uleb128 0x0
30994 +       .byte   0x0
30995 +       .uleb128 0xf
30996 +       .long   0x54be
30997 +       .long   .LASF1057
30998 +       .byte   0x14
30999 +       .byte   0x5d
31000 +       .byte   0x27
31001 +       .uleb128 0xa
31002 +       .long   .LASF994
31003 +       .byte   0x5d
31004 +       .byte   0x28
31005 +       .long   0x479a
31006 +       .byte   0x2
31007 +       .byte   0x23
31008 +       .uleb128 0x0
31009 +       .uleb128 0xa
31010 +       .long   .LASF980
31011 +       .byte   0x5d
31012 +       .byte   0x29
31013 +       .long   0x563f
31014 +       .byte   0x2
31015 +       .byte   0x23
31016 +       .uleb128 0xc
31017 +       .uleb128 0xa
31018 +       .long   .LASF981
31019 +       .byte   0x5d
31020 +       .byte   0x2a
31021 +       .long   0x565f
31022 +       .byte   0x2
31023 +       .byte   0x23
31024 +       .uleb128 0x10
31025 +       .byte   0x0
31026 +       .uleb128 0x11
31027 +       .long   0x54d3
31028 +       .byte   0x1
31029 +       .long   0x209
31030 +       .uleb128 0x6
31031 +       .long   0x54d3
31032 +       .uleb128 0x6
31033 +       .long   0xb5
31034 +       .byte   0x0
31035 +       .uleb128 0x4
31036 +       .byte   0x4
31037 +       .long   0x54d9
31038 +       .uleb128 0x1a
31039 +       .long   0x563f
31040 +       .long   .LASF1058
31041 +       .value  0x19c
31042 +       .byte   0x5d
31043 +       .byte   0x25
31044 +       .uleb128 0xa
31045 +       .long   .LASF414
31046 +       .byte   0x5d
31047 +       .byte   0x35
31048 +       .long   0x7f2
31049 +       .byte   0x2
31050 +       .byte   0x23
31051 +       .uleb128 0x0
31052 +       .uleb128 0xa
31053 +       .long   .LASF594
31054 +       .byte   0x5d
31055 +       .byte   0x36
31056 +       .long   0x4af9
31057 +       .byte   0x2
31058 +       .byte   0x23
31059 +       .uleb128 0x4
31060 +       .uleb128 0xa
31061 +       .long   .LASF1059
31062 +       .byte   0x5d
31063 +       .byte   0x38
31064 +       .long   0x4c43
31065 +       .byte   0x2
31066 +       .byte   0x23
31067 +       .uleb128 0x8
31068 +       .uleb128 0xa
31069 +       .long   .LASF1060
31070 +       .byte   0x5d
31071 +       .byte   0x39
31072 +       .long   0x4c43
31073 +       .byte   0x2
31074 +       .byte   0x23
31075 +       .uleb128 0x5c
31076 +       .uleb128 0xa
31077 +       .long   .LASF1061
31078 +       .byte   0x5d
31079 +       .byte   0x3a
31080 +       .long   0x4c43
31081 +       .byte   0x3
31082 +       .byte   0x23
31083 +       .uleb128 0xb0
31084 +       .uleb128 0xa
31085 +       .long   .LASF1062
31086 +       .byte   0x5d
31087 +       .byte   0x3b
31088 +       .long   0x53c4
31089 +       .byte   0x3
31090 +       .byte   0x23
31091 +       .uleb128 0x104
31092 +       .uleb128 0xa
31093 +       .long   .LASF1063
31094 +       .byte   0x5d
31095 +       .byte   0x3c
31096 +       .long   0x53c4
31097 +       .byte   0x3
31098 +       .byte   0x23
31099 +       .uleb128 0x118
31100 +       .uleb128 0xa
31101 +       .long   .LASF1064
31102 +       .byte   0x5d
31103 +       .byte   0x3e
31104 +       .long   0x2e3c
31105 +       .byte   0x3
31106 +       .byte   0x23
31107 +       .uleb128 0x12c
31108 +       .uleb128 0xa
31109 +       .long   .LASF1065
31110 +       .byte   0x5d
31111 +       .byte   0x40
31112 +       .long   0x5665
31113 +       .byte   0x3
31114 +       .byte   0x23
31115 +       .uleb128 0x140
31116 +       .uleb128 0xa
31117 +       .long   .LASF1066
31118 +       .byte   0x5d
31119 +       .byte   0x41
31120 +       .long   0x56a5
31121 +       .byte   0x3
31122 +       .byte   0x23
31123 +       .uleb128 0x144
31124 +       .uleb128 0xa
31125 +       .long   .LASF1067
31126 +       .byte   0x5d
31127 +       .byte   0x42
31128 +       .long   0x56e2
31129 +       .byte   0x3
31130 +       .byte   0x23
31131 +       .uleb128 0x148
31132 +       .uleb128 0xa
31133 +       .long   .LASF1068
31134 +       .byte   0x5d
31135 +       .byte   0x43
31136 +       .long   0x5487
31137 +       .byte   0x3
31138 +       .byte   0x23
31139 +       .uleb128 0x14c
31140 +       .uleb128 0xa
31141 +       .long   .LASF1069
31142 +       .byte   0x5d
31143 +       .byte   0x44
31144 +       .long   0x5487
31145 +       .byte   0x3
31146 +       .byte   0x23
31147 +       .uleb128 0x160
31148 +       .uleb128 0xa
31149 +       .long   .LASF1070
31150 +       .byte   0x5d
31151 +       .byte   0x46
31152 +       .long   0x57ce
31153 +       .byte   0x3
31154 +       .byte   0x23
31155 +       .uleb128 0x174
31156 +       .uleb128 0xa
31157 +       .long   .LASF988
31158 +       .byte   0x5d
31159 +       .byte   0x48
31160 +       .long   0x57f8
31161 +       .byte   0x3
31162 +       .byte   0x23
31163 +       .uleb128 0x178
31164 +       .uleb128 0xa
31165 +       .long   .LASF415
31166 +       .byte   0x5d
31167 +       .byte   0x49
31168 +       .long   0x580e
31169 +       .byte   0x3
31170 +       .byte   0x23
31171 +       .uleb128 0x17c
31172 +       .uleb128 0xa
31173 +       .long   .LASF1071
31174 +       .byte   0x5d
31175 +       .byte   0x4a
31176 +       .long   0x580e
31177 +       .byte   0x3
31178 +       .byte   0x23
31179 +       .uleb128 0x180
31180 +       .uleb128 0xa
31181 +       .long   .LASF1030
31182 +       .byte   0x5d
31183 +       .byte   0x4b
31184 +       .long   0x5820
31185 +       .byte   0x3
31186 +       .byte   0x23
31187 +       .uleb128 0x184
31188 +       .uleb128 0xa
31189 +       .long   .LASF1072
31190 +       .byte   0x5d
31191 +       .byte   0x4d
31192 +       .long   0x583b
31193 +       .byte   0x3
31194 +       .byte   0x23
31195 +       .uleb128 0x188
31196 +       .uleb128 0xa
31197 +       .long   .LASF1073
31198 +       .byte   0x5d
31199 +       .byte   0x4e
31200 +       .long   0x583b
31201 +       .byte   0x3
31202 +       .byte   0x23
31203 +       .uleb128 0x18c
31204 +       .uleb128 0xa
31205 +       .long   .LASF1074
31206 +       .byte   0x5d
31207 +       .byte   0x4f
31208 +       .long   0x580e
31209 +       .byte   0x3
31210 +       .byte   0x23
31211 +       .uleb128 0x190
31212 +       .uleb128 0xa
31213 +       .long   .LASF1075
31214 +       .byte   0x5d
31215 +       .byte   0x50
31216 +       .long   0x580e
31217 +       .byte   0x3
31218 +       .byte   0x23
31219 +       .uleb128 0x194
31220 +       .uleb128 0x2f
31221 +       .long   .LASF1076
31222 +       .byte   0x5d
31223 +       .byte   0x52
31224 +       .long   0x77
31225 +       .byte   0x4
31226 +       .byte   0x1
31227 +       .byte   0x1f
31228 +       .byte   0x3
31229 +       .byte   0x23
31230 +       .uleb128 0x198
31231 +       .byte   0x0
31232 +       .uleb128 0x4
31233 +       .byte   0x4
31234 +       .long   0x54be
31235 +       .uleb128 0x11
31236 +       .long   0x565f
31237 +       .byte   0x1
31238 +       .long   0x209
31239 +       .uleb128 0x6
31240 +       .long   0x54d3
31241 +       .uleb128 0x6
31242 +       .long   0x7f2
31243 +       .uleb128 0x6
31244 +       .long   0x1fe
31245 +       .byte   0x0
31246 +       .uleb128 0x4
31247 +       .byte   0x4
31248 +       .long   0x5645
31249 +       .uleb128 0x4
31250 +       .byte   0x4
31251 +       .long   0x5487
31252 +       .uleb128 0xf
31253 +       .long   0x56a5
31254 +       .long   .LASF1077
31255 +       .byte   0x14
31256 +       .byte   0x5d
31257 +       .byte   0x41
31258 +       .uleb128 0x16
31259 +       .long   .LASF994
31260 +       .byte   0x5d
31261 +       .value  0x160
31262 +       .long   0x479a
31263 +       .byte   0x2
31264 +       .byte   0x23
31265 +       .uleb128 0x0
31266 +       .uleb128 0x16
31267 +       .long   .LASF980
31268 +       .byte   0x5d
31269 +       .value  0x162
31270 +       .long   0x5c40
31271 +       .byte   0x2
31272 +       .byte   0x23
31273 +       .uleb128 0xc
31274 +       .uleb128 0x16
31275 +       .long   .LASF981
31276 +       .byte   0x5d
31277 +       .value  0x164
31278 +       .long   0x5c65
31279 +       .byte   0x2
31280 +       .byte   0x23
31281 +       .uleb128 0x10
31282 +       .byte   0x0
31283 +       .uleb128 0x4
31284 +       .byte   0x4
31285 +       .long   0x566b
31286 +       .uleb128 0xf
31287 +       .long   0x56e2
31288 +       .long   .LASF1078
31289 +       .byte   0x14
31290 +       .byte   0x5d
31291 +       .byte   0x42
31292 +       .uleb128 0xa
31293 +       .long   .LASF994
31294 +       .byte   0x5d
31295 +       .byte   0x9b
31296 +       .long   0x479a
31297 +       .byte   0x2
31298 +       .byte   0x23
31299 +       .uleb128 0x0
31300 +       .uleb128 0xa
31301 +       .long   .LASF980
31302 +       .byte   0x5d
31303 +       .byte   0x9c
31304 +       .long   0x585c
31305 +       .byte   0x2
31306 +       .byte   0x23
31307 +       .uleb128 0xc
31308 +       .uleb128 0xa
31309 +       .long   .LASF981
31310 +       .byte   0x5d
31311 +       .byte   0x9d
31312 +       .long   0x587c
31313 +       .byte   0x2
31314 +       .byte   0x23
31315 +       .uleb128 0x10
31316 +       .byte   0x0
31317 +       .uleb128 0x4
31318 +       .byte   0x4
31319 +       .long   0x56ab
31320 +       .uleb128 0x11
31321 +       .long   0x56fd
31322 +       .byte   0x1
31323 +       .long   0x21
31324 +       .uleb128 0x6
31325 +       .long   0x1e7d
31326 +       .uleb128 0x6
31327 +       .long   0x56fd
31328 +       .byte   0x0
31329 +       .uleb128 0x4
31330 +       .byte   0x4
31331 +       .long   0x5703
31332 +       .uleb128 0x15
31333 +       .long   0x57ce
31334 +       .long   .LASF1079
31335 +       .byte   0x9c
31336 +       .byte   0x29
31337 +       .value  0x23e
31338 +       .uleb128 0xa
31339 +       .long   .LASF414
31340 +       .byte   0x5d
31341 +       .byte   0x7d
31342 +       .long   0x7f2
31343 +       .byte   0x2
31344 +       .byte   0x23
31345 +       .uleb128 0x0
31346 +       .uleb128 0xb
31347 +       .string "bus"
31348 +       .byte   0x5d
31349 +       .byte   0x7e
31350 +       .long   0x54d3
31351 +       .byte   0x2
31352 +       .byte   0x23
31353 +       .uleb128 0x4
31354 +       .uleb128 0xa
31355 +       .long   .LASF382
31356 +       .byte   0x5d
31357 +       .byte   0x80
31358 +       .long   0x4b3a
31359 +       .byte   0x2
31360 +       .byte   0x23
31361 +       .uleb128 0x8
31362 +       .uleb128 0xa
31363 +       .long   .LASF1062
31364 +       .byte   0x5d
31365 +       .byte   0x81
31366 +       .long   0x53c4
31367 +       .byte   0x2
31368 +       .byte   0x23
31369 +       .uleb128 0x48
31370 +       .uleb128 0xa
31371 +       .long   .LASF381
31372 +       .byte   0x5d
31373 +       .byte   0x82
31374 +       .long   0x541b
31375 +       .byte   0x2
31376 +       .byte   0x23
31377 +       .uleb128 0x5c
31378 +       .uleb128 0xa
31379 +       .long   .LASF594
31380 +       .byte   0x5d
31381 +       .byte   0x84
31382 +       .long   0x4af9
31383 +       .byte   0x2
31384 +       .byte   0x23
31385 +       .uleb128 0x7c
31386 +       .uleb128 0xa
31387 +       .long   .LASF1080
31388 +       .byte   0x5d
31389 +       .byte   0x85
31390 +       .long   0x7f2
31391 +       .byte   0x3
31392 +       .byte   0x23
31393 +       .uleb128 0x80
31394 +       .uleb128 0xa
31395 +       .long   .LASF926
31396 +       .byte   0x5d
31397 +       .byte   0x86
31398 +       .long   0x5841
31399 +       .byte   0x3
31400 +       .byte   0x23
31401 +       .uleb128 0x84
31402 +       .uleb128 0xa
31403 +       .long   .LASF415
31404 +       .byte   0x5d
31405 +       .byte   0x88
31406 +       .long   0x580e
31407 +       .byte   0x3
31408 +       .byte   0x23
31409 +       .uleb128 0x88
31410 +       .uleb128 0xa
31411 +       .long   .LASF1071
31412 +       .byte   0x5d
31413 +       .byte   0x89
31414 +       .long   0x580e
31415 +       .byte   0x3
31416 +       .byte   0x23
31417 +       .uleb128 0x8c
31418 +       .uleb128 0xa
31419 +       .long   .LASF1030
31420 +       .byte   0x5d
31421 +       .byte   0x8a
31422 +       .long   0x5820
31423 +       .byte   0x3
31424 +       .byte   0x23
31425 +       .uleb128 0x90
31426 +       .uleb128 0xa
31427 +       .long   .LASF1072
31428 +       .byte   0x5d
31429 +       .byte   0x8b
31430 +       .long   0x583b
31431 +       .byte   0x3
31432 +       .byte   0x23
31433 +       .uleb128 0x94
31434 +       .uleb128 0xa
31435 +       .long   .LASF1075
31436 +       .byte   0x5d
31437 +       .byte   0x8c
31438 +       .long   0x580e
31439 +       .byte   0x3
31440 +       .byte   0x23
31441 +       .uleb128 0x98
31442 +       .byte   0x0
31443 +       .uleb128 0x4
31444 +       .byte   0x4
31445 +       .long   0x56e8
31446 +       .uleb128 0x11
31447 +       .long   0x57f8
31448 +       .byte   0x1
31449 +       .long   0x21
31450 +       .uleb128 0x6
31451 +       .long   0x1e7d
31452 +       .uleb128 0x6
31453 +       .long   0x4d87
31454 +       .uleb128 0x6
31455 +       .long   0x21
31456 +       .uleb128 0x6
31457 +       .long   0xb5
31458 +       .uleb128 0x6
31459 +       .long   0x21
31460 +       .byte   0x0
31461 +       .uleb128 0x4
31462 +       .byte   0x4
31463 +       .long   0x57d4
31464 +       .uleb128 0x11
31465 +       .long   0x580e
31466 +       .byte   0x1
31467 +       .long   0x21
31468 +       .uleb128 0x6
31469 +       .long   0x1e7d
31470 +       .byte   0x0
31471 +       .uleb128 0x4
31472 +       .byte   0x4
31473 +       .long   0x57fe
31474 +       .uleb128 0x5
31475 +       .long   0x5820
31476 +       .byte   0x1
31477 +       .uleb128 0x6
31478 +       .long   0x1e7d
31479 +       .byte   0x0
31480 +       .uleb128 0x4
31481 +       .byte   0x4
31482 +       .long   0x5814
31483 +       .uleb128 0x11
31484 +       .long   0x583b
31485 +       .byte   0x1
31486 +       .long   0x21
31487 +       .uleb128 0x6
31488 +       .long   0x1e7d
31489 +       .uleb128 0x6
31490 +       .long   0x1c07
31491 +       .byte   0x0
31492 +       .uleb128 0x4
31493 +       .byte   0x4
31494 +       .long   0x5826
31495 +       .uleb128 0x4
31496 +       .byte   0x4
31497 +       .long   0x4ee5
31498 +       .uleb128 0x11
31499 +       .long   0x585c
31500 +       .byte   0x1
31501 +       .long   0x209
31502 +       .uleb128 0x6
31503 +       .long   0x56fd
31504 +       .uleb128 0x6
31505 +       .long   0xb5
31506 +       .byte   0x0
31507 +       .uleb128 0x4
31508 +       .byte   0x4
31509 +       .long   0x5847
31510 +       .uleb128 0x11
31511 +       .long   0x587c
31512 +       .byte   0x1
31513 +       .long   0x209
31514 +       .uleb128 0x6
31515 +       .long   0x56fd
31516 +       .uleb128 0x6
31517 +       .long   0x7f2
31518 +       .uleb128 0x6
31519 +       .long   0x1fe
31520 +       .byte   0x0
31521 +       .uleb128 0x4
31522 +       .byte   0x4
31523 +       .long   0x5862
31524 +       .uleb128 0x1a
31525 +       .long   0x5997
31526 +       .long   .LASF401
31527 +       .value  0x104
31528 +       .byte   0x5d
31529 +       .byte   0x23
31530 +       .uleb128 0xa
31531 +       .long   .LASF414
31532 +       .byte   0x5d
31533 +       .byte   0xb2
31534 +       .long   0x7f2
31535 +       .byte   0x2
31536 +       .byte   0x23
31537 +       .uleb128 0x0
31538 +       .uleb128 0xa
31539 +       .long   .LASF594
31540 +       .byte   0x5d
31541 +       .byte   0xb3
31542 +       .long   0x4af9
31543 +       .byte   0x2
31544 +       .byte   0x23
31545 +       .uleb128 0x4
31546 +       .uleb128 0xa
31547 +       .long   .LASF1059
31548 +       .byte   0x5d
31549 +       .byte   0xb5
31550 +       .long   0x4c43
31551 +       .byte   0x2
31552 +       .byte   0x23
31553 +       .uleb128 0x8
31554 +       .uleb128 0xa
31555 +       .long   .LASF206
31556 +       .byte   0x5d
31557 +       .byte   0xb6
31558 +       .long   0x17bc
31559 +       .byte   0x2
31560 +       .byte   0x23
31561 +       .uleb128 0x5c
31562 +       .uleb128 0xa
31563 +       .long   .LASF1061
31564 +       .byte   0x5d
31565 +       .byte   0xb7
31566 +       .long   0x17bc
31567 +       .byte   0x2
31568 +       .byte   0x23
31569 +       .uleb128 0x64
31570 +       .uleb128 0xa
31571 +       .long   .LASF1081
31572 +       .byte   0x5d
31573 +       .byte   0xb8
31574 +       .long   0x17bc
31575 +       .byte   0x2
31576 +       .byte   0x23
31577 +       .uleb128 0x6c
31578 +       .uleb128 0xa
31579 +       .long   .LASF1082
31580 +       .byte   0x5d
31581 +       .byte   0xb9
31582 +       .long   0x4c43
31583 +       .byte   0x2
31584 +       .byte   0x23
31585 +       .uleb128 0x74
31586 +       .uleb128 0xb
31587 +       .string "sem"
31588 +       .byte   0x5d
31589 +       .byte   0xba
31590 +       .long   0x1931
31591 +       .byte   0x3
31592 +       .byte   0x23
31593 +       .uleb128 0xc8
31594 +       .uleb128 0xa
31595 +       .long   .LASF1083
31596 +       .byte   0x5d
31597 +       .byte   0xbc
31598 +       .long   0x59ce
31599 +       .byte   0x3
31600 +       .byte   0x23
31601 +       .uleb128 0xdc
31602 +       .uleb128 0xa
31603 +       .long   .LASF1084
31604 +       .byte   0x5d
31605 +       .byte   0xbd
31606 +       .long   0x5a0b
31607 +       .byte   0x3
31608 +       .byte   0x23
31609 +       .uleb128 0xe0
31610 +       .uleb128 0xa
31611 +       .long   .LASF1066
31612 +       .byte   0x5d
31613 +       .byte   0xbe
31614 +       .long   0x56a5
31615 +       .byte   0x3
31616 +       .byte   0x23
31617 +       .uleb128 0xe4
31618 +       .uleb128 0xa
31619 +       .long   .LASF988
31620 +       .byte   0x5d
31621 +       .byte   0xc1
31622 +       .long   0x5b0c
31623 +       .byte   0x3
31624 +       .byte   0x23
31625 +       .uleb128 0xe8
31626 +       .uleb128 0xa
31627 +       .long   .LASF1085
31628 +       .byte   0x5d
31629 +       .byte   0xc3
31630 +       .long   0x57f8
31631 +       .byte   0x3
31632 +       .byte   0x23
31633 +       .uleb128 0xec
31634 +       .uleb128 0xa
31635 +       .long   .LASF404
31636 +       .byte   0x5d
31637 +       .byte   0xc5
31638 +       .long   0x5b1e
31639 +       .byte   0x3
31640 +       .byte   0x23
31641 +       .uleb128 0xf0
31642 +       .uleb128 0xa
31643 +       .long   .LASF1086
31644 +       .byte   0x5d
31645 +       .byte   0xc6
31646 +       .long   0x5b36
31647 +       .byte   0x3
31648 +       .byte   0x23
31649 +       .uleb128 0xf4
31650 +       .uleb128 0xa
31651 +       .long   .LASF1087
31652 +       .byte   0x5d
31653 +       .byte   0xc7
31654 +       .long   0x5820
31655 +       .byte   0x3
31656 +       .byte   0x23
31657 +       .uleb128 0xf8
31658 +       .uleb128 0xa
31659 +       .long   .LASF1072
31660 +       .byte   0x5d
31661 +       .byte   0xc9
31662 +       .long   0x583b
31663 +       .byte   0x3
31664 +       .byte   0x23
31665 +       .uleb128 0xfc
31666 +       .uleb128 0xa
31667 +       .long   .LASF1075
31668 +       .byte   0x5d
31669 +       .byte   0xca
31670 +       .long   0x580e
31671 +       .byte   0x3
31672 +       .byte   0x23
31673 +       .uleb128 0x100
31674 +       .byte   0x0
31675 +       .uleb128 0xf
31676 +       .long   0x59ce
31677 +       .long   .LASF1088
31678 +       .byte   0x14
31679 +       .byte   0x5d
31680 +       .byte   0xbc
31681 +       .uleb128 0xa
31682 +       .long   .LASF994
31683 +       .byte   0x5d
31684 +       .byte   0xd2
31685 +       .long   0x479a
31686 +       .byte   0x2
31687 +       .byte   0x23
31688 +       .uleb128 0x0
31689 +       .uleb128 0xa
31690 +       .long   .LASF980
31691 +       .byte   0x5d
31692 +       .byte   0xd3
31693 +       .long   0x5b51
31694 +       .byte   0x2
31695 +       .byte   0x23
31696 +       .uleb128 0xc
31697 +       .uleb128 0xa
31698 +       .long   .LASF981
31699 +       .byte   0x5d
31700 +       .byte   0xd4
31701 +       .long   0x5b71
31702 +       .byte   0x2
31703 +       .byte   0x23
31704 +       .uleb128 0x10
31705 +       .byte   0x0
31706 +       .uleb128 0x4
31707 +       .byte   0x4
31708 +       .long   0x5997
31709 +       .uleb128 0xf
31710 +       .long   0x5a0b
31711 +       .long   .LASF1089
31712 +       .byte   0x14
31713 +       .byte   0x5d
31714 +       .byte   0xbd
31715 +       .uleb128 0xa
31716 +       .long   .LASF994
31717 +       .byte   0x5d
31718 +       .byte   0xdf
31719 +       .long   0x479a
31720 +       .byte   0x2
31721 +       .byte   0x23
31722 +       .uleb128 0x0
31723 +       .uleb128 0xa
31724 +       .long   .LASF980
31725 +       .byte   0x5d
31726 +       .byte   0xe0
31727 +       .long   0x5b8c
31728 +       .byte   0x2
31729 +       .byte   0x23
31730 +       .uleb128 0xc
31731 +       .uleb128 0xa
31732 +       .long   .LASF981
31733 +       .byte   0x5d
31734 +       .byte   0xe1
31735 +       .long   0x5bac
31736 +       .byte   0x2
31737 +       .byte   0x23
31738 +       .uleb128 0x10
31739 +       .byte   0x0
31740 +       .uleb128 0x4
31741 +       .byte   0x4
31742 +       .long   0x59d4
31743 +       .uleb128 0x11
31744 +       .long   0x5a35
31745 +       .byte   0x1
31746 +       .long   0x21
31747 +       .uleb128 0x6
31748 +       .long   0x5a35
31749 +       .uleb128 0x6
31750 +       .long   0x4d87
31751 +       .uleb128 0x6
31752 +       .long   0x21
31753 +       .uleb128 0x6
31754 +       .long   0xb5
31755 +       .uleb128 0x6
31756 +       .long   0x21
31757 +       .byte   0x0
31758 +       .uleb128 0x4
31759 +       .byte   0x4
31760 +       .long   0x5a3b
31761 +       .uleb128 0xf
31762 +       .long   0x5b0c
31763 +       .long   .LASF1090
31764 +       .byte   0x94
31765 +       .byte   0x5d
31766 +       .byte   0x24
31767 +       .uleb128 0x16
31768 +       .long   .LASF400
31769 +       .byte   0x5d
31770 +       .value  0x105
31771 +       .long   0x17bc
31772 +       .byte   0x2
31773 +       .byte   0x23
31774 +       .uleb128 0x0
31775 +       .uleb128 0x16
31776 +       .long   .LASF382
31777 +       .byte   0x5d
31778 +       .value  0x107
31779 +       .long   0x4b3a
31780 +       .byte   0x2
31781 +       .byte   0x23
31782 +       .uleb128 0x8
31783 +       .uleb128 0x16
31784 +       .long   .LASF401
31785 +       .byte   0x5d
31786 +       .value  0x108
31787 +       .long   0x5b30
31788 +       .byte   0x2
31789 +       .byte   0x23
31790 +       .uleb128 0x48
31791 +       .uleb128 0x16
31792 +       .long   .LASF402
31793 +       .byte   0x5d
31794 +       .value  0x109
31795 +       .long   0x19f
31796 +       .byte   0x2
31797 +       .byte   0x23
31798 +       .uleb128 0x4c
31799 +       .uleb128 0x16
31800 +       .long   .LASF388
31801 +       .byte   0x5d
31802 +       .value  0x10a
31803 +       .long   0x5a0b
31804 +       .byte   0x2
31805 +       .byte   0x23
31806 +       .uleb128 0x50
31807 +       .uleb128 0x16
31808 +       .long   .LASF387
31809 +       .byte   0x5d
31810 +       .value  0x10b
31811 +       .long   0x59d4
31812 +       .byte   0x2
31813 +       .byte   0x23
31814 +       .uleb128 0x54
31815 +       .uleb128 0x17
31816 +       .string "dev"
31817 +       .byte   0x5d
31818 +       .value  0x10c
31819 +       .long   0x1e7d
31820 +       .byte   0x2
31821 +       .byte   0x23
31822 +       .uleb128 0x68
31823 +       .uleb128 0x16
31824 +       .long   .LASF1091
31825 +       .byte   0x5d
31826 +       .value  0x10d
31827 +       .long   0x160b
31828 +       .byte   0x2
31829 +       .byte   0x23
31830 +       .uleb128 0x6c
31831 +       .uleb128 0x16
31832 +       .long   .LASF205
31833 +       .byte   0x5d
31834 +       .value  0x10e
31835 +       .long   0x5a35
31836 +       .byte   0x2
31837 +       .byte   0x23
31838 +       .uleb128 0x70
31839 +       .uleb128 0x16
31840 +       .long   .LASF403
31841 +       .byte   0x5d
31842 +       .value  0x10f
31843 +       .long   0x5bb2
31844 +       .byte   0x2
31845 +       .byte   0x23
31846 +       .uleb128 0x74
31847 +       .uleb128 0x16
31848 +       .long   .LASF404
31849 +       .byte   0x5d
31850 +       .value  0x111
31851 +       .long   0x5b1e
31852 +       .byte   0x2
31853 +       .byte   0x23
31854 +       .uleb128 0x78
31855 +       .uleb128 0x16
31856 +       .long   .LASF988
31857 +       .byte   0x5d
31858 +       .value  0x113
31859 +       .long   0x5b0c
31860 +       .byte   0x2
31861 +       .byte   0x23
31862 +       .uleb128 0x7c
31863 +       .uleb128 0x16
31864 +       .long   .LASF1092
31865 +       .byte   0x5d
31866 +       .value  0x114
31867 +       .long   0x4c33
31868 +       .byte   0x3
31869 +       .byte   0x23
31870 +       .uleb128 0x80
31871 +       .byte   0x0
31872 +       .uleb128 0x4
31873 +       .byte   0x4
31874 +       .long   0x5a11
31875 +       .uleb128 0x5
31876 +       .long   0x5b1e
31877 +       .byte   0x1
31878 +       .uleb128 0x6
31879 +       .long   0x5a35
31880 +       .byte   0x0
31881 +       .uleb128 0x4
31882 +       .byte   0x4
31883 +       .long   0x5b12
31884 +       .uleb128 0x5
31885 +       .long   0x5b30
31886 +       .byte   0x1
31887 +       .uleb128 0x6
31888 +       .long   0x5b30
31889 +       .byte   0x0
31890 +       .uleb128 0x4
31891 +       .byte   0x4
31892 +       .long   0x5882
31893 +       .uleb128 0x4
31894 +       .byte   0x4
31895 +       .long   0x5b24
31896 +       .uleb128 0x11
31897 +       .long   0x5b51
31898 +       .byte   0x1
31899 +       .long   0x209
31900 +       .uleb128 0x6
31901 +       .long   0x5b30
31902 +       .uleb128 0x6
31903 +       .long   0xb5
31904 +       .byte   0x0
31905 +       .uleb128 0x4
31906 +       .byte   0x4
31907 +       .long   0x5b3c
31908 +       .uleb128 0x11
31909 +       .long   0x5b71
31910 +       .byte   0x1
31911 +       .long   0x209
31912 +       .uleb128 0x6
31913 +       .long   0x5b30
31914 +       .uleb128 0x6
31915 +       .long   0x7f2
31916 +       .uleb128 0x6
31917 +       .long   0x1fe
31918 +       .byte   0x0
31919 +       .uleb128 0x4
31920 +       .byte   0x4
31921 +       .long   0x5b57
31922 +       .uleb128 0x11
31923 +       .long   0x5b8c
31924 +       .byte   0x1
31925 +       .long   0x209
31926 +       .uleb128 0x6
31927 +       .long   0x5a35
31928 +       .uleb128 0x6
31929 +       .long   0xb5
31930 +       .byte   0x0
31931 +       .uleb128 0x4
31932 +       .byte   0x4
31933 +       .long   0x5b77
31934 +       .uleb128 0x11
31935 +       .long   0x5bac
31936 +       .byte   0x1
31937 +       .long   0x209
31938 +       .uleb128 0x6
31939 +       .long   0x5a35
31940 +       .uleb128 0x6
31941 +       .long   0x7f2
31942 +       .uleb128 0x6
31943 +       .long   0x1fe
31944 +       .byte   0x0
31945 +       .uleb128 0x4
31946 +       .byte   0x4
31947 +       .long   0x5b92
31948 +       .uleb128 0x4
31949 +       .byte   0x4
31950 +       .long   0x5bb8
31951 +       .uleb128 0x4
31952 +       .byte   0x4
31953 +       .long   0x4aff
31954 +       .uleb128 0x15
31955 +       .long   0x5c26
31956 +       .long   .LASF1093
31957 +       .byte   0x18
31958 +       .byte   0x5d
31959 +       .value  0x154
31960 +       .uleb128 0x16
31961 +       .long   .LASF414
31962 +       .byte   0x5d
31963 +       .value  0x155
31964 +       .long   0x7f2
31965 +       .byte   0x2
31966 +       .byte   0x23
31967 +       .uleb128 0x0
31968 +       .uleb128 0x16
31969 +       .long   .LASF403
31970 +       .byte   0x5d
31971 +       .value  0x156
31972 +       .long   0x5bb2
31973 +       .byte   0x2
31974 +       .byte   0x23
31975 +       .uleb128 0x4
31976 +       .uleb128 0x16
31977 +       .long   .LASF988
31978 +       .byte   0x5d
31979 +       .value  0x158
31980 +       .long   0x57f8
31981 +       .byte   0x2
31982 +       .byte   0x23
31983 +       .uleb128 0x8
31984 +       .uleb128 0x16
31985 +       .long   .LASF404
31986 +       .byte   0x5d
31987 +       .value  0x159
31988 +       .long   0x5820
31989 +       .byte   0x2
31990 +       .byte   0x23
31991 +       .uleb128 0xc
31992 +       .uleb128 0x16
31993 +       .long   .LASF1072
31994 +       .byte   0x5d
31995 +       .value  0x15a
31996 +       .long   0x583b
31997 +       .byte   0x2
31998 +       .byte   0x23
31999 +       .uleb128 0x10
32000 +       .uleb128 0x16
32001 +       .long   .LASF1075
32002 +       .byte   0x5d
32003 +       .value  0x15b
32004 +       .long   0x580e
32005 +       .byte   0x2
32006 +       .byte   0x23
32007 +       .uleb128 0x14
32008 +       .byte   0x0
32009 +       .uleb128 0x11
32010 +       .long   0x5c40
32011 +       .byte   0x1
32012 +       .long   0x209
32013 +       .uleb128 0x6
32014 +       .long   0x1e7d
32015 +       .uleb128 0x6
32016 +       .long   0x56a5
32017 +       .uleb128 0x6
32018 +       .long   0xb5
32019 +       .byte   0x0
32020 +       .uleb128 0x4
32021 +       .byte   0x4
32022 +       .long   0x5c26
32023 +       .uleb128 0x11
32024 +       .long   0x5c65
32025 +       .byte   0x1
32026 +       .long   0x209
32027 +       .uleb128 0x6
32028 +       .long   0x1e7d
32029 +       .uleb128 0x6
32030 +       .long   0x56a5
32031 +       .uleb128 0x6
32032 +       .long   0x7f2
32033 +       .uleb128 0x6
32034 +       .long   0x1fe
32035 +       .byte   0x0
32036 +       .uleb128 0x4
32037 +       .byte   0x4
32038 +       .long   0x5c46
32039 +       .uleb128 0x4
32040 +       .byte   0x4
32041 +       .long   0x5bbe
32042 +       .uleb128 0x4
32043 +       .byte   0x4
32044 +       .long   0x189
32045 +       .uleb128 0x21
32046 +       .long   .LASF1094
32047 +       .byte   0x1
32048 +       .uleb128 0x4
32049 +       .byte   0x4
32050 +       .long   0x5c77
32051 +       .uleb128 0x7
32052 +       .long   .LASF1095
32053 +       .byte   0x56
32054 +       .byte   0x52
32055 +       .long   0x5c8e
32056 +       .uleb128 0x4
32057 +       .byte   0x4
32058 +       .long   0x5c94
32059 +       .uleb128 0x11
32060 +       .long   0x5ca9
32061 +       .byte   0x1
32062 +       .long   0x504b
32063 +       .uleb128 0x6
32064 +       .long   0x21
32065 +       .uleb128 0x6
32066 +       .long   0x160b
32067 +       .byte   0x0
32068 +       .uleb128 0x15
32069 +       .long   0x5cd5
32070 +       .long   .LASF1096
32071 +       .byte   0x8
32072 +       .byte   0x56
32073 +       .value  0x117
32074 +       .uleb128 0x16
32075 +       .long   .LASF840
32076 +       .byte   0x56
32077 +       .value  0x118
32078 +       .long   0x5ce7
32079 +       .byte   0x2
32080 +       .byte   0x23
32081 +       .uleb128 0x0
32082 +       .uleb128 0x16
32083 +       .long   .LASF734
32084 +       .byte   0x56
32085 +       .value  0x119
32086 +       .long   0x160b
32087 +       .byte   0x2
32088 +       .byte   0x23
32089 +       .uleb128 0x4
32090 +       .byte   0x0
32091 +       .uleb128 0x5
32092 +       .long   0x5ce1
32093 +       .byte   0x1
32094 +       .uleb128 0x6
32095 +       .long   0x5ce1
32096 +       .byte   0x0
32097 +       .uleb128 0x4
32098 +       .byte   0x4
32099 +       .long   0x5ca9
32100 +       .uleb128 0x4
32101 +       .byte   0x4
32102 +       .long   0x5cd5
32103 +       .uleb128 0xf
32104 +       .long   0x5d24
32105 +       .long   .LASF1097
32106 +       .byte   0xc
32107 +       .byte   0x17
32108 +       .byte   0xe
32109 +       .uleb128 0xa
32110 +       .long   .LASF1098
32111 +       .byte   0x17
32112 +       .byte   0xf
32113 +       .long   0x5d77
32114 +       .byte   0x2
32115 +       .byte   0x23
32116 +       .uleb128 0x0
32117 +       .uleb128 0xa
32118 +       .long   .LASF1099
32119 +       .byte   0x17
32120 +       .byte   0x10
32121 +       .long   0x5d77
32122 +       .byte   0x2
32123 +       .byte   0x23
32124 +       .uleb128 0x4
32125 +       .uleb128 0xa
32126 +       .long   .LASF205
32127 +       .byte   0x17
32128 +       .byte   0x11
32129 +       .long   0x5d77
32130 +       .byte   0x2
32131 +       .byte   0x23
32132 +       .uleb128 0x8
32133 +       .byte   0x0
32134 +       .uleb128 0xf
32135 +       .long   0x5d77
32136 +       .long   .LASF1100
32137 +       .byte   0x14
32138 +       .byte   0x17
32139 +       .byte   0xf
32140 +       .uleb128 0xa
32141 +       .long   .LASF1098
32142 +       .byte   0x17
32143 +       .byte   0x15
32144 +       .long   0x5d77
32145 +       .byte   0x2
32146 +       .byte   0x23
32147 +       .uleb128 0x0
32148 +       .uleb128 0xa
32149 +       .long   .LASF1099
32150 +       .byte   0x17
32151 +       .byte   0x16
32152 +       .long   0x5d77
32153 +       .byte   0x2
32154 +       .byte   0x23
32155 +       .uleb128 0x4
32156 +       .uleb128 0xa
32157 +       .long   .LASF205
32158 +       .byte   0x17
32159 +       .byte   0x17
32160 +       .long   0x5d77
32161 +       .byte   0x2
32162 +       .byte   0x23
32163 +       .uleb128 0x8
32164 +       .uleb128 0xa
32165 +       .long   .LASF1045
32166 +       .byte   0x17
32167 +       .byte   0x18
32168 +       .long   0x2f
32169 +       .byte   0x2
32170 +       .byte   0x23
32171 +       .uleb128 0xc
32172 +       .uleb128 0xa
32173 +       .long   .LASF1101
32174 +       .byte   0x17
32175 +       .byte   0x19
32176 +       .long   0x2f
32177 +       .byte   0x2
32178 +       .byte   0x23
32179 +       .uleb128 0x10
32180 +       .byte   0x0
32181 +       .uleb128 0x4
32182 +       .byte   0x4
32183 +       .long   0x5d24
32184 +       .uleb128 0xf
32185 +       .long   0x5db4
32186 +       .long   .LASF1102
32187 +       .byte   0x8
32188 +       .byte   0x17
32189 +       .byte   0x1c
32190 +       .uleb128 0xa
32191 +       .long   .LASF1100
32192 +       .byte   0x17
32193 +       .byte   0x1d
32194 +       .long   0x5d77
32195 +       .byte   0x2
32196 +       .byte   0x23
32197 +       .uleb128 0x0
32198 +       .uleb128 0xa
32199 +       .long   .LASF1103
32200 +       .byte   0x17
32201 +       .byte   0x1e
32202 +       .long   0x53
32203 +       .byte   0x2
32204 +       .byte   0x23
32205 +       .uleb128 0x4
32206 +       .uleb128 0xb
32207 +       .string "raw"
32208 +       .byte   0x17
32209 +       .byte   0x1f
32210 +       .long   0x53
32211 +       .byte   0x2
32212 +       .byte   0x23
32213 +       .uleb128 0x6
32214 +       .byte   0x0
32215 +       .uleb128 0xf
32216 +       .long   0x5deb
32217 +       .long   .LASF1104
32218 +       .byte   0xc
32219 +       .byte   0x24
32220 +       .byte   0x21
32221 +       .uleb128 0xa
32222 +       .long   .LASF1105
32223 +       .byte   0x24
32224 +       .byte   0x22
32225 +       .long   0x77
32226 +       .byte   0x2
32227 +       .byte   0x23
32228 +       .uleb128 0x0
32229 +       .uleb128 0xb
32230 +       .string "len"
32231 +       .byte   0x24
32232 +       .byte   0x23
32233 +       .long   0x77
32234 +       .byte   0x2
32235 +       .byte   0x23
32236 +       .uleb128 0x4
32237 +       .uleb128 0xa
32238 +       .long   .LASF414
32239 +       .byte   0x24
32240 +       .byte   0x24
32241 +       .long   0x5deb
32242 +       .byte   0x2
32243 +       .byte   0x23
32244 +       .uleb128 0x8
32245 +       .byte   0x0
32246 +       .uleb128 0x4
32247 +       .byte   0x4
32248 +       .long   0x5df1
32249 +       .uleb128 0x14
32250 +       .long   0x112
32251 +       .uleb128 0xc
32252 +       .long   0x5e15
32253 +       .byte   0x8
32254 +       .byte   0x24
32255 +       .byte   0x64
32256 +       .uleb128 0xe
32257 +       .long   .LASF1106
32258 +       .byte   0x24
32259 +       .byte   0x65
32260 +       .long   0x17bc
32261 +       .uleb128 0xe
32262 +       .long   .LASF1107
32263 +       .byte   0x24
32264 +       .byte   0x66
32265 +       .long   0x2ea8
32266 +       .byte   0x0
32267 +       .uleb128 0xf
32268 +       .long   0x5e84
32269 +       .long   .LASF1108
32270 +       .byte   0x1c
32271 +       .byte   0x24
32272 +       .byte   0x6b
32273 +       .uleb128 0xa
32274 +       .long   .LASF1109
32275 +       .byte   0x24
32276 +       .byte   0x82
32277 +       .long   0x618b
32278 +       .byte   0x2
32279 +       .byte   0x23
32280 +       .uleb128 0x0
32281 +       .uleb128 0xa
32282 +       .long   .LASF522
32283 +       .byte   0x24
32284 +       .byte   0x83
32285 +       .long   0x61ac
32286 +       .byte   0x2
32287 +       .byte   0x23
32288 +       .uleb128 0x4
32289 +       .uleb128 0xa
32290 +       .long   .LASF1110
32291 +       .byte   0x24
32292 +       .byte   0x84
32293 +       .long   0x61cc
32294 +       .byte   0x2
32295 +       .byte   0x23
32296 +       .uleb128 0x8
32297 +       .uleb128 0xa
32298 +       .long   .LASF1111
32299 +       .byte   0x24
32300 +       .byte   0x85
32301 +       .long   0x61e2
32302 +       .byte   0x2
32303 +       .byte   0x23
32304 +       .uleb128 0xc
32305 +       .uleb128 0xa
32306 +       .long   .LASF1112
32307 +       .byte   0x24
32308 +       .byte   0x86
32309 +       .long   0x61f4
32310 +       .byte   0x2
32311 +       .byte   0x23
32312 +       .uleb128 0x10
32313 +       .uleb128 0xa
32314 +       .long   .LASF1113
32315 +       .byte   0x24
32316 +       .byte   0x87
32317 +       .long   0x620b
32318 +       .byte   0x2
32319 +       .byte   0x23
32320 +       .uleb128 0x14
32321 +       .uleb128 0xa
32322 +       .long   .LASF1114
32323 +       .byte   0x24
32324 +       .byte   0x88
32325 +       .long   0x622b
32326 +       .byte   0x2
32327 +       .byte   0x23
32328 +       .uleb128 0x18
32329 +       .byte   0x0
32330 +       .uleb128 0x4
32331 +       .byte   0x4
32332 +       .long   0x5e15
32333 +       .uleb128 0x1a
32334 +       .long   0x60d1
32335 +       .long   .LASF1115
32336 +       .value  0x18c
32337 +       .byte   0x24
32338 +       .byte   0x6c
32339 +       .uleb128 0x16
32340 +       .long   .LASF1116
32341 +       .byte   0x1a
32342 +       .value  0x38d
32343 +       .long   0x17bc
32344 +       .byte   0x2
32345 +       .byte   0x23
32346 +       .uleb128 0x0
32347 +       .uleb128 0x16
32348 +       .long   .LASF1117
32349 +       .byte   0x1a
32350 +       .value  0x38e
32351 +       .long   0x19f
32352 +       .byte   0x2
32353 +       .byte   0x23
32354 +       .uleb128 0x8
32355 +       .uleb128 0x16
32356 +       .long   .LASF1118
32357 +       .byte   0x1a
32358 +       .value  0x38f
32359 +       .long   0x2f
32360 +       .byte   0x2
32361 +       .byte   0x23
32362 +       .uleb128 0xc
32363 +       .uleb128 0x16
32364 +       .long   .LASF1119
32365 +       .byte   0x1a
32366 +       .value  0x390
32367 +       .long   0x112
32368 +       .byte   0x2
32369 +       .byte   0x23
32370 +       .uleb128 0x10
32371 +       .uleb128 0x16
32372 +       .long   .LASF1120
32373 +       .byte   0x1a
32374 +       .value  0x391
32375 +       .long   0x112
32376 +       .byte   0x2
32377 +       .byte   0x23
32378 +       .uleb128 0x11
32379 +       .uleb128 0x16
32380 +       .long   .LASF1121
32381 +       .byte   0x1a
32382 +       .value  0x392
32383 +       .long   0x162
32384 +       .byte   0x2
32385 +       .byte   0x23
32386 +       .uleb128 0x14
32387 +       .uleb128 0x16
32388 +       .long   .LASF1122
32389 +       .byte   0x1a
32390 +       .value  0x393
32391 +       .long   0x7a6e
32392 +       .byte   0x2
32393 +       .byte   0x23
32394 +       .uleb128 0x1c
32395 +       .uleb128 0x16
32396 +       .long   .LASF1123
32397 +       .byte   0x1a
32398 +       .value  0x394
32399 +       .long   0x7b9f
32400 +       .byte   0x2
32401 +       .byte   0x23
32402 +       .uleb128 0x20
32403 +       .uleb128 0x16
32404 +       .long   .LASF1124
32405 +       .byte   0x1a
32406 +       .value  0x395
32407 +       .long   0x7baa
32408 +       .byte   0x2
32409 +       .byte   0x23
32410 +       .uleb128 0x24
32411 +       .uleb128 0x16
32412 +       .long   .LASF1125
32413 +       .byte   0x1a
32414 +       .value  0x396
32415 +       .long   0x7bb0
32416 +       .byte   0x2
32417 +       .byte   0x23
32418 +       .uleb128 0x28
32419 +       .uleb128 0x16
32420 +       .long   .LASF1126
32421 +       .byte   0x1a
32422 +       .value  0x397
32423 +       .long   0x7c1e
32424 +       .byte   0x2
32425 +       .byte   0x23
32426 +       .uleb128 0x2c
32427 +       .uleb128 0x16
32428 +       .long   .LASF1127
32429 +       .byte   0x1a
32430 +       .value  0x398
32431 +       .long   0x2f
32432 +       .byte   0x2
32433 +       .byte   0x23
32434 +       .uleb128 0x30
32435 +       .uleb128 0x16
32436 +       .long   .LASF1128
32437 +       .byte   0x1a
32438 +       .value  0x399
32439 +       .long   0x2f
32440 +       .byte   0x2
32441 +       .byte   0x23
32442 +       .uleb128 0x34
32443 +       .uleb128 0x16
32444 +       .long   .LASF1129
32445 +       .byte   0x1a
32446 +       .value  0x39a
32447 +       .long   0x28ec
32448 +       .byte   0x2
32449 +       .byte   0x23
32450 +       .uleb128 0x38
32451 +       .uleb128 0x16
32452 +       .long   .LASF1130
32453 +       .byte   0x1a
32454 +       .value  0x39b
32455 +       .long   0x18fa
32456 +       .byte   0x2
32457 +       .byte   0x23
32458 +       .uleb128 0x3c
32459 +       .uleb128 0x16
32460 +       .long   .LASF1131
32461 +       .byte   0x1a
32462 +       .value  0x39c
32463 +       .long   0x2d94
32464 +       .byte   0x2
32465 +       .byte   0x23
32466 +       .uleb128 0x4c
32467 +       .uleb128 0x16
32468 +       .long   .LASF1132
32469 +       .byte   0x1a
32470 +       .value  0x39d
32471 +       .long   0x21
32472 +       .byte   0x2
32473 +       .byte   0x23
32474 +       .uleb128 0x60
32475 +       .uleb128 0x16
32476 +       .long   .LASF1133
32477 +       .byte   0x1a
32478 +       .value  0x39e
32479 +       .long   0x21
32480 +       .byte   0x2
32481 +       .byte   0x23
32482 +       .uleb128 0x64
32483 +       .uleb128 0x16
32484 +       .long   .LASF1134
32485 +       .byte   0x1a
32486 +       .value  0x39f
32487 +       .long   0x21
32488 +       .byte   0x2
32489 +       .byte   0x23
32490 +       .uleb128 0x68
32491 +       .uleb128 0x16
32492 +       .long   .LASF1135
32493 +       .byte   0x1a
32494 +       .value  0x3a0
32495 +       .long   0x16c4
32496 +       .byte   0x2
32497 +       .byte   0x23
32498 +       .uleb128 0x6c
32499 +       .uleb128 0x16
32500 +       .long   .LASF1136
32501 +       .byte   0x1a
32502 +       .value  0x3a4
32503 +       .long   0x7c2a
32504 +       .byte   0x2
32505 +       .byte   0x23
32506 +       .uleb128 0x70
32507 +       .uleb128 0x16
32508 +       .long   .LASF1137
32509 +       .byte   0x1a
32510 +       .value  0x3a6
32511 +       .long   0x17bc
32512 +       .byte   0x2
32513 +       .byte   0x23
32514 +       .uleb128 0x74
32515 +       .uleb128 0x16
32516 +       .long   .LASF1138
32517 +       .byte   0x1a
32518 +       .value  0x3a7
32519 +       .long   0x17bc
32520 +       .byte   0x2
32521 +       .byte   0x23
32522 +       .uleb128 0x7c
32523 +       .uleb128 0x16
32524 +       .long   .LASF1139
32525 +       .byte   0x1a
32526 +       .value  0x3a8
32527 +       .long   0x17bc
32528 +       .byte   0x3
32529 +       .byte   0x23
32530 +       .uleb128 0x84
32531 +       .uleb128 0x16
32532 +       .long   .LASF1140
32533 +       .byte   0x1a
32534 +       .value  0x3a9
32535 +       .long   0x17eb
32536 +       .byte   0x3
32537 +       .byte   0x23
32538 +       .uleb128 0x8c
32539 +       .uleb128 0x16
32540 +       .long   .LASF1141
32541 +       .byte   0x1a
32542 +       .value  0x3aa
32543 +       .long   0x17bc
32544 +       .byte   0x3
32545 +       .byte   0x23
32546 +       .uleb128 0x90
32547 +       .uleb128 0x16
32548 +       .long   .LASF1142
32549 +       .byte   0x1a
32550 +       .value  0x3ac
32551 +       .long   0x71e9
32552 +       .byte   0x3
32553 +       .byte   0x23
32554 +       .uleb128 0x98
32555 +       .uleb128 0x16
32556 +       .long   .LASF1143
32557 +       .byte   0x1a
32558 +       .value  0x3ad
32559 +       .long   0x7c3c
32560 +       .byte   0x3
32561 +       .byte   0x23
32562 +       .uleb128 0x9c
32563 +       .uleb128 0x16
32564 +       .long   .LASF1144
32565 +       .byte   0x1a
32566 +       .value  0x3ae
32567 +       .long   0x17bc
32568 +       .byte   0x3
32569 +       .byte   0x23
32570 +       .uleb128 0xa0
32571 +       .uleb128 0x16
32572 +       .long   .LASF1145
32573 +       .byte   0x1a
32574 +       .value  0x3af
32575 +       .long   0x6c96
32576 +       .byte   0x3
32577 +       .byte   0x23
32578 +       .uleb128 0xa8
32579 +       .uleb128 0x16
32580 +       .long   .LASF1146
32581 +       .byte   0x1a
32582 +       .value  0x3b1
32583 +       .long   0x21
32584 +       .byte   0x3
32585 +       .byte   0x23
32586 +       .uleb128 0x13c
32587 +       .uleb128 0x16
32588 +       .long   .LASF1147
32589 +       .byte   0x1a
32590 +       .value  0x3b2
32591 +       .long   0x18ef
32592 +       .byte   0x3
32593 +       .byte   0x23
32594 +       .uleb128 0x140
32595 +       .uleb128 0x16
32596 +       .long   .LASF1148
32597 +       .byte   0x1a
32598 +       .value  0x3b4
32599 +       .long   0x46d1
32600 +       .byte   0x3
32601 +       .byte   0x23
32602 +       .uleb128 0x14c
32603 +       .uleb128 0x16
32604 +       .long   .LASF1149
32605 +       .byte   0x1a
32606 +       .value  0x3b6
32607 +       .long   0x160b
32608 +       .byte   0x3
32609 +       .byte   0x23
32610 +       .uleb128 0x16c
32611 +       .uleb128 0x16
32612 +       .long   .LASF1150
32613 +       .byte   0x1a
32614 +       .value  0x3bc
32615 +       .long   0x2d94
32616 +       .byte   0x3
32617 +       .byte   0x23
32618 +       .uleb128 0x170
32619 +       .uleb128 0x16
32620 +       .long   .LASF1151
32621 +       .byte   0x1a
32622 +       .value  0x3c0
32623 +       .long   0x173
32624 +       .byte   0x3
32625 +       .byte   0x23
32626 +       .uleb128 0x184
32627 +       .uleb128 0x16
32628 +       .long   .LASF1152
32629 +       .byte   0x1a
32630 +       .value  0x3c6
32631 +       .long   0xb5
32632 +       .byte   0x3
32633 +       .byte   0x23
32634 +       .uleb128 0x188
32635 +       .byte   0x0
32636 +       .uleb128 0x4
32637 +       .byte   0x4
32638 +       .long   0x5e8a
32639 +       .uleb128 0x21
32640 +       .long   .LASF1153
32641 +       .byte   0x1
32642 +       .uleb128 0x4
32643 +       .byte   0x4
32644 +       .long   0x60d7
32645 +       .uleb128 0x12
32646 +       .long   0x60f3
32647 +       .long   0x112
32648 +       .uleb128 0x13
32649 +       .long   0x28
32650 +       .byte   0x23
32651 +       .byte   0x0
32652 +       .uleb128 0x11
32653 +       .long   0x6108
32654 +       .byte   0x1
32655 +       .long   0x21
32656 +       .uleb128 0x6
32657 +       .long   0x28ec
32658 +       .uleb128 0x6
32659 +       .long   0x6108
32660 +       .byte   0x0
32661 +       .uleb128 0x4
32662 +       .byte   0x4
32663 +       .long   0x610e
32664 +       .uleb128 0xf
32665 +       .long   0x618b
32666 +       .long   .LASF1154
32667 +       .byte   0x50
32668 +       .byte   0x26
32669 +       .byte   0x14
32670 +       .uleb128 0xa
32671 +       .long   .LASF517
32672 +       .byte   0x27
32673 +       .byte   0x12
32674 +       .long   0x28ec
32675 +       .byte   0x2
32676 +       .byte   0x23
32677 +       .uleb128 0x0
32678 +       .uleb128 0xb
32679 +       .string "mnt"
32680 +       .byte   0x27
32681 +       .byte   0x13
32682 +       .long   0x28f8
32683 +       .byte   0x2
32684 +       .byte   0x23
32685 +       .uleb128 0x4
32686 +       .uleb128 0xa
32687 +       .long   .LASF1101
32688 +       .byte   0x27
32689 +       .byte   0x14
32690 +       .long   0x5db4
32691 +       .byte   0x2
32692 +       .byte   0x23
32693 +       .uleb128 0x8
32694 +       .uleb128 0xa
32695 +       .long   .LASF53
32696 +       .byte   0x27
32697 +       .byte   0x15
32698 +       .long   0x77
32699 +       .byte   0x2
32700 +       .byte   0x23
32701 +       .uleb128 0x14
32702 +       .uleb128 0xa
32703 +       .long   .LASF1155
32704 +       .byte   0x27
32705 +       .byte   0x16
32706 +       .long   0x21
32707 +       .byte   0x2
32708 +       .byte   0x23
32709 +       .uleb128 0x18
32710 +       .uleb128 0xa
32711 +       .long   .LASF1022
32712 +       .byte   0x27
32713 +       .byte   0x17
32714 +       .long   0x77
32715 +       .byte   0x2
32716 +       .byte   0x23
32717 +       .uleb128 0x1c
32718 +       .uleb128 0xa
32719 +       .long   .LASF1156
32720 +       .byte   0x27
32721 +       .byte   0x18
32722 +       .long   0x627c
32723 +       .byte   0x2
32724 +       .byte   0x23
32725 +       .uleb128 0x20
32726 +       .uleb128 0xa
32727 +       .long   .LASF1157
32728 +       .byte   0x27
32729 +       .byte   0x1d
32730 +       .long   0x6268
32731 +       .byte   0x2
32732 +       .byte   0x23
32733 +       .uleb128 0x44
32734 +       .byte   0x0
32735 +       .uleb128 0x4
32736 +       .byte   0x4
32737 +       .long   0x60f3
32738 +       .uleb128 0x11
32739 +       .long   0x61a6
32740 +       .byte   0x1
32741 +       .long   0x21
32742 +       .uleb128 0x6
32743 +       .long   0x28ec
32744 +       .uleb128 0x6
32745 +       .long   0x61a6
32746 +       .byte   0x0
32747 +       .uleb128 0x4
32748 +       .byte   0x4
32749 +       .long   0x5db4
32750 +       .uleb128 0x4
32751 +       .byte   0x4
32752 +       .long   0x6191
32753 +       .uleb128 0x11
32754 +       .long   0x61cc
32755 +       .byte   0x1
32756 +       .long   0x21
32757 +       .uleb128 0x6
32758 +       .long   0x28ec
32759 +       .uleb128 0x6
32760 +       .long   0x61a6
32761 +       .uleb128 0x6
32762 +       .long   0x61a6
32763 +       .byte   0x0
32764 +       .uleb128 0x4
32765 +       .byte   0x4
32766 +       .long   0x61b2
32767 +       .uleb128 0x11
32768 +       .long   0x61e2
32769 +       .byte   0x1
32770 +       .long   0x21
32771 +       .uleb128 0x6
32772 +       .long   0x28ec
32773 +       .byte   0x0
32774 +       .uleb128 0x4
32775 +       .byte   0x4
32776 +       .long   0x61d2
32777 +       .uleb128 0x5
32778 +       .long   0x61f4
32779 +       .byte   0x1
32780 +       .uleb128 0x6
32781 +       .long   0x28ec
32782 +       .byte   0x0
32783 +       .uleb128 0x4
32784 +       .byte   0x4
32785 +       .long   0x61e8
32786 +       .uleb128 0x5
32787 +       .long   0x620b
32788 +       .byte   0x1
32789 +       .uleb128 0x6
32790 +       .long   0x28ec
32791 +       .uleb128 0x6
32792 +       .long   0x3381
32793 +       .byte   0x0
32794 +       .uleb128 0x4
32795 +       .byte   0x4
32796 +       .long   0x61fa
32797 +       .uleb128 0x11
32798 +       .long   0x622b
32799 +       .byte   0x1
32800 +       .long   0xb5
32801 +       .uleb128 0x6
32802 +       .long   0x28ec
32803 +       .uleb128 0x6
32804 +       .long   0xb5
32805 +       .uleb128 0x6
32806 +       .long   0x21
32807 +       .byte   0x0
32808 +       .uleb128 0x4
32809 +       .byte   0x4
32810 +       .long   0x6211
32811 +       .uleb128 0xf
32812 +       .long   0x6268
32813 +       .long   .LASF1158
32814 +       .byte   0xc
32815 +       .byte   0x27
32816 +       .byte   0x9
32817 +       .uleb128 0xa
32818 +       .long   .LASF53
32819 +       .byte   0x27
32820 +       .byte   0xa
32821 +       .long   0x21
32822 +       .byte   0x2
32823 +       .byte   0x23
32824 +       .uleb128 0x0
32825 +       .uleb128 0xa
32826 +       .long   .LASF1159
32827 +       .byte   0x27
32828 +       .byte   0xb
32829 +       .long   0x21
32830 +       .byte   0x2
32831 +       .byte   0x23
32832 +       .uleb128 0x4
32833 +       .uleb128 0xa
32834 +       .long   .LASF106
32835 +       .byte   0x27
32836 +       .byte   0xc
32837 +       .long   0x3cfd
32838 +       .byte   0x2
32839 +       .byte   0x23
32840 +       .uleb128 0x8
32841 +       .byte   0x0
32842 +       .uleb128 0xc
32843 +       .long   0x627c
32844 +       .byte   0xc
32845 +       .byte   0x27
32846 +       .byte   0x1b
32847 +       .uleb128 0xe
32848 +       .long   .LASF1160
32849 +       .byte   0x27
32850 +       .byte   0x1c
32851 +       .long   0x6231
32852 +       .byte   0x0
32853 +       .uleb128 0x12
32854 +       .long   0x628c
32855 +       .long   0xb5
32856 +       .uleb128 0x13
32857 +       .long   0x28
32858 +       .byte   0x8
32859 +       .byte   0x0
32860 +       .uleb128 0xf
32861 +       .long   0x62b5
32862 +       .long   .LASF1161
32863 +       .byte   0x8
32864 +       .byte   0x27
32865 +       .byte   0x20
32866 +       .uleb128 0xb
32867 +       .string "mnt"
32868 +       .byte   0x27
32869 +       .byte   0x21
32870 +       .long   0x28f8
32871 +       .byte   0x2
32872 +       .byte   0x23
32873 +       .uleb128 0x0
32874 +       .uleb128 0xa
32875 +       .long   .LASF517
32876 +       .byte   0x27
32877 +       .byte   0x22
32878 +       .long   0x28ec
32879 +       .byte   0x2
32880 +       .byte   0x23
32881 +       .uleb128 0x4
32882 +       .byte   0x0
32883 +       .uleb128 0xf
32884 +       .long   0x62ec
32885 +       .long   .LASF1162
32886 +       .byte   0xc
32887 +       .byte   0x42
32888 +       .byte   0x3a
32889 +       .uleb128 0xa
32890 +       .long   .LASF1163
32891 +       .byte   0x42
32892 +       .byte   0x3b
32893 +       .long   0x77
32894 +       .byte   0x2
32895 +       .byte   0x23
32896 +       .uleb128 0x0
32897 +       .uleb128 0xa
32898 +       .long   .LASF1164
32899 +       .byte   0x42
32900 +       .byte   0x3c
32901 +       .long   0x240
32902 +       .byte   0x2
32903 +       .byte   0x23
32904 +       .uleb128 0x4
32905 +       .uleb128 0xa
32906 +       .long   .LASF1165
32907 +       .byte   0x42
32908 +       .byte   0x3d
32909 +       .long   0x62f2
32910 +       .byte   0x2
32911 +       .byte   0x23
32912 +       .uleb128 0x8
32913 +       .byte   0x0
32914 +       .uleb128 0x21
32915 +       .long   .LASF1166
32916 +       .byte   0x1
32917 +       .uleb128 0x4
32918 +       .byte   0x4
32919 +       .long   0x62ec
32920 +       .uleb128 0x15
32921 +       .long   0x638d
32922 +       .long   .LASF1167
32923 +       .byte   0x34
32924 +       .byte   0x1a
32925 +       .value  0x154
32926 +       .uleb128 0x16
32927 +       .long   .LASF1168
32928 +       .byte   0x1a
32929 +       .value  0x155
32930 +       .long   0x77
32931 +       .byte   0x2
32932 +       .byte   0x23
32933 +       .uleb128 0x0
32934 +       .uleb128 0x16
32935 +       .long   .LASF1169
32936 +       .byte   0x1a
32937 +       .value  0x156
32938 +       .long   0xea
32939 +       .byte   0x2
32940 +       .byte   0x23
32941 +       .uleb128 0x4
32942 +       .uleb128 0x16
32943 +       .long   .LASF1170
32944 +       .byte   0x1a
32945 +       .value  0x157
32946 +       .long   0x1dd
32947 +       .byte   0x2
32948 +       .byte   0x23
32949 +       .uleb128 0x8
32950 +       .uleb128 0x16
32951 +       .long   .LASF1171
32952 +       .byte   0x1a
32953 +       .value  0x158
32954 +       .long   0x1e8
32955 +       .byte   0x2
32956 +       .byte   0x23
32957 +       .uleb128 0xc
32958 +       .uleb128 0x16
32959 +       .long   .LASF1172
32960 +       .byte   0x1a
32961 +       .value  0x159
32962 +       .long   0x1f3
32963 +       .byte   0x2
32964 +       .byte   0x23
32965 +       .uleb128 0x10
32966 +       .uleb128 0x16
32967 +       .long   .LASF1173
32968 +       .byte   0x1a
32969 +       .value  0x15a
32970 +       .long   0x173b
32971 +       .byte   0x2
32972 +       .byte   0x23
32973 +       .uleb128 0x18
32974 +       .uleb128 0x16
32975 +       .long   .LASF1174
32976 +       .byte   0x1a
32977 +       .value  0x15b
32978 +       .long   0x173b
32979 +       .byte   0x2
32980 +       .byte   0x23
32981 +       .uleb128 0x20
32982 +       .uleb128 0x16
32983 +       .long   .LASF1175
32984 +       .byte   0x1a
32985 +       .value  0x15c
32986 +       .long   0x173b
32987 +       .byte   0x2
32988 +       .byte   0x23
32989 +       .uleb128 0x28
32990 +       .uleb128 0x16
32991 +       .long   .LASF1176
32992 +       .byte   0x1a
32993 +       .value  0x163
32994 +       .long   0x3cfd
32995 +       .byte   0x2
32996 +       .byte   0x23
32997 +       .uleb128 0x30
32998 +       .byte   0x0
32999 +       .uleb128 0x7
33000 +       .long   .LASF1177
33001 +       .byte   0x3b
33002 +       .byte   0x2c
33003 +       .long   0xc2
33004 +       .uleb128 0x7
33005 +       .long   .LASF1178
33006 +       .byte   0x3b
33007 +       .byte   0x2d
33008 +       .long   0x157
33009 +       .uleb128 0xf
33010 +       .long   0x642e
33011 +       .long   .LASF1179
33012 +       .byte   0x44
33013 +       .byte   0x3b
33014 +       .byte   0x67
33015 +       .uleb128 0xa
33016 +       .long   .LASF1180
33017 +       .byte   0x3b
33018 +       .byte   0x68
33019 +       .long   0x157
33020 +       .byte   0x2
33021 +       .byte   0x23
33022 +       .uleb128 0x0
33023 +       .uleb128 0xa
33024 +       .long   .LASF1181
33025 +       .byte   0x3b
33026 +       .byte   0x69
33027 +       .long   0x157
33028 +       .byte   0x2
33029 +       .byte   0x23
33030 +       .uleb128 0x8
33031 +       .uleb128 0xa
33032 +       .long   .LASF1182
33033 +       .byte   0x3b
33034 +       .byte   0x6a
33035 +       .long   0x157
33036 +       .byte   0x2
33037 +       .byte   0x23
33038 +       .uleb128 0x10
33039 +       .uleb128 0xa
33040 +       .long   .LASF1183
33041 +       .byte   0x3b
33042 +       .byte   0x6b
33043 +       .long   0x157
33044 +       .byte   0x2
33045 +       .byte   0x23
33046 +       .uleb128 0x18
33047 +       .uleb128 0xa
33048 +       .long   .LASF1184
33049 +       .byte   0x3b
33050 +       .byte   0x6c
33051 +       .long   0x157
33052 +       .byte   0x2
33053 +       .byte   0x23
33054 +       .uleb128 0x20
33055 +       .uleb128 0xa
33056 +       .long   .LASF1185
33057 +       .byte   0x3b
33058 +       .byte   0x6d
33059 +       .long   0x157
33060 +       .byte   0x2
33061 +       .byte   0x23
33062 +       .uleb128 0x28
33063 +       .uleb128 0xa
33064 +       .long   .LASF1186
33065 +       .byte   0x3b
33066 +       .byte   0x6e
33067 +       .long   0x157
33068 +       .byte   0x2
33069 +       .byte   0x23
33070 +       .uleb128 0x30
33071 +       .uleb128 0xa
33072 +       .long   .LASF1187
33073 +       .byte   0x3b
33074 +       .byte   0x6f
33075 +       .long   0x157
33076 +       .byte   0x2
33077 +       .byte   0x23
33078 +       .uleb128 0x38
33079 +       .uleb128 0xa
33080 +       .long   .LASF1188
33081 +       .byte   0x3b
33082 +       .byte   0x70
33083 +       .long   0x141
33084 +       .byte   0x2
33085 +       .byte   0x23
33086 +       .uleb128 0x40
33087 +       .byte   0x0
33088 +       .uleb128 0xf
33089 +       .long   0x6473
33090 +       .long   .LASF1189
33091 +       .byte   0x18
33092 +       .byte   0x3b
33093 +       .byte   0x7c
33094 +       .uleb128 0xa
33095 +       .long   .LASF1190
33096 +       .byte   0x3b
33097 +       .byte   0x7d
33098 +       .long   0x157
33099 +       .byte   0x2
33100 +       .byte   0x23
33101 +       .uleb128 0x0
33102 +       .uleb128 0xa
33103 +       .long   .LASF1191
33104 +       .byte   0x3b
33105 +       .byte   0x7e
33106 +       .long   0x157
33107 +       .byte   0x2
33108 +       .byte   0x23
33109 +       .uleb128 0x8
33110 +       .uleb128 0xa
33111 +       .long   .LASF1192
33112 +       .byte   0x3b
33113 +       .byte   0x7f
33114 +       .long   0x141
33115 +       .byte   0x2
33116 +       .byte   0x23
33117 +       .uleb128 0x10
33118 +       .uleb128 0xa
33119 +       .long   .LASF1193
33120 +       .byte   0x3b
33121 +       .byte   0x80
33122 +       .long   0x141
33123 +       .byte   0x2
33124 +       .byte   0x23
33125 +       .uleb128 0x14
33126 +       .byte   0x0
33127 +       .uleb128 0xf
33128 +       .long   0x65b4
33129 +       .long   .LASF1194
33130 +       .byte   0x70
33131 +       .byte   0x3c
33132 +       .byte   0x32
33133 +       .uleb128 0xa
33134 +       .long   .LASF1195
33135 +       .byte   0x3c
33136 +       .byte   0x33
33137 +       .long   0xf5
33138 +       .byte   0x2
33139 +       .byte   0x23
33140 +       .uleb128 0x0
33141 +       .uleb128 0xa
33142 +       .long   .LASF519
33143 +       .byte   0x3c
33144 +       .byte   0x34
33145 +       .long   0xf5
33146 +       .byte   0x2
33147 +       .byte   0x23
33148 +       .uleb128 0x1
33149 +       .uleb128 0xa
33150 +       .long   .LASF1196
33151 +       .byte   0x3c
33152 +       .byte   0x35
33153 +       .long   0x12b
33154 +       .byte   0x2
33155 +       .byte   0x23
33156 +       .uleb128 0x2
33157 +       .uleb128 0xa
33158 +       .long   .LASF1197
33159 +       .byte   0x3c
33160 +       .byte   0x36
33161 +       .long   0x141
33162 +       .byte   0x2
33163 +       .byte   0x23
33164 +       .uleb128 0x4
33165 +       .uleb128 0xa
33166 +       .long   .LASF1198
33167 +       .byte   0x3c
33168 +       .byte   0x37
33169 +       .long   0x157
33170 +       .byte   0x2
33171 +       .byte   0x23
33172 +       .uleb128 0x8
33173 +       .uleb128 0xa
33174 +       .long   .LASF1199
33175 +       .byte   0x3c
33176 +       .byte   0x38
33177 +       .long   0x157
33178 +       .byte   0x2
33179 +       .byte   0x23
33180 +       .uleb128 0x10
33181 +       .uleb128 0xa
33182 +       .long   .LASF1200
33183 +       .byte   0x3c
33184 +       .byte   0x39
33185 +       .long   0x157
33186 +       .byte   0x2
33187 +       .byte   0x23
33188 +       .uleb128 0x18
33189 +       .uleb128 0xa
33190 +       .long   .LASF1201
33191 +       .byte   0x3c
33192 +       .byte   0x3a
33193 +       .long   0x157
33194 +       .byte   0x2
33195 +       .byte   0x23
33196 +       .uleb128 0x20
33197 +       .uleb128 0xa
33198 +       .long   .LASF1202
33199 +       .byte   0x3c
33200 +       .byte   0x3b
33201 +       .long   0x157
33202 +       .byte   0x2
33203 +       .byte   0x23
33204 +       .uleb128 0x28
33205 +       .uleb128 0xa
33206 +       .long   .LASF1203
33207 +       .byte   0x3c
33208 +       .byte   0x3c
33209 +       .long   0x157
33210 +       .byte   0x2
33211 +       .byte   0x23
33212 +       .uleb128 0x30
33213 +       .uleb128 0xa
33214 +       .long   .LASF1204
33215 +       .byte   0x3c
33216 +       .byte   0x3d
33217 +       .long   0x136
33218 +       .byte   0x2
33219 +       .byte   0x23
33220 +       .uleb128 0x38
33221 +       .uleb128 0xa
33222 +       .long   .LASF1205
33223 +       .byte   0x3c
33224 +       .byte   0x3f
33225 +       .long   0x136
33226 +       .byte   0x2
33227 +       .byte   0x23
33228 +       .uleb128 0x3c
33229 +       .uleb128 0xa
33230 +       .long   .LASF1206
33231 +       .byte   0x3c
33232 +       .byte   0x40
33233 +       .long   0x12b
33234 +       .byte   0x2
33235 +       .byte   0x23
33236 +       .uleb128 0x40
33237 +       .uleb128 0xa
33238 +       .long   .LASF1207
33239 +       .byte   0x3c
33240 +       .byte   0x41
33241 +       .long   0x12b
33242 +       .byte   0x2
33243 +       .byte   0x23
33244 +       .uleb128 0x42
33245 +       .uleb128 0xa
33246 +       .long   .LASF1208
33247 +       .byte   0x3c
33248 +       .byte   0x42
33249 +       .long   0x136
33250 +       .byte   0x2
33251 +       .byte   0x23
33252 +       .uleb128 0x44
33253 +       .uleb128 0xa
33254 +       .long   .LASF1209
33255 +       .byte   0x3c
33256 +       .byte   0x43
33257 +       .long   0x157
33258 +       .byte   0x2
33259 +       .byte   0x23
33260 +       .uleb128 0x48
33261 +       .uleb128 0xa
33262 +       .long   .LASF1210
33263 +       .byte   0x3c
33264 +       .byte   0x44
33265 +       .long   0x157
33266 +       .byte   0x2
33267 +       .byte   0x23
33268 +       .uleb128 0x50
33269 +       .uleb128 0xa
33270 +       .long   .LASF1211
33271 +       .byte   0x3c
33272 +       .byte   0x45
33273 +       .long   0x157
33274 +       .byte   0x2
33275 +       .byte   0x23
33276 +       .uleb128 0x58
33277 +       .uleb128 0xa
33278 +       .long   .LASF1212
33279 +       .byte   0x3c
33280 +       .byte   0x46
33281 +       .long   0x136
33282 +       .byte   0x2
33283 +       .byte   0x23
33284 +       .uleb128 0x60
33285 +       .uleb128 0xa
33286 +       .long   .LASF1213
33287 +       .byte   0x3c
33288 +       .byte   0x47
33289 +       .long   0x12b
33290 +       .byte   0x2
33291 +       .byte   0x23
33292 +       .uleb128 0x64
33293 +       .uleb128 0xa
33294 +       .long   .LASF1214
33295 +       .byte   0x3c
33296 +       .byte   0x48
33297 +       .long   0x119
33298 +       .byte   0x2
33299 +       .byte   0x23
33300 +       .uleb128 0x66
33301 +       .uleb128 0xa
33302 +       .long   .LASF1215
33303 +       .byte   0x3c
33304 +       .byte   0x49
33305 +       .long   0x8ec
33306 +       .byte   0x2
33307 +       .byte   0x23
33308 +       .uleb128 0x68
33309 +       .byte   0x0
33310 +       .uleb128 0xf
33311 +       .long   0x65eb
33312 +       .long   .LASF1216
33313 +       .byte   0x14
33314 +       .byte   0x3c
33315 +       .byte   0x89
33316 +       .uleb128 0xa
33317 +       .long   .LASF1217
33318 +       .byte   0x3c
33319 +       .byte   0x8a
33320 +       .long   0x157
33321 +       .byte   0x2
33322 +       .byte   0x23
33323 +       .uleb128 0x0
33324 +       .uleb128 0xa
33325 +       .long   .LASF1218
33326 +       .byte   0x3c
33327 +       .byte   0x8b
33328 +       .long   0x157
33329 +       .byte   0x2
33330 +       .byte   0x23
33331 +       .uleb128 0x8
33332 +       .uleb128 0xa
33333 +       .long   .LASF1219
33334 +       .byte   0x3c
33335 +       .byte   0x8c
33336 +       .long   0x141
33337 +       .byte   0x2
33338 +       .byte   0x23
33339 +       .uleb128 0x10
33340 +       .byte   0x0
33341 +       .uleb128 0x7
33342 +       .long   .LASF1220
33343 +       .byte   0x3c
33344 +       .byte   0x8d
33345 +       .long   0x65b4
33346 +       .uleb128 0xf
33347 +       .long   0x669d
33348 +       .long   .LASF1221
33349 +       .byte   0x44
33350 +       .byte   0x3c
33351 +       .byte   0x8f
33352 +       .uleb128 0xa
33353 +       .long   .LASF1222
33354 +       .byte   0x3c
33355 +       .byte   0x90
33356 +       .long   0xf5
33357 +       .byte   0x2
33358 +       .byte   0x23
33359 +       .uleb128 0x0
33360 +       .uleb128 0xa
33361 +       .long   .LASF1223
33362 +       .byte   0x3c
33363 +       .byte   0x91
33364 +       .long   0x12b
33365 +       .byte   0x2
33366 +       .byte   0x23
33367 +       .uleb128 0x2
33368 +       .uleb128 0xa
33369 +       .long   .LASF1224
33370 +       .byte   0x3c
33371 +       .byte   0x92
33372 +       .long   0xf5
33373 +       .byte   0x2
33374 +       .byte   0x23
33375 +       .uleb128 0x4
33376 +       .uleb128 0xa
33377 +       .long   .LASF1225
33378 +       .byte   0x3c
33379 +       .byte   0x93
33380 +       .long   0x65eb
33381 +       .byte   0x2
33382 +       .byte   0x23
33383 +       .uleb128 0x8
33384 +       .uleb128 0xa
33385 +       .long   .LASF1226
33386 +       .byte   0x3c
33387 +       .byte   0x94
33388 +       .long   0x65eb
33389 +       .byte   0x2
33390 +       .byte   0x23
33391 +       .uleb128 0x1c
33392 +       .uleb128 0xa
33393 +       .long   .LASF1227
33394 +       .byte   0x3c
33395 +       .byte   0x95
33396 +       .long   0x141
33397 +       .byte   0x2
33398 +       .byte   0x23
33399 +       .uleb128 0x30
33400 +       .uleb128 0xa
33401 +       .long   .LASF1228
33402 +       .byte   0x3c
33403 +       .byte   0x96
33404 +       .long   0x136
33405 +       .byte   0x2
33406 +       .byte   0x23
33407 +       .uleb128 0x34
33408 +       .uleb128 0xa
33409 +       .long   .LASF1229
33410 +       .byte   0x3c
33411 +       .byte   0x97
33412 +       .long   0x136
33413 +       .byte   0x2
33414 +       .byte   0x23
33415 +       .uleb128 0x38
33416 +       .uleb128 0xa
33417 +       .long   .LASF1230
33418 +       .byte   0x3c
33419 +       .byte   0x98
33420 +       .long   0x136
33421 +       .byte   0x2
33422 +       .byte   0x23
33423 +       .uleb128 0x3c
33424 +       .uleb128 0xa
33425 +       .long   .LASF1231
33426 +       .byte   0x3c
33427 +       .byte   0x99
33428 +       .long   0x12b
33429 +       .byte   0x2
33430 +       .byte   0x23
33431 +       .uleb128 0x40
33432 +       .uleb128 0xa
33433 +       .long   .LASF1232
33434 +       .byte   0x3c
33435 +       .byte   0x9a
33436 +       .long   0x12b
33437 +       .byte   0x2
33438 +       .byte   0x23
33439 +       .uleb128 0x42
33440 +       .byte   0x0
33441 +       .uleb128 0x2a
33442 +       .long   .LASF1233
33443 +       .byte   0x0
33444 +       .byte   0x3f
33445 +       .byte   0x15
33446 +       .uleb128 0xf
33447 +       .long   0x66dc
33448 +       .long   .LASF1234
33449 +       .byte   0xc
33450 +       .byte   0x40
33451 +       .byte   0x14
33452 +       .uleb128 0xa
33453 +       .long   .LASF1235
33454 +       .byte   0x40
33455 +       .byte   0x15
33456 +       .long   0x77
33457 +       .byte   0x2
33458 +       .byte   0x23
33459 +       .uleb128 0x0
33460 +       .uleb128 0xa
33461 +       .long   .LASF1236
33462 +       .byte   0x40
33463 +       .byte   0x16
33464 +       .long   0x77
33465 +       .byte   0x2
33466 +       .byte   0x23
33467 +       .uleb128 0x4
33468 +       .uleb128 0xa
33469 +       .long   .LASF1237
33470 +       .byte   0x40
33471 +       .byte   0x17
33472 +       .long   0x77
33473 +       .byte   0x2
33474 +       .byte   0x23
33475 +       .uleb128 0x8
33476 +       .byte   0x0
33477 +       .uleb128 0xf
33478 +       .long   0x6759
33479 +       .long   .LASF1238
33480 +       .byte   0x24
33481 +       .byte   0x3b
33482 +       .byte   0x98
33483 +       .uleb128 0xa
33484 +       .long   .LASF1180
33485 +       .byte   0x3b
33486 +       .byte   0x99
33487 +       .long   0x141
33488 +       .byte   0x2
33489 +       .byte   0x23
33490 +       .uleb128 0x0
33491 +       .uleb128 0xa
33492 +       .long   .LASF1181
33493 +       .byte   0x3b
33494 +       .byte   0x9a
33495 +       .long   0x141
33496 +       .byte   0x2
33497 +       .byte   0x23
33498 +       .uleb128 0x4
33499 +       .uleb128 0xa
33500 +       .long   .LASF1182
33501 +       .byte   0x3b
33502 +       .byte   0x9b
33503 +       .long   0x6398
33504 +       .byte   0x2
33505 +       .byte   0x23
33506 +       .uleb128 0x8
33507 +       .uleb128 0xa
33508 +       .long   .LASF1183
33509 +       .byte   0x3b
33510 +       .byte   0x9c
33511 +       .long   0x141
33512 +       .byte   0x2
33513 +       .byte   0x23
33514 +       .uleb128 0x10
33515 +       .uleb128 0xa
33516 +       .long   .LASF1184
33517 +       .byte   0x3b
33518 +       .byte   0x9d
33519 +       .long   0x141
33520 +       .byte   0x2
33521 +       .byte   0x23
33522 +       .uleb128 0x14
33523 +       .uleb128 0xa
33524 +       .long   .LASF1185
33525 +       .byte   0x3b
33526 +       .byte   0x9e
33527 +       .long   0x141
33528 +       .byte   0x2
33529 +       .byte   0x23
33530 +       .uleb128 0x18
33531 +       .uleb128 0xa
33532 +       .long   .LASF1186
33533 +       .byte   0x3b
33534 +       .byte   0x9f
33535 +       .long   0x214
33536 +       .byte   0x2
33537 +       .byte   0x23
33538 +       .uleb128 0x1c
33539 +       .uleb128 0xa
33540 +       .long   .LASF1187
33541 +       .byte   0x3b
33542 +       .byte   0xa0
33543 +       .long   0x214
33544 +       .byte   0x2
33545 +       .byte   0x23
33546 +       .uleb128 0x20
33547 +       .byte   0x0
33548 +       .uleb128 0xc
33549 +       .long   0x6778
33550 +       .byte   0xc
33551 +       .byte   0x3b
33552 +       .byte   0xae
33553 +       .uleb128 0xe
33554 +       .long   .LASF1239
33555 +       .byte   0x3b
33556 +       .byte   0xaf
33557 +       .long   0x669d
33558 +       .uleb128 0xe
33559 +       .long   .LASF1240
33560 +       .byte   0x3b
33561 +       .byte   0xb0
33562 +       .long   0x66a5
33563 +       .byte   0x0
33564 +       .uleb128 0xf
33565 +       .long   0x67d7
33566 +       .long   .LASF1241
33567 +       .byte   0x24
33568 +       .byte   0x3b
33569 +       .byte   0xa8
33570 +       .uleb128 0xa
33571 +       .long   .LASF1242
33572 +       .byte   0x3b
33573 +       .byte   0xa9
33574 +       .long   0x6820
33575 +       .byte   0x2
33576 +       .byte   0x23
33577 +       .uleb128 0x0
33578 +       .uleb128 0xa
33579 +       .long   .LASF1243
33580 +       .byte   0x3b
33581 +       .byte   0xaa
33582 +       .long   0x17bc
33583 +       .byte   0x2
33584 +       .byte   0x23
33585 +       .uleb128 0x4
33586 +       .uleb128 0xa
33587 +       .long   .LASF1192
33588 +       .byte   0x3b
33589 +       .byte   0xab
33590 +       .long   0x2f
33591 +       .byte   0x2
33592 +       .byte   0x23
33593 +       .uleb128 0xc
33594 +       .uleb128 0xa
33595 +       .long   .LASF1190
33596 +       .byte   0x3b
33597 +       .byte   0xac
33598 +       .long   0x77
33599 +       .byte   0x2
33600 +       .byte   0x23
33601 +       .uleb128 0x10
33602 +       .uleb128 0xa
33603 +       .long   .LASF1191
33604 +       .byte   0x3b
33605 +       .byte   0xad
33606 +       .long   0x77
33607 +       .byte   0x2
33608 +       .byte   0x23
33609 +       .uleb128 0x14
33610 +       .uleb128 0xb
33611 +       .string "u"
33612 +       .byte   0x3b
33613 +       .byte   0xb1
33614 +       .long   0x6759
33615 +       .byte   0x2
33616 +       .byte   0x23
33617 +       .uleb128 0x18
33618 +       .byte   0x0
33619 +       .uleb128 0xf
33620 +       .long   0x6820
33621 +       .long   .LASF1244
33622 +       .byte   0x10
33623 +       .byte   0x3b
33624 +       .byte   0xa6
33625 +       .uleb128 0x16
33626 +       .long   .LASF1245
33627 +       .byte   0x3b
33628 +       .value  0x116
33629 +       .long   0x21
33630 +       .byte   0x2
33631 +       .byte   0x23
33632 +       .uleb128 0x0
33633 +       .uleb128 0x16
33634 +       .long   .LASF1246
33635 +       .byte   0x3b
33636 +       .value  0x117
33637 +       .long   0x6c90
33638 +       .byte   0x2
33639 +       .byte   0x23
33640 +       .uleb128 0x4
33641 +       .uleb128 0x16
33642 +       .long   .LASF1247
33643 +       .byte   0x3b
33644 +       .value  0x118
33645 +       .long   0x4af9
33646 +       .byte   0x2
33647 +       .byte   0x23
33648 +       .uleb128 0x8
33649 +       .uleb128 0x16
33650 +       .long   .LASF1248
33651 +       .byte   0x3b
33652 +       .value  0x119
33653 +       .long   0x6820
33654 +       .byte   0x2
33655 +       .byte   0x23
33656 +       .uleb128 0xc
33657 +       .byte   0x0
33658 +       .uleb128 0x4
33659 +       .byte   0x4
33660 +       .long   0x67d7
33661 +       .uleb128 0xf
33662 +       .long   0x68e9
33663 +       .long   .LASF1249
33664 +       .byte   0x80
33665 +       .byte   0x3b
33666 +       .byte   0xd6
33667 +       .uleb128 0xa
33668 +       .long   .LASF1250
33669 +       .byte   0x3b
33670 +       .byte   0xd7
33671 +       .long   0x1808
33672 +       .byte   0x2
33673 +       .byte   0x23
33674 +       .uleb128 0x0
33675 +       .uleb128 0xa
33676 +       .long   .LASF1251
33677 +       .byte   0x3b
33678 +       .byte   0xd8
33679 +       .long   0x17bc
33680 +       .byte   0x2
33681 +       .byte   0x23
33682 +       .uleb128 0x8
33683 +       .uleb128 0xa
33684 +       .long   .LASF1252
33685 +       .byte   0x3b
33686 +       .byte   0xd9
33687 +       .long   0x17bc
33688 +       .byte   0x2
33689 +       .byte   0x23
33690 +       .uleb128 0x10
33691 +       .uleb128 0xa
33692 +       .long   .LASF1253
33693 +       .byte   0x3b
33694 +       .byte   0xda
33695 +       .long   0x17bc
33696 +       .byte   0x2
33697 +       .byte   0x23
33698 +       .uleb128 0x18
33699 +       .uleb128 0xa
33700 +       .long   .LASF1254
33701 +       .byte   0x3b
33702 +       .byte   0xdb
33703 +       .long   0x2d94
33704 +       .byte   0x2
33705 +       .byte   0x23
33706 +       .uleb128 0x20
33707 +       .uleb128 0xa
33708 +       .long   .LASF1255
33709 +       .byte   0x3b
33710 +       .byte   0xdc
33711 +       .long   0x16c4
33712 +       .byte   0x2
33713 +       .byte   0x23
33714 +       .uleb128 0x34
33715 +       .uleb128 0xa
33716 +       .long   .LASF1256
33717 +       .byte   0x3b
33718 +       .byte   0xdd
33719 +       .long   0x18ef
33720 +       .byte   0x2
33721 +       .byte   0x23
33722 +       .uleb128 0x38
33723 +       .uleb128 0xa
33724 +       .long   .LASF1257
33725 +       .byte   0x3b
33726 +       .byte   0xde
33727 +       .long   0x60d1
33728 +       .byte   0x2
33729 +       .byte   0x23
33730 +       .uleb128 0x44
33731 +       .uleb128 0xa
33732 +       .long   .LASF1258
33733 +       .byte   0x3b
33734 +       .byte   0xdf
33735 +       .long   0x77
33736 +       .byte   0x2
33737 +       .byte   0x23
33738 +       .uleb128 0x48
33739 +       .uleb128 0xa
33740 +       .long   .LASF1259
33741 +       .byte   0x3b
33742 +       .byte   0xe0
33743 +       .long   0x1f3
33744 +       .byte   0x2
33745 +       .byte   0x23
33746 +       .uleb128 0x4c
33747 +       .uleb128 0xa
33748 +       .long   .LASF1260
33749 +       .byte   0x3b
33750 +       .byte   0xe1
33751 +       .long   0x2f
33752 +       .byte   0x2
33753 +       .byte   0x23
33754 +       .uleb128 0x54
33755 +       .uleb128 0xa
33756 +       .long   .LASF1261
33757 +       .byte   0x3b
33758 +       .byte   0xe2
33759 +       .long   0x124
33760 +       .byte   0x2
33761 +       .byte   0x23
33762 +       .uleb128 0x58
33763 +       .uleb128 0xa
33764 +       .long   .LASF1262
33765 +       .byte   0x3b
33766 +       .byte   0xe3
33767 +       .long   0x66dc
33768 +       .byte   0x2
33769 +       .byte   0x23
33770 +       .uleb128 0x5c
33771 +       .byte   0x0
33772 +       .uleb128 0xf
33773 +       .long   0x6958
33774 +       .long   .LASF1263
33775 +       .byte   0x1c
33776 +       .byte   0x3b
33777 +       .byte   0xec
33778 +       .uleb128 0xa
33779 +       .long   .LASF1264
33780 +       .byte   0x3b
33781 +       .byte   0xed
33782 +       .long   0x696d
33783 +       .byte   0x2
33784 +       .byte   0x23
33785 +       .uleb128 0x0
33786 +       .uleb128 0xa
33787 +       .long   .LASF1265
33788 +       .byte   0x3b
33789 +       .byte   0xee
33790 +       .long   0x696d
33791 +       .byte   0x2
33792 +       .byte   0x23
33793 +       .uleb128 0x4
33794 +       .uleb128 0xa
33795 +       .long   .LASF1266
33796 +       .byte   0x3b
33797 +       .byte   0xef
33798 +       .long   0x696d
33799 +       .byte   0x2
33800 +       .byte   0x23
33801 +       .uleb128 0x8
33802 +       .uleb128 0xa
33803 +       .long   .LASF1267
33804 +       .byte   0x3b
33805 +       .byte   0xf0
33806 +       .long   0x696d
33807 +       .byte   0x2
33808 +       .byte   0x23
33809 +       .uleb128 0xc
33810 +       .uleb128 0xa
33811 +       .long   .LASF1268
33812 +       .byte   0x3b
33813 +       .byte   0xf1
33814 +       .long   0x6989
33815 +       .byte   0x2
33816 +       .byte   0x23
33817 +       .uleb128 0x10
33818 +       .uleb128 0xa
33819 +       .long   .LASF1269
33820 +       .byte   0x3b
33821 +       .byte   0xf2
33822 +       .long   0x6989
33823 +       .byte   0x2
33824 +       .byte   0x23
33825 +       .uleb128 0x14
33826 +       .uleb128 0xa
33827 +       .long   .LASF1270
33828 +       .byte   0x3b
33829 +       .byte   0xf3
33830 +       .long   0x6989
33831 +       .byte   0x2
33832 +       .byte   0x23
33833 +       .uleb128 0x18
33834 +       .byte   0x0
33835 +       .uleb128 0x11
33836 +       .long   0x696d
33837 +       .byte   0x1
33838 +       .long   0x21
33839 +       .uleb128 0x6
33840 +       .long   0x60d1
33841 +       .uleb128 0x6
33842 +       .long   0x21
33843 +       .byte   0x0
33844 +       .uleb128 0x4
33845 +       .byte   0x4
33846 +       .long   0x6958
33847 +       .uleb128 0x11
33848 +       .long   0x6983
33849 +       .byte   0x1
33850 +       .long   0x21
33851 +       .uleb128 0x6
33852 +       .long   0x6983
33853 +       .byte   0x0
33854 +       .uleb128 0x4
33855 +       .byte   0x4
33856 +       .long   0x6826
33857 +       .uleb128 0x4
33858 +       .byte   0x4
33859 +       .long   0x6973
33860 +       .uleb128 0xf
33861 +       .long   0x6a48
33862 +       .long   .LASF1271
33863 +       .byte   0x30
33864 +       .byte   0x3b
33865 +       .byte   0xf7
33866 +       .uleb128 0xa
33867 +       .long   .LASF1272
33868 +       .byte   0x3b
33869 +       .byte   0xf8
33870 +       .long   0x6a5d
33871 +       .byte   0x2
33872 +       .byte   0x23
33873 +       .uleb128 0x0
33874 +       .uleb128 0xa
33875 +       .long   .LASF1273
33876 +       .byte   0x3b
33877 +       .byte   0xf9
33878 +       .long   0x6a73
33879 +       .byte   0x2
33880 +       .byte   0x23
33881 +       .uleb128 0x4
33882 +       .uleb128 0xa
33883 +       .long   .LASF1274
33884 +       .byte   0x3b
33885 +       .byte   0xfa
33886 +       .long   0x6a93
33887 +       .byte   0x2
33888 +       .byte   0x23
33889 +       .uleb128 0x8
33890 +       .uleb128 0xa
33891 +       .long   .LASF1275
33892 +       .byte   0x3b
33893 +       .byte   0xfb
33894 +       .long   0x6ab9
33895 +       .byte   0x2
33896 +       .byte   0x23
33897 +       .uleb128 0xc
33898 +       .uleb128 0xa
33899 +       .long   .LASF1276
33900 +       .byte   0x3b
33901 +       .byte   0xfc
33902 +       .long   0x6ad4
33903 +       .byte   0x2
33904 +       .byte   0x23
33905 +       .uleb128 0x10
33906 +       .uleb128 0xa
33907 +       .long   .LASF1277
33908 +       .byte   0x3b
33909 +       .byte   0xfd
33910 +       .long   0x6ab9
33911 +       .byte   0x2
33912 +       .byte   0x23
33913 +       .uleb128 0x14
33914 +       .uleb128 0xa
33915 +       .long   .LASF1278
33916 +       .byte   0x3b
33917 +       .byte   0xfe
33918 +       .long   0x6af5
33919 +       .byte   0x2
33920 +       .byte   0x23
33921 +       .uleb128 0x18
33922 +       .uleb128 0xa
33923 +       .long   .LASF1279
33924 +       .byte   0x3b
33925 +       .byte   0xff
33926 +       .long   0x6989
33927 +       .byte   0x2
33928 +       .byte   0x23
33929 +       .uleb128 0x1c
33930 +       .uleb128 0x16
33931 +       .long   .LASF1280
33932 +       .byte   0x3b
33933 +       .value  0x100
33934 +       .long   0x6989
33935 +       .byte   0x2
33936 +       .byte   0x23
33937 +       .uleb128 0x20
33938 +       .uleb128 0x16
33939 +       .long   .LASF1281
33940 +       .byte   0x3b
33941 +       .value  0x101
33942 +       .long   0x6989
33943 +       .byte   0x2
33944 +       .byte   0x23
33945 +       .uleb128 0x24
33946 +       .uleb128 0x16
33947 +       .long   .LASF1282
33948 +       .byte   0x3b
33949 +       .value  0x102
33950 +       .long   0x6989
33951 +       .byte   0x2
33952 +       .byte   0x23
33953 +       .uleb128 0x28
33954 +       .uleb128 0x16
33955 +       .long   .LASF1283
33956 +       .byte   0x3b
33957 +       .value  0x103
33958 +       .long   0x696d
33959 +       .byte   0x2
33960 +       .byte   0x23
33961 +       .uleb128 0x2c
33962 +       .byte   0x0
33963 +       .uleb128 0x11
33964 +       .long   0x6a5d
33965 +       .byte   0x1
33966 +       .long   0x21
33967 +       .uleb128 0x6
33968 +       .long   0x3381
33969 +       .uleb128 0x6
33970 +       .long   0x21
33971 +       .byte   0x0
33972 +       .uleb128 0x4
33973 +       .byte   0x4
33974 +       .long   0x6a48
33975 +       .uleb128 0x11
33976 +       .long   0x6a73
33977 +       .byte   0x1
33978 +       .long   0x21
33979 +       .uleb128 0x6
33980 +       .long   0x3381
33981 +       .byte   0x0
33982 +       .uleb128 0x4
33983 +       .byte   0x4
33984 +       .long   0x6a63
33985 +       .uleb128 0x11
33986 +       .long   0x6a93
33987 +       .byte   0x1
33988 +       .long   0x21
33989 +       .uleb128 0x6
33990 +       .long   0x3381
33991 +       .uleb128 0x6
33992 +       .long   0x6398
33993 +       .uleb128 0x6
33994 +       .long   0x21
33995 +       .byte   0x0
33996 +       .uleb128 0x4
33997 +       .byte   0x4
33998 +       .long   0x6a79
33999 +       .uleb128 0x11
34000 +       .long   0x6aae
34001 +       .byte   0x1
34002 +       .long   0x21
34003 +       .uleb128 0x6
34004 +       .long   0x6aae
34005 +       .uleb128 0x6
34006 +       .long   0x2f
34007 +       .byte   0x0
34008 +       .uleb128 0x4
34009 +       .byte   0x4
34010 +       .long   0x6ab4
34011 +       .uleb128 0x14
34012 +       .long   0x30f0
34013 +       .uleb128 0x4
34014 +       .byte   0x4
34015 +       .long   0x6a99
34016 +       .uleb128 0x11
34017 +       .long   0x6ad4
34018 +       .byte   0x1
34019 +       .long   0x21
34020 +       .uleb128 0x6
34021 +       .long   0x3381
34022 +       .uleb128 0x6
34023 +       .long   0x6398
34024 +       .byte   0x0
34025 +       .uleb128 0x4
34026 +       .byte   0x4
34027 +       .long   0x6abf
34028 +       .uleb128 0x11
34029 +       .long   0x6aef
34030 +       .byte   0x1
34031 +       .long   0x21
34032 +       .uleb128 0x6
34033 +       .long   0x3381
34034 +       .uleb128 0x6
34035 +       .long   0x6aef
34036 +       .byte   0x0
34037 +       .uleb128 0x4
34038 +       .byte   0x4
34039 +       .long   0x62f8
34040 +       .uleb128 0x4
34041 +       .byte   0x4
34042 +       .long   0x6ada
34043 +       .uleb128 0x15
34044 +       .long   0x6bae
34045 +       .long   .LASF1284
34046 +       .byte   0x2c
34047 +       .byte   0x3b
34048 +       .value  0x107
34049 +       .uleb128 0x16
34050 +       .long   .LASF1285
34051 +       .byte   0x3b
34052 +       .value  0x108
34053 +       .long   0x6bcd
34054 +       .byte   0x2
34055 +       .byte   0x23
34056 +       .uleb128 0x0
34057 +       .uleb128 0x16
34058 +       .long   .LASF1286
34059 +       .byte   0x3b
34060 +       .value  0x109
34061 +       .long   0x696d
34062 +       .byte   0x2
34063 +       .byte   0x23
34064 +       .uleb128 0x4
34065 +       .uleb128 0x16
34066 +       .long   .LASF1287
34067 +       .byte   0x3b
34068 +       .value  0x10a
34069 +       .long   0x696d
34070 +       .byte   0x2
34071 +       .byte   0x23
34072 +       .uleb128 0x8
34073 +       .uleb128 0x16
34074 +       .long   .LASF1288
34075 +       .byte   0x3b
34076 +       .value  0x10b
34077 +       .long   0x6bf3
34078 +       .byte   0x2
34079 +       .byte   0x23
34080 +       .uleb128 0xc
34081 +       .uleb128 0x16
34082 +       .long   .LASF1289
34083 +       .byte   0x3b
34084 +       .value  0x10c
34085 +       .long   0x6bf3
34086 +       .byte   0x2
34087 +       .byte   0x23
34088 +       .uleb128 0x10
34089 +       .uleb128 0x16
34090 +       .long   .LASF1290
34091 +       .byte   0x3b
34092 +       .value  0x10d
34093 +       .long   0x6c1e
34094 +       .byte   0x2
34095 +       .byte   0x23
34096 +       .uleb128 0x14
34097 +       .uleb128 0x16
34098 +       .long   .LASF1291
34099 +       .byte   0x3b
34100 +       .value  0x10e
34101 +       .long   0x6c1e
34102 +       .byte   0x2
34103 +       .byte   0x23
34104 +       .uleb128 0x18
34105 +       .uleb128 0x16
34106 +       .long   .LASF1292
34107 +       .byte   0x3b
34108 +       .value  0x10f
34109 +       .long   0x6c3f
34110 +       .byte   0x2
34111 +       .byte   0x23
34112 +       .uleb128 0x1c
34113 +       .uleb128 0x16
34114 +       .long   .LASF1293
34115 +       .byte   0x3b
34116 +       .value  0x110
34117 +       .long   0x6c5f
34118 +       .byte   0x2
34119 +       .byte   0x23
34120 +       .uleb128 0x20
34121 +       .uleb128 0x16
34122 +       .long   .LASF1294
34123 +       .byte   0x3b
34124 +       .value  0x111
34125 +       .long   0x6c8a
34126 +       .byte   0x2
34127 +       .byte   0x23
34128 +       .uleb128 0x24
34129 +       .uleb128 0x16
34130 +       .long   .LASF1295
34131 +       .byte   0x3b
34132 +       .value  0x112
34133 +       .long   0x6c8a
34134 +       .byte   0x2
34135 +       .byte   0x23
34136 +       .uleb128 0x28
34137 +       .byte   0x0
34138 +       .uleb128 0x11
34139 +       .long   0x6bcd
34140 +       .byte   0x1
34141 +       .long   0x21
34142 +       .uleb128 0x6
34143 +       .long   0x60d1
34144 +       .uleb128 0x6
34145 +       .long   0x21
34146 +       .uleb128 0x6
34147 +       .long   0x21
34148 +       .uleb128 0x6
34149 +       .long   0xb5
34150 +       .byte   0x0
34151 +       .uleb128 0x4
34152 +       .byte   0x4
34153 +       .long   0x6bae
34154 +       .uleb128 0x11
34155 +       .long   0x6bed
34156 +       .byte   0x1
34157 +       .long   0x21
34158 +       .uleb128 0x6
34159 +       .long   0x60d1
34160 +       .uleb128 0x6
34161 +       .long   0x21
34162 +       .uleb128 0x6
34163 +       .long   0x6bed
34164 +       .byte   0x0
34165 +       .uleb128 0x4
34166 +       .byte   0x4
34167 +       .long   0x642e
34168 +       .uleb128 0x4
34169 +       .byte   0x4
34170 +       .long   0x6bd3
34171 +       .uleb128 0x11
34172 +       .long   0x6c18
34173 +       .byte   0x1
34174 +       .long   0x21
34175 +       .uleb128 0x6
34176 +       .long   0x60d1
34177 +       .uleb128 0x6
34178 +       .long   0x21
34179 +       .uleb128 0x6
34180 +       .long   0x638d
34181 +       .uleb128 0x6
34182 +       .long   0x6c18
34183 +       .byte   0x0
34184 +       .uleb128 0x4
34185 +       .byte   0x4
34186 +       .long   0x63a3
34187 +       .uleb128 0x4
34188 +       .byte   0x4
34189 +       .long   0x6bf9
34190 +       .uleb128 0x11
34191 +       .long   0x6c39
34192 +       .byte   0x1
34193 +       .long   0x21
34194 +       .uleb128 0x6
34195 +       .long   0x60d1
34196 +       .uleb128 0x6
34197 +       .long   0x6c39
34198 +       .byte   0x0
34199 +       .uleb128 0x4
34200 +       .byte   0x4
34201 +       .long   0x65f6
34202 +       .uleb128 0x4
34203 +       .byte   0x4
34204 +       .long   0x6c24
34205 +       .uleb128 0x11
34206 +       .long   0x6c5f
34207 +       .byte   0x1
34208 +       .long   0x21
34209 +       .uleb128 0x6
34210 +       .long   0x60d1
34211 +       .uleb128 0x6
34212 +       .long   0x77
34213 +       .uleb128 0x6
34214 +       .long   0x21
34215 +       .byte   0x0
34216 +       .uleb128 0x4
34217 +       .byte   0x4
34218 +       .long   0x6c45
34219 +       .uleb128 0x11
34220 +       .long   0x6c84
34221 +       .byte   0x1
34222 +       .long   0x21
34223 +       .uleb128 0x6
34224 +       .long   0x60d1
34225 +       .uleb128 0x6
34226 +       .long   0x21
34227 +       .uleb128 0x6
34228 +       .long   0x638d
34229 +       .uleb128 0x6
34230 +       .long   0x6c84
34231 +       .byte   0x0
34232 +       .uleb128 0x4
34233 +       .byte   0x4
34234 +       .long   0x6473
34235 +       .uleb128 0x4
34236 +       .byte   0x4
34237 +       .long   0x6c65
34238 +       .uleb128 0x4
34239 +       .byte   0x4
34240 +       .long   0x68e9
34241 +       .uleb128 0x15
34242 +       .long   0x6d0e
34243 +       .long   .LASF1296
34244 +       .byte   0x94
34245 +       .byte   0x3b
34246 +       .value  0x11f
34247 +       .uleb128 0x16
34248 +       .long   .LASF53
34249 +       .byte   0x3b
34250 +       .value  0x120
34251 +       .long   0x77
34252 +       .byte   0x2
34253 +       .byte   0x23
34254 +       .uleb128 0x0
34255 +       .uleb128 0x16
34256 +       .long   .LASF1297
34257 +       .byte   0x3b
34258 +       .value  0x121
34259 +       .long   0x2d94
34260 +       .byte   0x2
34261 +       .byte   0x23
34262 +       .uleb128 0x4
34263 +       .uleb128 0x16
34264 +       .long   .LASF1298
34265 +       .byte   0x3b
34266 +       .value  0x122
34267 +       .long   0x2d94
34268 +       .byte   0x2
34269 +       .byte   0x23
34270 +       .uleb128 0x18
34271 +       .uleb128 0x16
34272 +       .long   .LASF1299
34273 +       .byte   0x3b
34274 +       .value  0x123
34275 +       .long   0x18fa
34276 +       .byte   0x2
34277 +       .byte   0x23
34278 +       .uleb128 0x2c
34279 +       .uleb128 0x16
34280 +       .long   .LASF245
34281 +       .byte   0x3b
34282 +       .value  0x124
34283 +       .long   0x6d0e
34284 +       .byte   0x2
34285 +       .byte   0x23
34286 +       .uleb128 0x3c
34287 +       .uleb128 0x16
34288 +       .long   .LASF82
34289 +       .byte   0x3b
34290 +       .value  0x125
34291 +       .long   0x6d1e
34292 +       .byte   0x2
34293 +       .byte   0x23
34294 +       .uleb128 0x44
34295 +       .uleb128 0x17
34296 +       .string "ops"
34297 +       .byte   0x3b
34298 +       .value  0x126
34299 +       .long   0x6d2e
34300 +       .byte   0x3
34301 +       .byte   0x23
34302 +       .uleb128 0x8c
34303 +       .byte   0x0
34304 +       .uleb128 0x12
34305 +       .long   0x6d1e
34306 +       .long   0x3381
34307 +       .uleb128 0x13
34308 +       .long   0x28
34309 +       .byte   0x1
34310 +       .byte   0x0
34311 +       .uleb128 0x12
34312 +       .long   0x6d2e
34313 +       .long   0x6778
34314 +       .uleb128 0x13
34315 +       .long   0x28
34316 +       .byte   0x1
34317 +       .byte   0x0
34318 +       .uleb128 0x12
34319 +       .long   0x6d3e
34320 +       .long   0x6c90
34321 +       .uleb128 0x13
34322 +       .long   0x28
34323 +       .byte   0x1
34324 +       .byte   0x0
34325 +       .uleb128 0x15
34326 +       .long   0x6e2d
34327 +       .long   .LASF1300
34328 +       .byte   0x3c
34329 +       .byte   0x1a
34330 +       .value  0x191
34331 +       .uleb128 0x16
34332 +       .long   .LASF1301
34333 +       .byte   0x1a
34334 +       .value  0x192
34335 +       .long   0x6e4e
34336 +       .byte   0x2
34337 +       .byte   0x23
34338 +       .uleb128 0x0
34339 +       .uleb128 0x16
34340 +       .long   .LASF1302
34341 +       .byte   0x1a
34342 +       .value  0x193
34343 +       .long   0x6e69
34344 +       .byte   0x2
34345 +       .byte   0x23
34346 +       .uleb128 0x4
34347 +       .uleb128 0x16
34348 +       .long   .LASF1303
34349 +       .byte   0x1a
34350 +       .value  0x194
34351 +       .long   0x6e7b
34352 +       .byte   0x2
34353 +       .byte   0x23
34354 +       .uleb128 0x8
34355 +       .uleb128 0x16
34356 +       .long   .LASF1304
34357 +       .byte   0x1a
34358 +       .value  0x197
34359 +       .long   0x6f9a
34360 +       .byte   0x2
34361 +       .byte   0x23
34362 +       .uleb128 0xc
34363 +       .uleb128 0x16
34364 +       .long   .LASF1305
34365 +       .byte   0x1a
34366 +       .value  0x19a
34367 +       .long   0x6fb0
34368 +       .byte   0x2
34369 +       .byte   0x23
34370 +       .uleb128 0x10
34371 +       .uleb128 0x16
34372 +       .long   .LASF1306
34373 +       .byte   0x1a
34374 +       .value  0x19d
34375 +       .long   0x6fd5
34376 +       .byte   0x2
34377 +       .byte   0x23
34378 +       .uleb128 0x14
34379 +       .uleb128 0x16
34380 +       .long   .LASF1307
34381 +       .byte   0x1a
34382 +       .value  0x1a3
34383 +       .long   0x6ffa
34384 +       .byte   0x2
34385 +       .byte   0x23
34386 +       .uleb128 0x18
34387 +       .uleb128 0x16
34388 +       .long   .LASF1308
34389 +       .byte   0x1a
34390 +       .value  0x1a4
34391 +       .long   0x6ffa
34392 +       .byte   0x2
34393 +       .byte   0x23
34394 +       .uleb128 0x1c
34395 +       .uleb128 0x16
34396 +       .long   .LASF1309
34397 +       .byte   0x1a
34398 +       .value  0x1a6
34399 +       .long   0x7015
34400 +       .byte   0x2
34401 +       .byte   0x23
34402 +       .uleb128 0x20
34403 +       .uleb128 0x16
34404 +       .long   .LASF1310
34405 +       .byte   0x1a
34406 +       .value  0x1a7
34407 +       .long   0x702c
34408 +       .byte   0x2
34409 +       .byte   0x23
34410 +       .uleb128 0x24
34411 +       .uleb128 0x16
34412 +       .long   .LASF1311
34413 +       .byte   0x1a
34414 +       .value  0x1a8
34415 +       .long   0x7047
34416 +       .byte   0x2
34417 +       .byte   0x23
34418 +       .uleb128 0x28
34419 +       .uleb128 0x16
34420 +       .long   .LASF1312
34421 +       .byte   0x1a
34422 +       .value  0x1aa
34423 +       .long   0x707c
34424 +       .byte   0x2
34425 +       .byte   0x23
34426 +       .uleb128 0x2c
34427 +       .uleb128 0x16
34428 +       .long   .LASF1313
34429 +       .byte   0x1a
34430 +       .value  0x1ac
34431 +       .long   0x709c
34432 +       .byte   0x2
34433 +       .byte   0x23
34434 +       .uleb128 0x30
34435 +       .uleb128 0x16
34436 +       .long   .LASF1314
34437 +       .byte   0x1a
34438 +       .value  0x1af
34439 +       .long   0x70bc
34440 +       .byte   0x2
34441 +       .byte   0x23
34442 +       .uleb128 0x34
34443 +       .uleb128 0x16
34444 +       .long   .LASF1315
34445 +       .byte   0x1a
34446 +       .value  0x1b0
34447 +       .long   0x6fb0
34448 +       .byte   0x2
34449 +       .byte   0x23
34450 +       .uleb128 0x38
34451 +       .byte   0x0
34452 +       .uleb128 0x11
34453 +       .long   0x6e42
34454 +       .byte   0x1
34455 +       .long   0x21
34456 +       .uleb128 0x6
34457 +       .long   0x2d82
34458 +       .uleb128 0x6
34459 +       .long   0x6e42
34460 +       .byte   0x0
34461 +       .uleb128 0x4
34462 +       .byte   0x4
34463 +       .long   0x6e48
34464 +       .uleb128 0x21
34465 +       .long   .LASF1316
34466 +       .byte   0x1
34467 +       .uleb128 0x4
34468 +       .byte   0x4
34469 +       .long   0x6e2d
34470 +       .uleb128 0x11
34471 +       .long   0x6e69
34472 +       .byte   0x1
34473 +       .long   0x21
34474 +       .uleb128 0x6
34475 +       .long   0x3cfd
34476 +       .uleb128 0x6
34477 +       .long   0x2d82
34478 +       .byte   0x0
34479 +       .uleb128 0x4
34480 +       .byte   0x4
34481 +       .long   0x6e54
34482 +       .uleb128 0x5
34483 +       .long   0x6e7b
34484 +       .byte   0x1
34485 +       .uleb128 0x6
34486 +       .long   0x2d82
34487 +       .byte   0x0
34488 +       .uleb128 0x4
34489 +       .byte   0x4
34490 +       .long   0x6e6f
34491 +       .uleb128 0x11
34492 +       .long   0x6e96
34493 +       .byte   0x1
34494 +       .long   0x21
34495 +       .uleb128 0x6
34496 +       .long   0x6e96
34497 +       .uleb128 0x6
34498 +       .long   0x6e42
34499 +       .byte   0x0
34500 +       .uleb128 0x4
34501 +       .byte   0x4
34502 +       .long   0x6e9c
34503 +       .uleb128 0x15
34504 +       .long   0x6f9a
34505 +       .long   .LASF1317
34506 +       .byte   0x54
34507 +       .byte   0x1a
34508 +       .value  0x18e
34509 +       .uleb128 0x16
34510 +       .long   .LASF1318
34511 +       .byte   0x1a
34512 +       .value  0x1b5
34513 +       .long   0x3381
34514 +       .byte   0x2
34515 +       .byte   0x23
34516 +       .uleb128 0x0
34517 +       .uleb128 0x16
34518 +       .long   .LASF1319
34519 +       .byte   0x1a
34520 +       .value  0x1b6
34521 +       .long   0x62b5
34522 +       .byte   0x2
34523 +       .byte   0x23
34524 +       .uleb128 0x4
34525 +       .uleb128 0x16
34526 +       .long   .LASF1320
34527 +       .byte   0x1a
34528 +       .value  0x1b7
34529 +       .long   0x16a2
34530 +       .byte   0x2
34531 +       .byte   0x23
34532 +       .uleb128 0x10
34533 +       .uleb128 0x16
34534 +       .long   .LASF1321
34535 +       .byte   0x1a
34536 +       .value  0x1b8
34537 +       .long   0x77
34538 +       .byte   0x2
34539 +       .byte   0x23
34540 +       .uleb128 0x14
34541 +       .uleb128 0x16
34542 +       .long   .LASF1322
34543 +       .byte   0x1a
34544 +       .value  0x1b9
34545 +       .long   0x5d7d
34546 +       .byte   0x2
34547 +       .byte   0x23
34548 +       .uleb128 0x18
34549 +       .uleb128 0x16
34550 +       .long   .LASF1323
34551 +       .byte   0x1a
34552 +       .value  0x1ba
34553 +       .long   0x17bc
34554 +       .byte   0x2
34555 +       .byte   0x23
34556 +       .uleb128 0x20
34557 +       .uleb128 0x16
34558 +       .long   .LASF1324
34559 +       .byte   0x1a
34560 +       .value  0x1bb
34561 +       .long   0x1680
34562 +       .byte   0x2
34563 +       .byte   0x23
34564 +       .uleb128 0x28
34565 +       .uleb128 0x16
34566 +       .long   .LASF1325
34567 +       .byte   0x1a
34568 +       .value  0x1bc
34569 +       .long   0x77
34570 +       .byte   0x2
34571 +       .byte   0x23
34572 +       .uleb128 0x2c
34573 +       .uleb128 0x16
34574 +       .long   .LASF1326
34575 +       .byte   0x1a
34576 +       .value  0x1bd
34577 +       .long   0x2f
34578 +       .byte   0x2
34579 +       .byte   0x23
34580 +       .uleb128 0x30
34581 +       .uleb128 0x16
34582 +       .long   .LASF1327
34583 +       .byte   0x1a
34584 +       .value  0x1be
34585 +       .long   0x2f
34586 +       .byte   0x2
34587 +       .byte   0x23
34588 +       .uleb128 0x34
34589 +       .uleb128 0x16
34590 +       .long   .LASF1328
34591 +       .byte   0x1a
34592 +       .value  0x1bf
34593 +       .long   0x70c2
34594 +       .byte   0x2
34595 +       .byte   0x23
34596 +       .uleb128 0x38
34597 +       .uleb128 0x16
34598 +       .long   .LASF53
34599 +       .byte   0x1a
34600 +       .value  0x1c0
34601 +       .long   0x2f
34602 +       .byte   0x2
34603 +       .byte   0x23
34604 +       .uleb128 0x3c
34605 +       .uleb128 0x16
34606 +       .long   .LASF271
34607 +       .byte   0x1a
34608 +       .value  0x1c1
34609 +       .long   0x4521
34610 +       .byte   0x2
34611 +       .byte   0x23
34612 +       .uleb128 0x40
34613 +       .uleb128 0x16
34614 +       .long   .LASF1329
34615 +       .byte   0x1a
34616 +       .value  0x1c2
34617 +       .long   0x1680
34618 +       .byte   0x2
34619 +       .byte   0x23
34620 +       .uleb128 0x44
34621 +       .uleb128 0x16
34622 +       .long   .LASF1330
34623 +       .byte   0x1a
34624 +       .value  0x1c3
34625 +       .long   0x17bc
34626 +       .byte   0x2
34627 +       .byte   0x23
34628 +       .uleb128 0x48
34629 +       .uleb128 0x16
34630 +       .long   .LASF1331
34631 +       .byte   0x1a
34632 +       .value  0x1c4
34633 +       .long   0x6e96
34634 +       .byte   0x2
34635 +       .byte   0x23
34636 +       .uleb128 0x50
34637 +       .byte   0x0
34638 +       .uleb128 0x4
34639 +       .byte   0x4
34640 +       .long   0x6e81
34641 +       .uleb128 0x11
34642 +       .long   0x6fb0
34643 +       .byte   0x1
34644 +       .long   0x21
34645 +       .uleb128 0x6
34646 +       .long   0x2d82
34647 +       .byte   0x0
34648 +       .uleb128 0x4
34649 +       .byte   0x4
34650 +       .long   0x6fa0
34651 +       .uleb128 0x11
34652 +       .long   0x6fd5
34653 +       .byte   0x1
34654 +       .long   0x21
34655 +       .uleb128 0x6
34656 +       .long   0x3cfd
34657 +       .uleb128 0x6
34658 +       .long   0x6e96
34659 +       .uleb128 0x6
34660 +       .long   0x17e5
34661 +       .uleb128 0x6
34662 +       .long   0x77
34663 +       .byte   0x0
34664 +       .uleb128 0x4
34665 +       .byte   0x4
34666 +       .long   0x6fb6
34667 +       .uleb128 0x11
34668 +       .long   0x6ffa
34669 +       .byte   0x1
34670 +       .long   0x21
34671 +       .uleb128 0x6
34672 +       .long   0x3cfd
34673 +       .uleb128 0x6
34674 +       .long   0x2d82
34675 +       .uleb128 0x6
34676 +       .long   0x77
34677 +       .uleb128 0x6
34678 +       .long   0x77
34679 +       .byte   0x0
34680 +       .uleb128 0x4
34681 +       .byte   0x4
34682 +       .long   0x6fdb
34683 +       .uleb128 0x11
34684 +       .long   0x7015
34685 +       .byte   0x1
34686 +       .long   0x22a
34687 +       .uleb128 0x6
34688 +       .long   0x6e96
34689 +       .uleb128 0x6
34690 +       .long   0x22a
34691 +       .byte   0x0
34692 +       .uleb128 0x4
34693 +       .byte   0x4
34694 +       .long   0x7000
34695 +       .uleb128 0x5
34696 +       .long   0x702c
34697 +       .byte   0x1
34698 +       .uleb128 0x6
34699 +       .long   0x2d82
34700 +       .uleb128 0x6
34701 +       .long   0x2f
34702 +       .byte   0x0
34703 +       .uleb128 0x4
34704 +       .byte   0x4
34705 +       .long   0x701b
34706 +       .uleb128 0x11
34707 +       .long   0x7047
34708 +       .byte   0x1
34709 +       .long   0x21
34710 +       .uleb128 0x6
34711 +       .long   0x2d82
34712 +       .uleb128 0x6
34713 +       .long   0x240
34714 +       .byte   0x0
34715 +       .uleb128 0x4
34716 +       .byte   0x4
34717 +       .long   0x7032
34718 +       .uleb128 0x11
34719 +       .long   0x7071
34720 +       .byte   0x1
34721 +       .long   0x209
34722 +       .uleb128 0x6
34723 +       .long   0x21
34724 +       .uleb128 0x6
34725 +       .long   0x3ddf
34726 +       .uleb128 0x6
34727 +       .long   0x7071
34728 +       .uleb128 0x6
34729 +       .long   0x1f3
34730 +       .uleb128 0x6
34731 +       .long   0x2f
34732 +       .byte   0x0
34733 +       .uleb128 0x4
34734 +       .byte   0x4
34735 +       .long   0x7077
34736 +       .uleb128 0x14
34737 +       .long   0x3a49
34738 +       .uleb128 0x4
34739 +       .byte   0x4
34740 +       .long   0x704d
34741 +       .uleb128 0x11
34742 +       .long   0x709c
34743 +       .byte   0x1
34744 +       .long   0x2d82
34745 +       .uleb128 0x6
34746 +       .long   0x6e96
34747 +       .uleb128 0x6
34748 +       .long   0x22a
34749 +       .uleb128 0x6
34750 +       .long   0x21
34751 +       .byte   0x0
34752 +       .uleb128 0x4
34753 +       .byte   0x4
34754 +       .long   0x7082
34755 +       .uleb128 0x11
34756 +       .long   0x70bc
34757 +       .byte   0x1
34758 +       .long   0x21
34759 +       .uleb128 0x6
34760 +       .long   0x6e96
34761 +       .uleb128 0x6
34762 +       .long   0x2d82
34763 +       .uleb128 0x6
34764 +       .long   0x2d82
34765 +       .byte   0x0
34766 +       .uleb128 0x4
34767 +       .byte   0x4
34768 +       .long   0x70a2
34769 +       .uleb128 0x4
34770 +       .byte   0x4
34771 +       .long   0x70c8
34772 +       .uleb128 0x14
34773 +       .long   0x6d3e
34774 +       .uleb128 0x15
34775 +       .long   0x71e9
34776 +       .long   .LASF1332
34777 +       .byte   0x74
34778 +       .byte   0x1a
34779 +       .value  0x1cc
34780 +       .uleb128 0x16
34781 +       .long   .LASF1333
34782 +       .byte   0x1a
34783 +       .value  0x1cd
34784 +       .long   0x19f
34785 +       .byte   0x2
34786 +       .byte   0x23
34787 +       .uleb128 0x0
34788 +       .uleb128 0x16
34789 +       .long   .LASF1334
34790 +       .byte   0x1a
34791 +       .value  0x1ce
34792 +       .long   0x3381
34793 +       .byte   0x2
34794 +       .byte   0x23
34795 +       .uleb128 0x4
34796 +       .uleb128 0x16
34797 +       .long   .LASF1335
34798 +       .byte   0x1a
34799 +       .value  0x1cf
34800 +       .long   0x21
34801 +       .byte   0x2
34802 +       .byte   0x23
34803 +       .uleb128 0x8
34804 +       .uleb128 0x16
34805 +       .long   .LASF1336
34806 +       .byte   0x1a
34807 +       .value  0x1d0
34808 +       .long   0x2d94
34809 +       .byte   0x2
34810 +       .byte   0x23
34811 +       .uleb128 0xc
34812 +       .uleb128 0x16
34813 +       .long   .LASF1337
34814 +       .byte   0x1a
34815 +       .value  0x1d1
34816 +       .long   0x1931
34817 +       .byte   0x2
34818 +       .byte   0x23
34819 +       .uleb128 0x20
34820 +       .uleb128 0x16
34821 +       .long   .LASF1338
34822 +       .byte   0x1a
34823 +       .value  0x1d2
34824 +       .long   0x17bc
34825 +       .byte   0x2
34826 +       .byte   0x23
34827 +       .uleb128 0x34
34828 +       .uleb128 0x16
34829 +       .long   .LASF1339
34830 +       .byte   0x1a
34831 +       .value  0x1d3
34832 +       .long   0x160b
34833 +       .byte   0x2
34834 +       .byte   0x23
34835 +       .uleb128 0x3c
34836 +       .uleb128 0x16
34837 +       .long   .LASF1340
34838 +       .byte   0x1a
34839 +       .value  0x1d4
34840 +       .long   0x21
34841 +       .byte   0x2
34842 +       .byte   0x23
34843 +       .uleb128 0x40
34844 +       .uleb128 0x16
34845 +       .long   .LASF1341
34846 +       .byte   0x1a
34847 +       .value  0x1d6
34848 +       .long   0x17bc
34849 +       .byte   0x2
34850 +       .byte   0x23
34851 +       .uleb128 0x44
34852 +       .uleb128 0x16
34853 +       .long   .LASF1342
34854 +       .byte   0x1a
34855 +       .value  0x1d8
34856 +       .long   0x71e9
34857 +       .byte   0x2
34858 +       .byte   0x23
34859 +       .uleb128 0x4c
34860 +       .uleb128 0x16
34861 +       .long   .LASF1343
34862 +       .byte   0x1a
34863 +       .value  0x1d9
34864 +       .long   0x77
34865 +       .byte   0x2
34866 +       .byte   0x23
34867 +       .uleb128 0x50
34868 +       .uleb128 0x16
34869 +       .long   .LASF1344
34870 +       .byte   0x1a
34871 +       .value  0x1da
34872 +       .long   0x71f5
34873 +       .byte   0x2
34874 +       .byte   0x23
34875 +       .uleb128 0x54
34876 +       .uleb128 0x16
34877 +       .long   .LASF1345
34878 +       .byte   0x1a
34879 +       .value  0x1dc
34880 +       .long   0x77
34881 +       .byte   0x2
34882 +       .byte   0x23
34883 +       .uleb128 0x58
34884 +       .uleb128 0x16
34885 +       .long   .LASF1346
34886 +       .byte   0x1a
34887 +       .value  0x1dd
34888 +       .long   0x21
34889 +       .byte   0x2
34890 +       .byte   0x23
34891 +       .uleb128 0x5c
34892 +       .uleb128 0x16
34893 +       .long   .LASF1347
34894 +       .byte   0x1a
34895 +       .value  0x1de
34896 +       .long   0x7201
34897 +       .byte   0x2
34898 +       .byte   0x23
34899 +       .uleb128 0x60
34900 +       .uleb128 0x16
34901 +       .long   .LASF1348
34902 +       .byte   0x1a
34903 +       .value  0x1df
34904 +       .long   0x17bc
34905 +       .byte   0x2
34906 +       .byte   0x23
34907 +       .uleb128 0x64
34908 +       .uleb128 0x16
34909 +       .long   .LASF1349
34910 +       .byte   0x1a
34911 +       .value  0x1e0
34912 +       .long   0x4521
34913 +       .byte   0x2
34914 +       .byte   0x23
34915 +       .uleb128 0x6c
34916 +       .uleb128 0x16
34917 +       .long   .LASF1350
34918 +       .byte   0x1a
34919 +       .value  0x1e7
34920 +       .long   0x2f
34921 +       .byte   0x2
34922 +       .byte   0x23
34923 +       .uleb128 0x70
34924 +       .byte   0x0
34925 +       .uleb128 0x4
34926 +       .byte   0x4
34927 +       .long   0x70cd
34928 +       .uleb128 0x21
34929 +       .long   .LASF1351
34930 +       .byte   0x1
34931 +       .uleb128 0x4
34932 +       .byte   0x4
34933 +       .long   0x71ef
34934 +       .uleb128 0x21
34935 +       .long   .LASF1352
34936 +       .byte   0x1
34937 +       .uleb128 0x4
34938 +       .byte   0x4
34939 +       .long   0x71fb
34940 +       .uleb128 0x2b
34941 +       .long   0x7235
34942 +       .byte   0x4
34943 +       .byte   0x1a
34944 +       .value  0x236
34945 +       .uleb128 0x1c
34946 +       .long   .LASF1353
34947 +       .byte   0x1a
34948 +       .value  0x237
34949 +       .long   0x4551
34950 +       .uleb128 0x1c
34951 +       .long   .LASF1354
34952 +       .byte   0x1a
34953 +       .value  0x238
34954 +       .long   0x71e9
34955 +       .uleb128 0x1c
34956 +       .long   .LASF1355
34957 +       .byte   0x1a
34958 +       .value  0x239
34959 +       .long   0x723b
34960 +       .byte   0x0
34961 +       .uleb128 0x21
34962 +       .long   .LASF1356
34963 +       .byte   0x1
34964 +       .uleb128 0x4
34965 +       .byte   0x4
34966 +       .long   0x7235
34967 +       .uleb128 0x15
34968 +       .long   0x738a
34969 +       .long   .LASF1357
34970 +       .byte   0x54
34971 +       .byte   0x1a
34972 +       .value  0x22c
34973 +       .uleb128 0x16
34974 +       .long   .LASF1358
34975 +       .byte   0x1a
34976 +       .value  0x45f
34977 +       .long   0x7ffc
34978 +       .byte   0x2
34979 +       .byte   0x23
34980 +       .uleb128 0x0
34981 +       .uleb128 0x16
34982 +       .long   .LASF1359
34983 +       .byte   0x1a
34984 +       .value  0x460
34985 +       .long   0x801c
34986 +       .byte   0x2
34987 +       .byte   0x23
34988 +       .uleb128 0x4
34989 +       .uleb128 0x16
34990 +       .long   .LASF1360
34991 +       .byte   0x1a
34992 +       .value  0x461
34993 +       .long   0x803c
34994 +       .byte   0x2
34995 +       .byte   0x23
34996 +       .uleb128 0x8
34997 +       .uleb128 0x16
34998 +       .long   .LASF1361
34999 +       .byte   0x1a
35000 +       .value  0x462
35001 +       .long   0x8057
35002 +       .byte   0x2
35003 +       .byte   0x23
35004 +       .uleb128 0xc
35005 +       .uleb128 0x16
35006 +       .long   .LASF1362
35007 +       .byte   0x1a
35008 +       .value  0x463
35009 +       .long   0x8077
35010 +       .byte   0x2
35011 +       .byte   0x23
35012 +       .uleb128 0x10
35013 +       .uleb128 0x16
35014 +       .long   .LASF1363
35015 +       .byte   0x1a
35016 +       .value  0x464
35017 +       .long   0x8097
35018 +       .byte   0x2
35019 +       .byte   0x23
35020 +       .uleb128 0x14
35021 +       .uleb128 0x16
35022 +       .long   .LASF1364
35023 +       .byte   0x1a
35024 +       .value  0x465
35025 +       .long   0x8057
35026 +       .byte   0x2
35027 +       .byte   0x23
35028 +       .uleb128 0x18
35029 +       .uleb128 0x16
35030 +       .long   .LASF1365
35031 +       .byte   0x1a
35032 +       .value  0x466
35033 +       .long   0x80bc
35034 +       .byte   0x2
35035 +       .byte   0x23
35036 +       .uleb128 0x1c
35037 +       .uleb128 0x16
35038 +       .long   .LASF1366
35039 +       .byte   0x1a
35040 +       .value  0x468
35041 +       .long   0x80e1
35042 +       .byte   0x2
35043 +       .byte   0x23
35044 +       .uleb128 0x20
35045 +       .uleb128 0x16
35046 +       .long   .LASF1367
35047 +       .byte   0x1a
35048 +       .value  0x469
35049 +       .long   0x8101
35050 +       .byte   0x2
35051 +       .byte   0x23
35052 +       .uleb128 0x24
35053 +       .uleb128 0x16
35054 +       .long   .LASF1368
35055 +       .byte   0x1a
35056 +       .value  0x46a
35057 +       .long   0x811c
35058 +       .byte   0x2
35059 +       .byte   0x23
35060 +       .uleb128 0x28
35061 +       .uleb128 0x16
35062 +       .long   .LASF1369
35063 +       .byte   0x1a
35064 +       .value  0x46b
35065 +       .long   0x8138
35066 +       .byte   0x2
35067 +       .byte   0x23
35068 +       .uleb128 0x2c
35069 +       .uleb128 0x16
35070 +       .long   .LASF1370
35071 +       .byte   0x1a
35072 +       .value  0x46c
35073 +       .long   0x814a
35074 +       .byte   0x2
35075 +       .byte   0x23
35076 +       .uleb128 0x30
35077 +       .uleb128 0x16
35078 +       .long   .LASF1371
35079 +       .byte   0x1a
35080 +       .value  0x46d
35081 +       .long   0x816a
35082 +       .byte   0x2
35083 +       .byte   0x23
35084 +       .uleb128 0x34
35085 +       .uleb128 0x16
35086 +       .long   .LASF1372
35087 +       .byte   0x1a
35088 +       .value  0x46e
35089 +       .long   0x8185
35090 +       .byte   0x2
35091 +       .byte   0x23
35092 +       .uleb128 0x38
35093 +       .uleb128 0x16
35094 +       .long   .LASF1373
35095 +       .byte   0x1a
35096 +       .value  0x46f
35097 +       .long   0x81ab
35098 +       .byte   0x2
35099 +       .byte   0x23
35100 +       .uleb128 0x3c
35101 +       .uleb128 0x16
35102 +       .long   .LASF1374
35103 +       .byte   0x1a
35104 +       .value  0x470
35105 +       .long   0x81dc
35106 +       .byte   0x2
35107 +       .byte   0x23
35108 +       .uleb128 0x40
35109 +       .uleb128 0x16
35110 +       .long   .LASF1375
35111 +       .byte   0x1a
35112 +       .value  0x471
35113 +       .long   0x8201
35114 +       .byte   0x2
35115 +       .byte   0x23
35116 +       .uleb128 0x44
35117 +       .uleb128 0x16
35118 +       .long   .LASF1376
35119 +       .byte   0x1a
35120 +       .value  0x472
35121 +       .long   0x8221
35122 +       .byte   0x2
35123 +       .byte   0x23
35124 +       .uleb128 0x48
35125 +       .uleb128 0x16
35126 +       .long   .LASF1377
35127 +       .byte   0x1a
35128 +       .value  0x473
35129 +       .long   0x823c
35130 +       .byte   0x2
35131 +       .byte   0x23
35132 +       .uleb128 0x4c
35133 +       .uleb128 0x16
35134 +       .long   .LASF1378
35135 +       .byte   0x1a
35136 +       .value  0x474
35137 +       .long   0x8258
35138 +       .byte   0x2
35139 +       .byte   0x23
35140 +       .uleb128 0x50
35141 +       .byte   0x0
35142 +       .uleb128 0x4
35143 +       .byte   0x4
35144 +       .long   0x7390
35145 +       .uleb128 0x14
35146 +       .long   0x7241
35147 +       .uleb128 0x15
35148 +       .long   0x7538
35149 +       .long   .LASF1379
35150 +       .byte   0x6c
35151 +       .byte   0x1a
35152 +       .value  0x22d
35153 +       .uleb128 0x16
35154 +       .long   .LASF594
35155 +       .byte   0x1a
35156 +       .value  0x441
35157 +       .long   0x4af9
35158 +       .byte   0x2
35159 +       .byte   0x23
35160 +       .uleb128 0x0
35161 +       .uleb128 0x16
35162 +       .long   .LASF1380
35163 +       .byte   0x1a
35164 +       .value  0x442
35165 +       .long   0x7da2
35166 +       .byte   0x2
35167 +       .byte   0x23
35168 +       .uleb128 0x4
35169 +       .uleb128 0x16
35170 +       .long   .LASF1381
35171 +       .byte   0x1a
35172 +       .value  0x443
35173 +       .long   0x7dcd
35174 +       .byte   0x2
35175 +       .byte   0x23
35176 +       .uleb128 0x8
35177 +       .uleb128 0x16
35178 +       .long   .LASF1382
35179 +       .byte   0x1a
35180 +       .value  0x444
35181 +       .long   0x7df2
35182 +       .byte   0x2
35183 +       .byte   0x23
35184 +       .uleb128 0xc
35185 +       .uleb128 0x16
35186 +       .long   .LASF1383
35187 +       .byte   0x1a
35188 +       .value  0x445
35189 +       .long   0x7e17
35190 +       .byte   0x2
35191 +       .byte   0x23
35192 +       .uleb128 0x10
35193 +       .uleb128 0x16
35194 +       .long   .LASF1384
35195 +       .byte   0x1a
35196 +       .value  0x446
35197 +       .long   0x7e17
35198 +       .byte   0x2
35199 +       .byte   0x23
35200 +       .uleb128 0x14
35201 +       .uleb128 0x16
35202 +       .long   .LASF1385
35203 +       .byte   0x1a
35204 +       .value  0x447
35205 +       .long   0x7e37
35206 +       .byte   0x2
35207 +       .byte   0x23
35208 +       .uleb128 0x18
35209 +       .uleb128 0x16
35210 +       .long   .LASF978
35211 +       .byte   0x1a
35212 +       .value  0x448
35213 +       .long   0x7e5e
35214 +       .byte   0x2
35215 +       .byte   0x23
35216 +       .uleb128 0x1c
35217 +       .uleb128 0x16
35218 +       .long   .LASF1386
35219 +       .byte   0x1a
35220 +       .value  0x449
35221 +       .long   0x7cb7
35222 +       .byte   0x2
35223 +       .byte   0x23
35224 +       .uleb128 0x20
35225 +       .uleb128 0x16
35226 +       .long   .LASF1387
35227 +       .byte   0x1a
35228 +       .value  0x44a
35229 +       .long   0x7cd7
35230 +       .byte   0x2
35231 +       .byte   0x23
35232 +       .uleb128 0x24
35233 +       .uleb128 0x16
35234 +       .long   .LASF1388
35235 +       .byte   0x1a
35236 +       .value  0x44b
35237 +       .long   0x7cd7
35238 +       .byte   0x2
35239 +       .byte   0x23
35240 +       .uleb128 0x28
35241 +       .uleb128 0x16
35242 +       .long   .LASF673
35243 +       .byte   0x1a
35244 +       .value  0x44c
35245 +       .long   0x7e79
35246 +       .byte   0x2
35247 +       .byte   0x23
35248 +       .uleb128 0x2c
35249 +       .uleb128 0x16
35250 +       .long   .LASF1160
35251 +       .byte   0x1a
35252 +       .value  0x44d
35253 +       .long   0x7c92
35254 +       .byte   0x2
35255 +       .byte   0x23
35256 +       .uleb128 0x30
35257 +       .uleb128 0x16
35258 +       .long   .LASF1389
35259 +       .byte   0x1a
35260 +       .value  0x44e
35261 +       .long   0x7e94
35262 +       .byte   0x2
35263 +       .byte   0x23
35264 +       .uleb128 0x34
35265 +       .uleb128 0x16
35266 +       .long   .LASF404
35267 +       .byte   0x1a
35268 +       .value  0x44f
35269 +       .long   0x7c92
35270 +       .byte   0x2
35271 +       .byte   0x23
35272 +       .uleb128 0x38
35273 +       .uleb128 0x16
35274 +       .long   .LASF1390
35275 +       .byte   0x1a
35276 +       .value  0x450
35277 +       .long   0x7eb4
35278 +       .byte   0x2
35279 +       .byte   0x23
35280 +       .uleb128 0x3c
35281 +       .uleb128 0x16
35282 +       .long   .LASF1391
35283 +       .byte   0x1a
35284 +       .value  0x451
35285 +       .long   0x7ecf
35286 +       .byte   0x2
35287 +       .byte   0x23
35288 +       .uleb128 0x40
35289 +       .uleb128 0x16
35290 +       .long   .LASF1392
35291 +       .byte   0x1a
35292 +       .value  0x452
35293 +       .long   0x7eef
35294 +       .byte   0x2
35295 +       .byte   0x23
35296 +       .uleb128 0x44
35297 +       .uleb128 0x16
35298 +       .long   .LASF285
35299 +       .byte   0x1a
35300 +       .value  0x453
35301 +       .long   0x7f0f
35302 +       .byte   0x2
35303 +       .byte   0x23
35304 +       .uleb128 0x48
35305 +       .uleb128 0x16
35306 +       .long   .LASF1393
35307 +       .byte   0x1a
35308 +       .value  0x454
35309 +       .long   0x7f39
35310 +       .byte   0x2
35311 +       .byte   0x23
35312 +       .uleb128 0x4c
35313 +       .uleb128 0x16
35314 +       .long   .LASF1394
35315 +       .byte   0x1a
35316 +       .value  0x455
35317 +       .long   0x7f68
35318 +       .byte   0x2
35319 +       .byte   0x23
35320 +       .uleb128 0x50
35321 +       .uleb128 0x16
35322 +       .long   .LASF676
35323 +       .byte   0x1a
35324 +       .value  0x456
35325 +       .long   0x3fc6
35326 +       .byte   0x2
35327 +       .byte   0x23
35328 +       .uleb128 0x54
35329 +       .uleb128 0x16
35330 +       .long   .LASF1395
35331 +       .byte   0x1a
35332 +       .value  0x457
35333 +       .long   0x21c9
35334 +       .byte   0x2
35335 +       .byte   0x23
35336 +       .uleb128 0x58
35337 +       .uleb128 0x16
35338 +       .long   .LASF1396
35339 +       .byte   0x1a
35340 +       .value  0x458
35341 +       .long   0x7f83
35342 +       .byte   0x2
35343 +       .byte   0x23
35344 +       .uleb128 0x5c
35345 +       .uleb128 0x16
35346 +       .long   .LASF1397
35347 +       .byte   0x1a
35348 +       .value  0x459
35349 +       .long   0x7f0f
35350 +       .byte   0x2
35351 +       .byte   0x23
35352 +       .uleb128 0x60
35353 +       .uleb128 0x16
35354 +       .long   .LASF1398
35355 +       .byte   0x1a
35356 +       .value  0x45a
35357 +       .long   0x7fad
35358 +       .byte   0x2
35359 +       .byte   0x23
35360 +       .uleb128 0x64
35361 +       .uleb128 0x16
35362 +       .long   .LASF1399
35363 +       .byte   0x1a
35364 +       .value  0x45b
35365 +       .long   0x7fd7
35366 +       .byte   0x2
35367 +       .byte   0x23
35368 +       .uleb128 0x68
35369 +       .byte   0x0
35370 +       .uleb128 0x4
35371 +       .byte   0x4
35372 +       .long   0x753e
35373 +       .uleb128 0x14
35374 +       .long   0x7395
35375 +       .uleb128 0x15
35376 +       .long   0x7641
35377 +       .long   .LASF1400
35378 +       .byte   0x60
35379 +       .byte   0x1a
35380 +       .value  0x22f
35381 +       .uleb128 0x16
35382 +       .long   .LASF1401
35383 +       .byte   0x1a
35384 +       .value  0x323
35385 +       .long   0x7641
35386 +       .byte   0x2
35387 +       .byte   0x23
35388 +       .uleb128 0x0
35389 +       .uleb128 0x16
35390 +       .long   .LASF1402
35391 +       .byte   0x1a
35392 +       .value  0x324
35393 +       .long   0x17bc
35394 +       .byte   0x2
35395 +       .byte   0x23
35396 +       .uleb128 0x4
35397 +       .uleb128 0x16
35398 +       .long   .LASF1403
35399 +       .byte   0x1a
35400 +       .value  0x325
35401 +       .long   0x17bc
35402 +       .byte   0x2
35403 +       .byte   0x23
35404 +       .uleb128 0xc
35405 +       .uleb128 0x16
35406 +       .long   .LASF1404
35407 +       .byte   0x1a
35408 +       .value  0x326
35409 +       .long   0x7790
35410 +       .byte   0x2
35411 +       .byte   0x23
35412 +       .uleb128 0x14
35413 +       .uleb128 0x16
35414 +       .long   .LASF1405
35415 +       .byte   0x1a
35416 +       .value  0x327
35417 +       .long   0x77
35418 +       .byte   0x2
35419 +       .byte   0x23
35420 +       .uleb128 0x18
35421 +       .uleb128 0x16
35422 +       .long   .LASF1406
35423 +       .byte   0x1a
35424 +       .value  0x328
35425 +       .long   0x18ef
35426 +       .byte   0x2
35427 +       .byte   0x23
35428 +       .uleb128 0x1c
35429 +       .uleb128 0x16
35430 +       .long   .LASF1407
35431 +       .byte   0x1a
35432 +       .value  0x329
35433 +       .long   0x3cfd
35434 +       .byte   0x2
35435 +       .byte   0x23
35436 +       .uleb128 0x28
35437 +       .uleb128 0x16
35438 +       .long   .LASF1408
35439 +       .byte   0x1a
35440 +       .value  0x32a
35441 +       .long   0x112
35442 +       .byte   0x2
35443 +       .byte   0x23
35444 +       .uleb128 0x2c
35445 +       .uleb128 0x16
35446 +       .long   .LASF1409
35447 +       .byte   0x1a
35448 +       .value  0x32b
35449 +       .long   0x112
35450 +       .byte   0x2
35451 +       .byte   0x23
35452 +       .uleb128 0x2d
35453 +       .uleb128 0x16
35454 +       .long   .LASF1410
35455 +       .byte   0x1a
35456 +       .value  0x32c
35457 +       .long   0x1f3
35458 +       .byte   0x2
35459 +       .byte   0x23
35460 +       .uleb128 0x30
35461 +       .uleb128 0x16
35462 +       .long   .LASF1411
35463 +       .byte   0x1a
35464 +       .value  0x32d
35465 +       .long   0x1f3
35466 +       .byte   0x2
35467 +       .byte   0x23
35468 +       .uleb128 0x38
35469 +       .uleb128 0x16
35470 +       .long   .LASF1412
35471 +       .byte   0x1a
35472 +       .value  0x32f
35473 +       .long   0x79c7
35474 +       .byte   0x2
35475 +       .byte   0x23
35476 +       .uleb128 0x40
35477 +       .uleb128 0x16
35478 +       .long   .LASF1413
35479 +       .byte   0x1a
35480 +       .value  0x330
35481 +       .long   0x2f
35482 +       .byte   0x2
35483 +       .byte   0x23
35484 +       .uleb128 0x44
35485 +       .uleb128 0x16
35486 +       .long   .LASF1414
35487 +       .byte   0x1a
35488 +       .value  0x332
35489 +       .long   0x79cd
35490 +       .byte   0x2
35491 +       .byte   0x23
35492 +       .uleb128 0x48
35493 +       .uleb128 0x16
35494 +       .long   .LASF1415
35495 +       .byte   0x1a
35496 +       .value  0x333
35497 +       .long   0x79d3
35498 +       .byte   0x2
35499 +       .byte   0x23
35500 +       .uleb128 0x4c
35501 +       .uleb128 0x16
35502 +       .long   .LASF1416
35503 +       .byte   0x1a
35504 +       .value  0x337
35505 +       .long   0x795b
35506 +       .byte   0x2
35507 +       .byte   0x23
35508 +       .uleb128 0x50
35509 +       .byte   0x0
35510 +       .uleb128 0x4
35511 +       .byte   0x4
35512 +       .long   0x7543
35513 +       .uleb128 0x21
35514 +       .long   .LASF1417
35515 +       .byte   0x1
35516 +       .uleb128 0x4
35517 +       .byte   0x4
35518 +       .long   0x7647
35519 +       .uleb128 0x15
35520 +       .long   0x76bb
35521 +       .long   .LASF1418
35522 +       .byte   0x18
35523 +       .byte   0x1a
35524 +       .value  0x2ad
35525 +       .uleb128 0x16
35526 +       .long   .LASF285
35527 +       .byte   0x1a
35528 +       .value  0x2ae
35529 +       .long   0x16a2
35530 +       .byte   0x2
35531 +       .byte   0x23
35532 +       .uleb128 0x0
35533 +       .uleb128 0x17
35534 +       .string "pid"
35535 +       .byte   0x1a
35536 +       .value  0x2af
35537 +       .long   0x3070
35538 +       .byte   0x2
35539 +       .byte   0x23
35540 +       .uleb128 0x4
35541 +       .uleb128 0x16
35542 +       .long   .LASF739
35543 +       .byte   0x1a
35544 +       .value  0x2b0
35545 +       .long   0x2fc0
35546 +       .byte   0x2
35547 +       .byte   0x23
35548 +       .uleb128 0x8
35549 +       .uleb128 0x17
35550 +       .string "uid"
35551 +       .byte   0x1a
35552 +       .value  0x2b1
35553 +       .long   0x1dd
35554 +       .byte   0x2
35555 +       .byte   0x23
35556 +       .uleb128 0xc
35557 +       .uleb128 0x16
35558 +       .long   .LASF226
35559 +       .byte   0x1a
35560 +       .value  0x2b1
35561 +       .long   0x1dd
35562 +       .byte   0x2
35563 +       .byte   0x23
35564 +       .uleb128 0x10
35565 +       .uleb128 0x16
35566 +       .long   .LASF1419
35567 +       .byte   0x1a
35568 +       .value  0x2b2
35569 +       .long   0x21
35570 +       .byte   0x2
35571 +       .byte   0x23
35572 +       .uleb128 0x14
35573 +       .byte   0x0
35574 +       .uleb128 0x15
35575 +       .long   0x776e
35576 +       .long   .LASF1420
35577 +       .byte   0x2c
35578 +       .byte   0x1a
35579 +       .value  0x2b8
35580 +       .uleb128 0x16
35581 +       .long   .LASF1045
35582 +       .byte   0x1a
35583 +       .value  0x2b9
35584 +       .long   0x2f
35585 +       .byte   0x2
35586 +       .byte   0x23
35587 +       .uleb128 0x0
35588 +       .uleb128 0x16
35589 +       .long   .LASF328
35590 +       .byte   0x1a
35591 +       .value  0x2ba
35592 +       .long   0x2f
35593 +       .byte   0x2
35594 +       .byte   0x23
35595 +       .uleb128 0x4
35596 +       .uleb128 0x16
35597 +       .long   .LASF53
35598 +       .byte   0x1a
35599 +       .value  0x2bb
35600 +       .long   0x2f
35601 +       .byte   0x2
35602 +       .byte   0x23
35603 +       .uleb128 0x8
35604 +       .uleb128 0x16
35605 +       .long   .LASF1421
35606 +       .byte   0x1a
35607 +       .value  0x2bc
35608 +       .long   0x2f
35609 +       .byte   0x2
35610 +       .byte   0x23
35611 +       .uleb128 0xc
35612 +       .uleb128 0x16
35613 +       .long   .LASF1422
35614 +       .byte   0x1a
35615 +       .value  0x2bd
35616 +       .long   0x2f
35617 +       .byte   0x2
35618 +       .byte   0x23
35619 +       .uleb128 0x10
35620 +       .uleb128 0x16
35621 +       .long   .LASF1423
35622 +       .byte   0x1a
35623 +       .value  0x2be
35624 +       .long   0x2f
35625 +       .byte   0x2
35626 +       .byte   0x23
35627 +       .uleb128 0x14
35628 +       .uleb128 0x16
35629 +       .long   .LASF1424
35630 +       .byte   0x1a
35631 +       .value  0x2bf
35632 +       .long   0x2f
35633 +       .byte   0x2
35634 +       .byte   0x23
35635 +       .uleb128 0x18
35636 +       .uleb128 0x16
35637 +       .long   .LASF887
35638 +       .byte   0x1a
35639 +       .value  0x2c0
35640 +       .long   0x2f
35641 +       .byte   0x2
35642 +       .byte   0x23
35643 +       .uleb128 0x1c
35644 +       .uleb128 0x16
35645 +       .long   .LASF1425
35646 +       .byte   0x1a
35647 +       .value  0x2c1
35648 +       .long   0x2f
35649 +       .byte   0x2
35650 +       .byte   0x23
35651 +       .uleb128 0x20
35652 +       .uleb128 0x16
35653 +       .long   .LASF1426
35654 +       .byte   0x1a
35655 +       .value  0x2c2
35656 +       .long   0x2f
35657 +       .byte   0x2
35658 +       .byte   0x23
35659 +       .uleb128 0x24
35660 +       .uleb128 0x16
35661 +       .long   .LASF1427
35662 +       .byte   0x1a
35663 +       .value  0x2c3
35664 +       .long   0x77
35665 +       .byte   0x2
35666 +       .byte   0x23
35667 +       .uleb128 0x28
35668 +       .byte   0x0
35669 +       .uleb128 0x2b
35670 +       .long   0x7790
35671 +       .byte   0x8
35672 +       .byte   0x1a
35673 +       .value  0x2cd
35674 +       .uleb128 0x1c
35675 +       .long   .LASF1428
35676 +       .byte   0x1a
35677 +       .value  0x2ce
35678 +       .long   0x17bc
35679 +       .uleb128 0x1c
35680 +       .long   .LASF1429
35681 +       .byte   0x1a
35682 +       .value  0x2cf
35683 +       .long   0x2ea8
35684 +       .byte   0x0
35685 +       .uleb128 0x1e
35686 +       .long   .LASF1430
35687 +       .byte   0x1a
35688 +       .value  0x30b
35689 +       .long   0x442b
35690 +       .uleb128 0x15
35691 +       .long   0x77e6
35692 +       .long   .LASF1431
35693 +       .byte   0x10
35694 +       .byte   0x1a
35695 +       .value  0x30d
35696 +       .uleb128 0x16
35697 +       .long   .LASF1432
35698 +       .byte   0x1a
35699 +       .value  0x30e
35700 +       .long   0x77f2
35701 +       .byte   0x2
35702 +       .byte   0x23
35703 +       .uleb128 0x0
35704 +       .uleb128 0x16
35705 +       .long   .LASF1433
35706 +       .byte   0x1a
35707 +       .value  0x30f
35708 +       .long   0x77f2
35709 +       .byte   0x2
35710 +       .byte   0x23
35711 +       .uleb128 0x4
35712 +       .uleb128 0x16
35713 +       .long   .LASF1434
35714 +       .byte   0x1a
35715 +       .value  0x310
35716 +       .long   0x7809
35717 +       .byte   0x2
35718 +       .byte   0x23
35719 +       .uleb128 0x8
35720 +       .uleb128 0x16
35721 +       .long   .LASF1435
35722 +       .byte   0x1a
35723 +       .value  0x311
35724 +       .long   0x77f2
35725 +       .byte   0x2
35726 +       .byte   0x23
35727 +       .uleb128 0xc
35728 +       .byte   0x0
35729 +       .uleb128 0x5
35730 +       .long   0x77f2
35731 +       .byte   0x1
35732 +       .uleb128 0x6
35733 +       .long   0x7641
35734 +       .byte   0x0
35735 +       .uleb128 0x4
35736 +       .byte   0x4
35737 +       .long   0x77e6
35738 +       .uleb128 0x5
35739 +       .long   0x7809
35740 +       .byte   0x1
35741 +       .uleb128 0x6
35742 +       .long   0x7641
35743 +       .uleb128 0x6
35744 +       .long   0x7641
35745 +       .byte   0x0
35746 +       .uleb128 0x4
35747 +       .byte   0x4
35748 +       .long   0x77f8
35749 +       .uleb128 0x15
35750 +       .long   0x7895
35751 +       .long   .LASF1436
35752 +       .byte   0x20
35753 +       .byte   0x1a
35754 +       .value  0x314
35755 +       .uleb128 0x16
35756 +       .long   .LASF1437
35757 +       .byte   0x1a
35758 +       .value  0x315
35759 +       .long   0x78aa
35760 +       .byte   0x2
35761 +       .byte   0x23
35762 +       .uleb128 0x0
35763 +       .uleb128 0x16
35764 +       .long   .LASF1438
35765 +       .byte   0x1a
35766 +       .value  0x316
35767 +       .long   0x77f2
35768 +       .byte   0x2
35769 +       .byte   0x23
35770 +       .uleb128 0x4
35771 +       .uleb128 0x16
35772 +       .long   .LASF1439
35773 +       .byte   0x1a
35774 +       .value  0x317
35775 +       .long   0x78ca
35776 +       .byte   0x2
35777 +       .byte   0x23
35778 +       .uleb128 0x8
35779 +       .uleb128 0x16
35780 +       .long   .LASF1434
35781 +       .byte   0x1a
35782 +       .value  0x318
35783 +       .long   0x7809
35784 +       .byte   0x2
35785 +       .byte   0x23
35786 +       .uleb128 0xc
35787 +       .uleb128 0x16
35788 +       .long   .LASF1435
35789 +       .byte   0x1a
35790 +       .value  0x319
35791 +       .long   0x77f2
35792 +       .byte   0x2
35793 +       .byte   0x23
35794 +       .uleb128 0x10
35795 +       .uleb128 0x16
35796 +       .long   .LASF1440
35797 +       .byte   0x1a
35798 +       .value  0x31a
35799 +       .long   0x77f2
35800 +       .byte   0x2
35801 +       .byte   0x23
35802 +       .uleb128 0x14
35803 +       .uleb128 0x16
35804 +       .long   .LASF1441
35805 +       .byte   0x1a
35806 +       .value  0x31b
35807 +       .long   0x78aa
35808 +       .byte   0x2
35809 +       .byte   0x23
35810 +       .uleb128 0x18
35811 +       .uleb128 0x16
35812 +       .long   .LASF1442
35813 +       .byte   0x1a
35814 +       .value  0x31c
35815 +       .long   0x78eb
35816 +       .byte   0x2
35817 +       .byte   0x23
35818 +       .uleb128 0x1c
35819 +       .byte   0x0
35820 +       .uleb128 0x11
35821 +       .long   0x78aa
35822 +       .byte   0x1
35823 +       .long   0x21
35824 +       .uleb128 0x6
35825 +       .long   0x7641
35826 +       .uleb128 0x6
35827 +       .long   0x7641
35828 +       .byte   0x0
35829 +       .uleb128 0x4
35830 +       .byte   0x4
35831 +       .long   0x7895
35832 +       .uleb128 0x11
35833 +       .long   0x78ca
35834 +       .byte   0x1
35835 +       .long   0x21
35836 +       .uleb128 0x6
35837 +       .long   0x7641
35838 +       .uleb128 0x6
35839 +       .long   0x7641
35840 +       .uleb128 0x6
35841 +       .long   0x21
35842 +       .byte   0x0
35843 +       .uleb128 0x4
35844 +       .byte   0x4
35845 +       .long   0x78b0
35846 +       .uleb128 0x11
35847 +       .long   0x78e5
35848 +       .byte   0x1
35849 +       .long   0x21
35850 +       .uleb128 0x6
35851 +       .long   0x78e5
35852 +       .uleb128 0x6
35853 +       .long   0x21
35854 +       .byte   0x0
35855 +       .uleb128 0x4
35856 +       .byte   0x4
35857 +       .long   0x7641
35858 +       .uleb128 0x4
35859 +       .byte   0x4
35860 +       .long   0x78d0
35861 +       .uleb128 0xf
35862 +       .long   0x7928
35863 +       .long   .LASF1443
35864 +       .byte   0x10
35865 +       .byte   0x37
35866 +       .byte   0xd
35867 +       .uleb128 0xa
35868 +       .long   .LASF169
35869 +       .byte   0x37
35870 +       .byte   0xe
35871 +       .long   0x173
35872 +       .byte   0x2
35873 +       .byte   0x23
35874 +       .uleb128 0x0
35875 +       .uleb128 0xa
35876 +       .long   .LASF594
35877 +       .byte   0x37
35878 +       .byte   0xf
35879 +       .long   0x792e
35880 +       .byte   0x2
35881 +       .byte   0x23
35882 +       .uleb128 0x4
35883 +       .uleb128 0xa
35884 +       .long   .LASF509
35885 +       .byte   0x37
35886 +       .byte   0x10
35887 +       .long   0x17bc
35888 +       .byte   0x2
35889 +       .byte   0x23
35890 +       .uleb128 0x8
35891 +       .byte   0x0
35892 +       .uleb128 0x21
35893 +       .long   .LASF1444
35894 +       .byte   0x1
35895 +       .uleb128 0x4
35896 +       .byte   0x4
35897 +       .long   0x7928
35898 +       .uleb128 0xf
35899 +       .long   0x794f
35900 +       .long   .LASF1445
35901 +       .byte   0x4
35902 +       .byte   0x37
35903 +       .byte   0x14
35904 +       .uleb128 0xa
35905 +       .long   .LASF594
35906 +       .byte   0x37
35907 +       .byte   0x15
35908 +       .long   0x7955
35909 +       .byte   0x2
35910 +       .byte   0x23
35911 +       .uleb128 0x0
35912 +       .byte   0x0
35913 +       .uleb128 0x21
35914 +       .long   .LASF1446
35915 +       .byte   0x1
35916 +       .uleb128 0x4
35917 +       .byte   0x4
35918 +       .long   0x794f
35919 +       .uleb128 0x2b
35920 +       .long   0x797d
35921 +       .byte   0x10
35922 +       .byte   0x1a
35923 +       .value  0x334
35924 +       .uleb128 0x1c
35925 +       .long   .LASF1447
35926 +       .byte   0x1a
35927 +       .value  0x335
35928 +       .long   0x78f1
35929 +       .uleb128 0x1c
35930 +       .long   .LASF1448
35931 +       .byte   0x1a
35932 +       .value  0x336
35933 +       .long   0x7934
35934 +       .byte   0x0
35935 +       .uleb128 0x15
35936 +       .long   0x79c7
35937 +       .long   .LASF1449
35938 +       .byte   0x10
35939 +       .byte   0x1a
35940 +       .value  0x32f
35941 +       .uleb128 0x16
35942 +       .long   .LASF1450
35943 +       .byte   0x1a
35944 +       .value  0x36a
35945 +       .long   0x21
35946 +       .byte   0x2
35947 +       .byte   0x23
35948 +       .uleb128 0x0
35949 +       .uleb128 0x16
35950 +       .long   .LASF1451
35951 +       .byte   0x1a
35952 +       .value  0x36b
35953 +       .long   0x21
35954 +       .byte   0x2
35955 +       .byte   0x23
35956 +       .uleb128 0x4
35957 +       .uleb128 0x16
35958 +       .long   .LASF1452
35959 +       .byte   0x1a
35960 +       .value  0x36c
35961 +       .long   0x79c7
35962 +       .byte   0x2
35963 +       .byte   0x23
35964 +       .uleb128 0x8
35965 +       .uleb128 0x16
35966 +       .long   .LASF1453
35967 +       .byte   0x1a
35968 +       .value  0x36d
35969 +       .long   0x3cfd
35970 +       .byte   0x2
35971 +       .byte   0x23
35972 +       .uleb128 0xc
35973 +       .byte   0x0
35974 +       .uleb128 0x4
35975 +       .byte   0x4
35976 +       .long   0x797d
35977 +       .uleb128 0x4
35978 +       .byte   0x4
35979 +       .long   0x779c
35980 +       .uleb128 0x4
35981 +       .byte   0x4
35982 +       .long   0x780f
35983 +       .uleb128 0x15
35984 +       .long   0x7a6e
35985 +       .long   .LASF1454
35986 +       .byte   0x20
35987 +       .byte   0x1a
35988 +       .value  0x393
35989 +       .uleb128 0x16
35990 +       .long   .LASF414
35991 +       .byte   0x1a
35992 +       .value  0x56f
35993 +       .long   0x7f2
35994 +       .byte   0x2
35995 +       .byte   0x23
35996 +       .uleb128 0x0
35997 +       .uleb128 0x16
35998 +       .long   .LASF1455
35999 +       .byte   0x1a
36000 +       .value  0x570
36001 +       .long   0x21
36002 +       .byte   0x2
36003 +       .byte   0x23
36004 +       .uleb128 0x4
36005 +       .uleb128 0x16
36006 +       .long   .LASF1456
36007 +       .byte   0x1a
36008 +       .value  0x572
36009 +       .long   0x841f
36010 +       .byte   0x2
36011 +       .byte   0x23
36012 +       .uleb128 0x8
36013 +       .uleb128 0x16
36014 +       .long   .LASF1457
36015 +       .byte   0x1a
36016 +       .value  0x573
36017 +       .long   0x8280
36018 +       .byte   0x2
36019 +       .byte   0x23
36020 +       .uleb128 0xc
36021 +       .uleb128 0x16
36022 +       .long   .LASF594
36023 +       .byte   0x1a
36024 +       .value  0x574
36025 +       .long   0x4af9
36026 +       .byte   0x2
36027 +       .byte   0x23
36028 +       .uleb128 0x10
36029 +       .uleb128 0x16
36030 +       .long   .LASF307
36031 +       .byte   0x1a
36032 +       .value  0x575
36033 +       .long   0x7a6e
36034 +       .byte   0x2
36035 +       .byte   0x23
36036 +       .uleb128 0x14
36037 +       .uleb128 0x16
36038 +       .long   .LASF1458
36039 +       .byte   0x1a
36040 +       .value  0x576
36041 +       .long   0x17bc
36042 +       .byte   0x2
36043 +       .byte   0x23
36044 +       .uleb128 0x18
36045 +       .uleb128 0x16
36046 +       .long   .LASF1459
36047 +       .byte   0x1a
36048 +       .value  0x577
36049 +       .long   0x161c
36050 +       .byte   0x2
36051 +       .byte   0x23
36052 +       .uleb128 0x20
36053 +       .uleb128 0x16
36054 +       .long   .LASF1460
36055 +       .byte   0x1a
36056 +       .value  0x578
36057 +       .long   0x161c
36058 +       .byte   0x2
36059 +       .byte   0x23
36060 +       .uleb128 0x20
36061 +       .byte   0x0
36062 +       .uleb128 0x4
36063 +       .byte   0x4
36064 +       .long   0x79d9
36065 +       .uleb128 0x15
36066 +       .long   0x7b9f
36067 +       .long   .LASF1461
36068 +       .byte   0x4c
36069 +       .byte   0x1a
36070 +       .value  0x394
36071 +       .uleb128 0x16
36072 +       .long   .LASF1275
36073 +       .byte   0x1a
36074 +       .value  0x48a
36075 +       .long   0x826e
36076 +       .byte   0x2
36077 +       .byte   0x23
36078 +       .uleb128 0x0
36079 +       .uleb128 0x16
36080 +       .long   .LASF1462
36081 +       .byte   0x1a
36082 +       .value  0x48b
36083 +       .long   0x814a
36084 +       .byte   0x2
36085 +       .byte   0x23
36086 +       .uleb128 0x4
36087 +       .uleb128 0x16
36088 +       .long   .LASF1463
36089 +       .byte   0x1a
36090 +       .value  0x48d
36091 +       .long   0x814a
36092 +       .byte   0x2
36093 +       .byte   0x23
36094 +       .uleb128 0x8
36095 +       .uleb128 0x16
36096 +       .long   .LASF1464
36097 +       .byte   0x1a
36098 +       .value  0x48f
36099 +       .long   0x814a
36100 +       .byte   0x2
36101 +       .byte   0x23
36102 +       .uleb128 0xc
36103 +       .uleb128 0x16
36104 +       .long   .LASF1465
36105 +       .byte   0x1a
36106 +       .value  0x490
36107 +       .long   0x6a5d
36108 +       .byte   0x2
36109 +       .byte   0x23
36110 +       .uleb128 0x10
36111 +       .uleb128 0x16
36112 +       .long   .LASF1466
36113 +       .byte   0x1a
36114 +       .value  0x491
36115 +       .long   0x814a
36116 +       .byte   0x2
36117 +       .byte   0x23
36118 +       .uleb128 0x14
36119 +       .uleb128 0x16
36120 +       .long   .LASF1467
36121 +       .byte   0x1a
36122 +       .value  0x492
36123 +       .long   0x814a
36124 +       .byte   0x2
36125 +       .byte   0x23
36126 +       .uleb128 0x18
36127 +       .uleb128 0x16
36128 +       .long   .LASF1468
36129 +       .byte   0x1a
36130 +       .value  0x493
36131 +       .long   0x814a
36132 +       .byte   0x2
36133 +       .byte   0x23
36134 +       .uleb128 0x1c
36135 +       .uleb128 0x16
36136 +       .long   .LASF1469
36137 +       .byte   0x1a
36138 +       .value  0x494
36139 +       .long   0x8280
36140 +       .byte   0x2
36141 +       .byte   0x23
36142 +       .uleb128 0x20
36143 +       .uleb128 0x16
36144 +       .long   .LASF1470
36145 +       .byte   0x1a
36146 +       .value  0x495
36147 +       .long   0x8280
36148 +       .byte   0x2
36149 +       .byte   0x23
36150 +       .uleb128 0x24
36151 +       .uleb128 0x16
36152 +       .long   .LASF1471
36153 +       .byte   0x1a
36154 +       .value  0x496
36155 +       .long   0x696d
36156 +       .byte   0x2
36157 +       .byte   0x23
36158 +       .uleb128 0x28
36159 +       .uleb128 0x16
36160 +       .long   .LASF1472
36161 +       .byte   0x1a
36162 +       .value  0x497
36163 +       .long   0x8280
36164 +       .byte   0x2
36165 +       .byte   0x23
36166 +       .uleb128 0x2c
36167 +       .uleb128 0x16
36168 +       .long   .LASF1473
36169 +       .byte   0x1a
36170 +       .value  0x498
36171 +       .long   0x8280
36172 +       .byte   0x2
36173 +       .byte   0x23
36174 +       .uleb128 0x30
36175 +       .uleb128 0x16
36176 +       .long   .LASF1474
36177 +       .byte   0x1a
36178 +       .value  0x499
36179 +       .long   0x82a7
36180 +       .byte   0x2
36181 +       .byte   0x23
36182 +       .uleb128 0x34
36183 +       .uleb128 0x16
36184 +       .long   .LASF1475
36185 +       .byte   0x1a
36186 +       .value  0x49a
36187 +       .long   0x82c7
36188 +       .byte   0x2
36189 +       .byte   0x23
36190 +       .uleb128 0x38
36191 +       .uleb128 0x16
36192 +       .long   .LASF1476
36193 +       .byte   0x1a
36194 +       .value  0x49b
36195 +       .long   0x814a
36196 +       .byte   0x2
36197 +       .byte   0x23
36198 +       .uleb128 0x3c
36199 +       .uleb128 0x16
36200 +       .long   .LASF1477
36201 +       .byte   0x1a
36202 +       .value  0x49c
36203 +       .long   0x82de
36204 +       .byte   0x2
36205 +       .byte   0x23
36206 +       .uleb128 0x40
36207 +       .uleb128 0x16
36208 +       .long   .LASF1478
36209 +       .byte   0x1a
36210 +       .value  0x49e
36211 +       .long   0x8305
36212 +       .byte   0x2
36213 +       .byte   0x23
36214 +       .uleb128 0x44
36215 +       .uleb128 0x16
36216 +       .long   .LASF1479
36217 +       .byte   0x1a
36218 +       .value  0x49f
36219 +       .long   0x8305
36220 +       .byte   0x2
36221 +       .byte   0x23
36222 +       .uleb128 0x48
36223 +       .byte   0x0
36224 +       .uleb128 0x4
36225 +       .byte   0x4
36226 +       .long   0x7ba5
36227 +       .uleb128 0x14
36228 +       .long   0x7a74
36229 +       .uleb128 0x4
36230 +       .byte   0x4
36231 +       .long   0x698f
36232 +       .uleb128 0x4
36233 +       .byte   0x4
36234 +       .long   0x6afb
36235 +       .uleb128 0x15
36236 +       .long   0x7c1e
36237 +       .long   .LASF1480
36238 +       .byte   0x18
36239 +       .byte   0x1a
36240 +       .value  0x397
36241 +       .uleb128 0x16
36242 +       .long   .LASF1481
36243 +       .byte   0x1a
36244 +       .value  0x556
36245 +       .long   0x8355
36246 +       .byte   0x2
36247 +       .byte   0x23
36248 +       .uleb128 0x0
36249 +       .uleb128 0x16
36250 +       .long   .LASF1482
36251 +       .byte   0x1a
36252 +       .value  0x558
36253 +       .long   0x837a
36254 +       .byte   0x2
36255 +       .byte   0x23
36256 +       .uleb128 0x4
36257 +       .uleb128 0x16
36258 +       .long   .LASF1483
36259 +       .byte   0x1a
36260 +       .value  0x55c
36261 +       .long   0x839a
36262 +       .byte   0x2
36263 +       .byte   0x23
36264 +       .uleb128 0x8
36265 +       .uleb128 0x16
36266 +       .long   .LASF1484
36267 +       .byte   0x1a
36268 +       .value  0x55d
36269 +       .long   0x83b0
36270 +       .byte   0x2
36271 +       .byte   0x23
36272 +       .uleb128 0xc
36273 +       .uleb128 0x16
36274 +       .long   .LASF1485
36275 +       .byte   0x1a
36276 +       .value  0x55e
36277 +       .long   0x83cb
36278 +       .byte   0x2
36279 +       .byte   0x23
36280 +       .uleb128 0x10
36281 +       .uleb128 0x16
36282 +       .long   .LASF1486
36283 +       .byte   0x1a
36284 +       .value  0x564
36285 +       .long   0x83f5
36286 +       .byte   0x2
36287 +       .byte   0x23
36288 +       .uleb128 0x14
36289 +       .byte   0x0
36290 +       .uleb128 0x4
36291 +       .byte   0x4
36292 +       .long   0x7bb6
36293 +       .uleb128 0x21
36294 +       .long   .LASF1487
36295 +       .byte   0x1
36296 +       .uleb128 0x4
36297 +       .byte   0x4
36298 +       .long   0x7c30
36299 +       .uleb128 0x4
36300 +       .byte   0x4
36301 +       .long   0x7c24
36302 +       .uleb128 0x21
36303 +       .long   .LASF1488
36304 +       .byte   0x1
36305 +       .uleb128 0x4
36306 +       .byte   0x4
36307 +       .long   0x7c36
36308 +       .uleb128 0x1e
36309 +       .long   .LASF1489
36310 +       .byte   0x1a
36311 +       .value  0x411
36312 +       .long   0x7c4e
36313 +       .uleb128 0x4
36314 +       .byte   0x4
36315 +       .long   0x7c54
36316 +       .uleb128 0x11
36317 +       .long   0x7c7d
36318 +       .byte   0x1
36319 +       .long   0x21
36320 +       .uleb128 0x6
36321 +       .long   0x160b
36322 +       .uleb128 0x6
36323 +       .long   0x7f2
36324 +       .uleb128 0x6
36325 +       .long   0x21
36326 +       .uleb128 0x6
36327 +       .long   0x1f3
36328 +       .uleb128 0x6
36329 +       .long   0x189
36330 +       .uleb128 0x6
36331 +       .long   0x77
36332 +       .byte   0x0
36333 +       .uleb128 0x11
36334 +       .long   0x7c92
36335 +       .byte   0x1
36336 +       .long   0x21
36337 +       .uleb128 0x6
36338 +       .long   0x3381
36339 +       .uleb128 0x6
36340 +       .long   0x3cfd
36341 +       .byte   0x0
36342 +       .uleb128 0x4
36343 +       .byte   0x4
36344 +       .long   0x7c7d
36345 +       .uleb128 0x11
36346 +       .long   0x7cb7
36347 +       .byte   0x1
36348 +       .long   0x21
36349 +       .uleb128 0x6
36350 +       .long   0x3381
36351 +       .uleb128 0x6
36352 +       .long   0x3cfd
36353 +       .uleb128 0x6
36354 +       .long   0x77
36355 +       .uleb128 0x6
36356 +       .long   0x2f
36357 +       .byte   0x0
36358 +       .uleb128 0x4
36359 +       .byte   0x4
36360 +       .long   0x7c98
36361 +       .uleb128 0x11
36362 +       .long   0x7cd7
36363 +       .byte   0x1
36364 +       .long   0x5a
36365 +       .uleb128 0x6
36366 +       .long   0x3cfd
36367 +       .uleb128 0x6
36368 +       .long   0x77
36369 +       .uleb128 0x6
36370 +       .long   0x2f
36371 +       .byte   0x0
36372 +       .uleb128 0x4
36373 +       .byte   0x4
36374 +       .long   0x7cbd
36375 +       .uleb128 0x2b
36376 +       .long   0x7cff
36377 +       .byte   0x4
36378 +       .byte   0x1a
36379 +       .value  0x42c
36380 +       .uleb128 0x30
36381 +       .string "buf"
36382 +       .byte   0x1a
36383 +       .value  0x42d
36384 +       .long   0xb5
36385 +       .uleb128 0x1c
36386 +       .long   .LASF734
36387 +       .byte   0x1a
36388 +       .value  0x42e
36389 +       .long   0x160b
36390 +       .byte   0x0
36391 +       .uleb128 0x1d
36392 +       .long   0x7d45
36393 +       .byte   0x10
36394 +       .byte   0x1a
36395 +       .value  0x429
36396 +       .uleb128 0x16
36397 +       .long   .LASF1490
36398 +       .byte   0x1a
36399 +       .value  0x42a
36400 +       .long   0x1fe
36401 +       .byte   0x2
36402 +       .byte   0x23
36403 +       .uleb128 0x0
36404 +       .uleb128 0x16
36405 +       .long   .LASF322
36406 +       .byte   0x1a
36407 +       .value  0x42b
36408 +       .long   0x1fe
36409 +       .byte   0x2
36410 +       .byte   0x23
36411 +       .uleb128 0x4
36412 +       .uleb128 0x17
36413 +       .string "arg"
36414 +       .byte   0x1a
36415 +       .value  0x42f
36416 +       .long   0x7cdd
36417 +       .byte   0x2
36418 +       .byte   0x23
36419 +       .uleb128 0x8
36420 +       .uleb128 0x16
36421 +       .long   .LASF1491
36422 +       .byte   0x1a
36423 +       .value  0x430
36424 +       .long   0x21
36425 +       .byte   0x2
36426 +       .byte   0x23
36427 +       .uleb128 0xc
36428 +       .byte   0x0
36429 +       .uleb128 0x1e
36430 +       .long   .LASF1492
36431 +       .byte   0x1a
36432 +       .value  0x431
36433 +       .long   0x7cff
36434 +       .uleb128 0x1e
36435 +       .long   .LASF1493
36436 +       .byte   0x1a
36437 +       .value  0x433
36438 +       .long   0x7d5d
36439 +       .uleb128 0x4
36440 +       .byte   0x4
36441 +       .long   0x7d63
36442 +       .uleb128 0x11
36443 +       .long   0x7d82
36444 +       .byte   0x1
36445 +       .long   0x21
36446 +       .uleb128 0x6
36447 +       .long   0x7d82
36448 +       .uleb128 0x6
36449 +       .long   0x2d82
36450 +       .uleb128 0x6
36451 +       .long   0x2f
36452 +       .uleb128 0x6
36453 +       .long   0x2f
36454 +       .byte   0x0
36455 +       .uleb128 0x4
36456 +       .byte   0x4
36457 +       .long   0x7d45
36458 +       .uleb128 0x11
36459 +       .long   0x7da2
36460 +       .byte   0x1
36461 +       .long   0x1f3
36462 +       .uleb128 0x6
36463 +       .long   0x3cfd
36464 +       .uleb128 0x6
36465 +       .long   0x1f3
36466 +       .uleb128 0x6
36467 +       .long   0x21
36468 +       .byte   0x0
36469 +       .uleb128 0x4
36470 +       .byte   0x4
36471 +       .long   0x7d88
36472 +       .uleb128 0x11
36473 +       .long   0x7dc7
36474 +       .byte   0x1
36475 +       .long   0x209
36476 +       .uleb128 0x6
36477 +       .long   0x3cfd
36478 +       .uleb128 0x6
36479 +       .long   0xb5
36480 +       .uleb128 0x6
36481 +       .long   0x1fe
36482 +       .uleb128 0x6
36483 +       .long   0x7dc7
36484 +       .byte   0x0
36485 +       .uleb128 0x4
36486 +       .byte   0x4
36487 +       .long   0x1f3
36488 +       .uleb128 0x4
36489 +       .byte   0x4
36490 +       .long   0x7da8
36491 +       .uleb128 0x11
36492 +       .long   0x7df2
36493 +       .byte   0x1
36494 +       .long   0x209
36495 +       .uleb128 0x6
36496 +       .long   0x3cfd
36497 +       .uleb128 0x6
36498 +       .long   0x7f2
36499 +       .uleb128 0x6
36500 +       .long   0x1fe
36501 +       .uleb128 0x6
36502 +       .long   0x7dc7
36503 +       .byte   0x0
36504 +       .uleb128 0x4
36505 +       .byte   0x4
36506 +       .long   0x7dd3
36507 +       .uleb128 0x11
36508 +       .long   0x7e17
36509 +       .byte   0x1
36510 +       .long   0x209
36511 +       .uleb128 0x6
36512 +       .long   0x3ddf
36513 +       .uleb128 0x6
36514 +       .long   0x7071
36515 +       .uleb128 0x6
36516 +       .long   0x2f
36517 +       .uleb128 0x6
36518 +       .long   0x1f3
36519 +       .byte   0x0
36520 +       .uleb128 0x4
36521 +       .byte   0x4
36522 +       .long   0x7df8
36523 +       .uleb128 0x11
36524 +       .long   0x7e37
36525 +       .byte   0x1
36526 +       .long   0x21
36527 +       .uleb128 0x6
36528 +       .long   0x3cfd
36529 +       .uleb128 0x6
36530 +       .long   0x160b
36531 +       .uleb128 0x6
36532 +       .long   0x7c42
36533 +       .byte   0x0
36534 +       .uleb128 0x4
36535 +       .byte   0x4
36536 +       .long   0x7e1d
36537 +       .uleb128 0x11
36538 +       .long   0x7e52
36539 +       .byte   0x1
36540 +       .long   0x77
36541 +       .uleb128 0x6
36542 +       .long   0x3cfd
36543 +       .uleb128 0x6
36544 +       .long   0x7e52
36545 +       .byte   0x0
36546 +       .uleb128 0x4
36547 +       .byte   0x4
36548 +       .long   0x7e58
36549 +       .uleb128 0x21
36550 +       .long   .LASF1494
36551 +       .byte   0x1
36552 +       .uleb128 0x4
36553 +       .byte   0x4
36554 +       .long   0x7e3d
36555 +       .uleb128 0x11
36556 +       .long   0x7e79
36557 +       .byte   0x1
36558 +       .long   0x21
36559 +       .uleb128 0x6
36560 +       .long   0x3cfd
36561 +       .uleb128 0x6
36562 +       .long   0x3f9c
36563 +       .byte   0x0
36564 +       .uleb128 0x4
36565 +       .byte   0x4
36566 +       .long   0x7e64
36567 +       .uleb128 0x11
36568 +       .long   0x7e94
36569 +       .byte   0x1
36570 +       .long   0x21
36571 +       .uleb128 0x6
36572 +       .long   0x3cfd
36573 +       .uleb128 0x6
36574 +       .long   0x7790
36575 +       .byte   0x0
36576 +       .uleb128 0x4
36577 +       .byte   0x4
36578 +       .long   0x7e7f
36579 +       .uleb128 0x11
36580 +       .long   0x7eb4
36581 +       .byte   0x1
36582 +       .long   0x21
36583 +       .uleb128 0x6
36584 +       .long   0x3cfd
36585 +       .uleb128 0x6
36586 +       .long   0x28ec
36587 +       .uleb128 0x6
36588 +       .long   0x21
36589 +       .byte   0x0
36590 +       .uleb128 0x4
36591 +       .byte   0x4
36592 +       .long   0x7e9a
36593 +       .uleb128 0x11
36594 +       .long   0x7ecf
36595 +       .byte   0x1
36596 +       .long   0x21
36597 +       .uleb128 0x6
36598 +       .long   0x3ddf
36599 +       .uleb128 0x6
36600 +       .long   0x21
36601 +       .byte   0x0
36602 +       .uleb128 0x4
36603 +       .byte   0x4
36604 +       .long   0x7eba
36605 +       .uleb128 0x11
36606 +       .long   0x7eef
36607 +       .byte   0x1
36608 +       .long   0x21
36609 +       .uleb128 0x6
36610 +       .long   0x21
36611 +       .uleb128 0x6
36612 +       .long   0x3cfd
36613 +       .uleb128 0x6
36614 +       .long   0x21
36615 +       .byte   0x0
36616 +       .uleb128 0x4
36617 +       .byte   0x4
36618 +       .long   0x7ed5
36619 +       .uleb128 0x11
36620 +       .long   0x7f0f
36621 +       .byte   0x1
36622 +       .long   0x21
36623 +       .uleb128 0x6
36624 +       .long   0x3cfd
36625 +       .uleb128 0x6
36626 +       .long   0x21
36627 +       .uleb128 0x6
36628 +       .long   0x7641
36629 +       .byte   0x0
36630 +       .uleb128 0x4
36631 +       .byte   0x4
36632 +       .long   0x7ef5
36633 +       .uleb128 0x11
36634 +       .long   0x7f39
36635 +       .byte   0x1
36636 +       .long   0x209
36637 +       .uleb128 0x6
36638 +       .long   0x3cfd
36639 +       .uleb128 0x6
36640 +       .long   0x7dc7
36641 +       .uleb128 0x6
36642 +       .long   0x1fe
36643 +       .uleb128 0x6
36644 +       .long   0x7d51
36645 +       .uleb128 0x6
36646 +       .long   0x160b
36647 +       .byte   0x0
36648 +       .uleb128 0x4
36649 +       .byte   0x4
36650 +       .long   0x7f15
36651 +       .uleb128 0x11
36652 +       .long   0x7f68
36653 +       .byte   0x1
36654 +       .long   0x209
36655 +       .uleb128 0x6
36656 +       .long   0x3cfd
36657 +       .uleb128 0x6
36658 +       .long   0x2d82
36659 +       .uleb128 0x6
36660 +       .long   0x21
36661 +       .uleb128 0x6
36662 +       .long   0x1fe
36663 +       .uleb128 0x6
36664 +       .long   0x7dc7
36665 +       .uleb128 0x6
36666 +       .long   0x21
36667 +       .byte   0x0
36668 +       .uleb128 0x4
36669 +       .byte   0x4
36670 +       .long   0x7f3f
36671 +       .uleb128 0x11
36672 +       .long   0x7f83
36673 +       .byte   0x1
36674 +       .long   0x21
36675 +       .uleb128 0x6
36676 +       .long   0x3cfd
36677 +       .uleb128 0x6
36678 +       .long   0x2f
36679 +       .byte   0x0
36680 +       .uleb128 0x4
36681 +       .byte   0x4
36682 +       .long   0x7f6e
36683 +       .uleb128 0x11
36684 +       .long   0x7fad
36685 +       .byte   0x1
36686 +       .long   0x209
36687 +       .uleb128 0x6
36688 +       .long   0x4551
36689 +       .uleb128 0x6
36690 +       .long   0x3cfd
36691 +       .uleb128 0x6
36692 +       .long   0x7dc7
36693 +       .uleb128 0x6
36694 +       .long   0x1fe
36695 +       .uleb128 0x6
36696 +       .long   0x77
36697 +       .byte   0x0
36698 +       .uleb128 0x4
36699 +       .byte   0x4
36700 +       .long   0x7f89
36701 +       .uleb128 0x11
36702 +       .long   0x7fd7
36703 +       .byte   0x1
36704 +       .long   0x209
36705 +       .uleb128 0x6
36706 +       .long   0x3cfd
36707 +       .uleb128 0x6
36708 +       .long   0x7dc7
36709 +       .uleb128 0x6
36710 +       .long   0x4551
36711 +       .uleb128 0x6
36712 +       .long   0x1fe
36713 +       .uleb128 0x6
36714 +       .long   0x77
36715 +       .byte   0x0
36716 +       .uleb128 0x4
36717 +       .byte   0x4
36718 +       .long   0x7fb3
36719 +       .uleb128 0x11
36720 +       .long   0x7ffc
36721 +       .byte   0x1
36722 +       .long   0x21
36723 +       .uleb128 0x6
36724 +       .long   0x3381
36725 +       .uleb128 0x6
36726 +       .long   0x28ec
36727 +       .uleb128 0x6
36728 +       .long   0x21
36729 +       .uleb128 0x6
36730 +       .long   0x6108
36731 +       .byte   0x0
36732 +       .uleb128 0x4
36733 +       .byte   0x4
36734 +       .long   0x7fdd
36735 +       .uleb128 0x11
36736 +       .long   0x801c
36737 +       .byte   0x1
36738 +       .long   0x28ec
36739 +       .uleb128 0x6
36740 +       .long   0x3381
36741 +       .uleb128 0x6
36742 +       .long   0x28ec
36743 +       .uleb128 0x6
36744 +       .long   0x6108
36745 +       .byte   0x0
36746 +       .uleb128 0x4
36747 +       .byte   0x4
36748 +       .long   0x8002
36749 +       .uleb128 0x11
36750 +       .long   0x803c
36751 +       .byte   0x1
36752 +       .long   0x21
36753 +       .uleb128 0x6
36754 +       .long   0x28ec
36755 +       .uleb128 0x6
36756 +       .long   0x3381
36757 +       .uleb128 0x6
36758 +       .long   0x28ec
36759 +       .byte   0x0
36760 +       .uleb128 0x4
36761 +       .byte   0x4
36762 +       .long   0x8022
36763 +       .uleb128 0x11
36764 +       .long   0x8057
36765 +       .byte   0x1
36766 +       .long   0x21
36767 +       .uleb128 0x6
36768 +       .long   0x3381
36769 +       .uleb128 0x6
36770 +       .long   0x28ec
36771 +       .byte   0x0
36772 +       .uleb128 0x4
36773 +       .byte   0x4
36774 +       .long   0x8042
36775 +       .uleb128 0x11
36776 +       .long   0x8077
36777 +       .byte   0x1
36778 +       .long   0x21
36779 +       .uleb128 0x6
36780 +       .long   0x3381
36781 +       .uleb128 0x6
36782 +       .long   0x28ec
36783 +       .uleb128 0x6
36784 +       .long   0x7f2
36785 +       .byte   0x0
36786 +       .uleb128 0x4
36787 +       .byte   0x4
36788 +       .long   0x805d
36789 +       .uleb128 0x11
36790 +       .long   0x8097
36791 +       .byte   0x1
36792 +       .long   0x21
36793 +       .uleb128 0x6
36794 +       .long   0x3381
36795 +       .uleb128 0x6
36796 +       .long   0x28ec
36797 +       .uleb128 0x6
36798 +       .long   0x21
36799 +       .byte   0x0
36800 +       .uleb128 0x4
36801 +       .byte   0x4
36802 +       .long   0x807d
36803 +       .uleb128 0x11
36804 +       .long   0x80bc
36805 +       .byte   0x1
36806 +       .long   0x21
36807 +       .uleb128 0x6
36808 +       .long   0x3381
36809 +       .uleb128 0x6
36810 +       .long   0x28ec
36811 +       .uleb128 0x6
36812 +       .long   0x21
36813 +       .uleb128 0x6
36814 +       .long   0x19f
36815 +       .byte   0x0
36816 +       .uleb128 0x4
36817 +       .byte   0x4
36818 +       .long   0x809d
36819 +       .uleb128 0x11
36820 +       .long   0x80e1
36821 +       .byte   0x1
36822 +       .long   0x21
36823 +       .uleb128 0x6
36824 +       .long   0x3381
36825 +       .uleb128 0x6
36826 +       .long   0x28ec
36827 +       .uleb128 0x6
36828 +       .long   0x3381
36829 +       .uleb128 0x6
36830 +       .long   0x28ec
36831 +       .byte   0x0
36832 +       .uleb128 0x4
36833 +       .byte   0x4
36834 +       .long   0x80c2
36835 +       .uleb128 0x11
36836 +       .long   0x8101
36837 +       .byte   0x1
36838 +       .long   0x21
36839 +       .uleb128 0x6
36840 +       .long   0x28ec
36841 +       .uleb128 0x6
36842 +       .long   0xb5
36843 +       .uleb128 0x6
36844 +       .long   0x21
36845 +       .byte   0x0
36846 +       .uleb128 0x4
36847 +       .byte   0x4
36848 +       .long   0x80e7
36849 +       .uleb128 0x11
36850 +       .long   0x811c
36851 +       .byte   0x1
36852 +       .long   0x160b
36853 +       .uleb128 0x6
36854 +       .long   0x28ec
36855 +       .uleb128 0x6
36856 +       .long   0x6108
36857 +       .byte   0x0
36858 +       .uleb128 0x4
36859 +       .byte   0x4
36860 +       .long   0x8107
36861 +       .uleb128 0x5
36862 +       .long   0x8138
36863 +       .byte   0x1
36864 +       .uleb128 0x6
36865 +       .long   0x28ec
36866 +       .uleb128 0x6
36867 +       .long   0x6108
36868 +       .uleb128 0x6
36869 +       .long   0x160b
36870 +       .byte   0x0
36871 +       .uleb128 0x4
36872 +       .byte   0x4
36873 +       .long   0x8122
36874 +       .uleb128 0x5
36875 +       .long   0x814a
36876 +       .byte   0x1
36877 +       .uleb128 0x6
36878 +       .long   0x3381
36879 +       .byte   0x0
36880 +       .uleb128 0x4
36881 +       .byte   0x4
36882 +       .long   0x813e
36883 +       .uleb128 0x11
36884 +       .long   0x816a
36885 +       .byte   0x1
36886 +       .long   0x21
36887 +       .uleb128 0x6
36888 +       .long   0x3381
36889 +       .uleb128 0x6
36890 +       .long   0x21
36891 +       .uleb128 0x6
36892 +       .long   0x6108
36893 +       .byte   0x0
36894 +       .uleb128 0x4
36895 +       .byte   0x4
36896 +       .long   0x8150
36897 +       .uleb128 0x11
36898 +       .long   0x8185
36899 +       .byte   0x1
36900 +       .long   0x21
36901 +       .uleb128 0x6
36902 +       .long   0x28ec
36903 +       .uleb128 0x6
36904 +       .long   0x6aef
36905 +       .byte   0x0
36906 +       .uleb128 0x4
36907 +       .byte   0x4
36908 +       .long   0x8170
36909 +       .uleb128 0x11
36910 +       .long   0x81a5
36911 +       .byte   0x1
36912 +       .long   0x21
36913 +       .uleb128 0x6
36914 +       .long   0x28f8
36915 +       .uleb128 0x6
36916 +       .long   0x28ec
36917 +       .uleb128 0x6
36918 +       .long   0x81a5
36919 +       .byte   0x0
36920 +       .uleb128 0x4
36921 +       .byte   0x4
36922 +       .long   0x460e
36923 +       .uleb128 0x4
36924 +       .byte   0x4
36925 +       .long   0x818b
36926 +       .uleb128 0x11
36927 +       .long   0x81d5
36928 +       .byte   0x1
36929 +       .long   0x21
36930 +       .uleb128 0x6
36931 +       .long   0x28ec
36932 +       .uleb128 0x6
36933 +       .long   0x7f2
36934 +       .uleb128 0x6
36935 +       .long   0x81d5
36936 +       .uleb128 0x6
36937 +       .long   0x1fe
36938 +       .uleb128 0x6
36939 +       .long   0x21
36940 +       .byte   0x0
36941 +       .uleb128 0x4
36942 +       .byte   0x4
36943 +       .long   0x81db
36944 +       .uleb128 0x31
36945 +       .uleb128 0x4
36946 +       .byte   0x4
36947 +       .long   0x81b1
36948 +       .uleb128 0x11
36949 +       .long   0x8201
36950 +       .byte   0x1
36951 +       .long   0x209
36952 +       .uleb128 0x6
36953 +       .long   0x28ec
36954 +       .uleb128 0x6
36955 +       .long   0x7f2
36956 +       .uleb128 0x6
36957 +       .long   0x160b
36958 +       .uleb128 0x6
36959 +       .long   0x1fe
36960 +       .byte   0x0
36961 +       .uleb128 0x4
36962 +       .byte   0x4
36963 +       .long   0x81e2
36964 +       .uleb128 0x11
36965 +       .long   0x8221
36966 +       .byte   0x1
36967 +       .long   0x209
36968 +       .uleb128 0x6
36969 +       .long   0x28ec
36970 +       .uleb128 0x6
36971 +       .long   0xb5
36972 +       .uleb128 0x6
36973 +       .long   0x1fe
36974 +       .byte   0x0
36975 +       .uleb128 0x4
36976 +       .byte   0x4
36977 +       .long   0x8207
36978 +       .uleb128 0x11
36979 +       .long   0x823c
36980 +       .byte   0x1
36981 +       .long   0x21
36982 +       .uleb128 0x6
36983 +       .long   0x28ec
36984 +       .uleb128 0x6
36985 +       .long   0x7f2
36986 +       .byte   0x0
36987 +       .uleb128 0x4
36988 +       .byte   0x4
36989 +       .long   0x8227
36990 +       .uleb128 0x5
36991 +       .long   0x8258
36992 +       .byte   0x1
36993 +       .uleb128 0x6
36994 +       .long   0x3381
36995 +       .uleb128 0x6
36996 +       .long   0x1f3
36997 +       .uleb128 0x6
36998 +       .long   0x1f3
36999 +       .byte   0x0
37000 +       .uleb128 0x4
37001 +       .byte   0x4
37002 +       .long   0x8242
37003 +       .uleb128 0x11
37004 +       .long   0x826e
37005 +       .byte   0x1
37006 +       .long   0x3381
37007 +       .uleb128 0x6
37008 +       .long   0x60d1
37009 +       .byte   0x0
37010 +       .uleb128 0x4
37011 +       .byte   0x4
37012 +       .long   0x825e
37013 +       .uleb128 0x5
37014 +       .long   0x8280
37015 +       .byte   0x1
37016 +       .uleb128 0x6
37017 +       .long   0x60d1
37018 +       .byte   0x0
37019 +       .uleb128 0x4
37020 +       .byte   0x4
37021 +       .long   0x8274
37022 +       .uleb128 0x11
37023 +       .long   0x829b
37024 +       .byte   0x1
37025 +       .long   0x21
37026 +       .uleb128 0x6
37027 +       .long   0x28ec
37028 +       .uleb128 0x6
37029 +       .long   0x829b
37030 +       .byte   0x0
37031 +       .uleb128 0x4
37032 +       .byte   0x4
37033 +       .long   0x82a1
37034 +       .uleb128 0x21
37035 +       .long   .LASF1495
37036 +       .byte   0x1
37037 +       .uleb128 0x4
37038 +       .byte   0x4
37039 +       .long   0x8286
37040 +       .uleb128 0x11
37041 +       .long   0x82c7
37042 +       .byte   0x1
37043 +       .long   0x21
37044 +       .uleb128 0x6
37045 +       .long   0x60d1
37046 +       .uleb128 0x6
37047 +       .long   0x4413
37048 +       .uleb128 0x6
37049 +       .long   0xb5
37050 +       .byte   0x0
37051 +       .uleb128 0x4
37052 +       .byte   0x4
37053 +       .long   0x82ad
37054 +       .uleb128 0x5
37055 +       .long   0x82de
37056 +       .byte   0x1
37057 +       .uleb128 0x6
37058 +       .long   0x28f8
37059 +       .uleb128 0x6
37060 +       .long   0x21
37061 +       .byte   0x0
37062 +       .uleb128 0x4
37063 +       .byte   0x4
37064 +       .long   0x82cd
37065 +       .uleb128 0x11
37066 +       .long   0x82f9
37067 +       .byte   0x1
37068 +       .long   0x21
37069 +       .uleb128 0x6
37070 +       .long   0x82f9
37071 +       .uleb128 0x6
37072 +       .long   0x28f8
37073 +       .byte   0x0
37074 +       .uleb128 0x4
37075 +       .byte   0x4
37076 +       .long   0x82ff
37077 +       .uleb128 0x21
37078 +       .long   .LASF1496
37079 +       .byte   0x1
37080 +       .uleb128 0x4
37081 +       .byte   0x4
37082 +       .long   0x82e4
37083 +       .uleb128 0x11
37084 +       .long   0x8334
37085 +       .byte   0x1
37086 +       .long   0x28ec
37087 +       .uleb128 0x6
37088 +       .long   0x60d1
37089 +       .uleb128 0x6
37090 +       .long   0x8334
37091 +       .uleb128 0x6
37092 +       .long   0x21
37093 +       .uleb128 0x6
37094 +       .long   0x21
37095 +       .uleb128 0x6
37096 +       .long   0x833a
37097 +       .uleb128 0x6
37098 +       .long   0x160b
37099 +       .byte   0x0
37100 +       .uleb128 0x4
37101 +       .byte   0x4
37102 +       .long   0x141
37103 +       .uleb128 0x4
37104 +       .byte   0x4
37105 +       .long   0x8340
37106 +       .uleb128 0x11
37107 +       .long   0x8355
37108 +       .byte   0x1
37109 +       .long   0x21
37110 +       .uleb128 0x6
37111 +       .long   0x160b
37112 +       .uleb128 0x6
37113 +       .long   0x28ec
37114 +       .byte   0x0
37115 +       .uleb128 0x4
37116 +       .byte   0x4
37117 +       .long   0x830b
37118 +       .uleb128 0x11
37119 +       .long   0x837a
37120 +       .byte   0x1
37121 +       .long   0x21
37122 +       .uleb128 0x6
37123 +       .long   0x28ec
37124 +       .uleb128 0x6
37125 +       .long   0x8334
37126 +       .uleb128 0x6
37127 +       .long   0x4413
37128 +       .uleb128 0x6
37129 +       .long   0x21
37130 +       .byte   0x0
37131 +       .uleb128 0x4
37132 +       .byte   0x4
37133 +       .long   0x835b
37134 +       .uleb128 0x11
37135 +       .long   0x839a
37136 +       .byte   0x1
37137 +       .long   0x21
37138 +       .uleb128 0x6
37139 +       .long   0x28ec
37140 +       .uleb128 0x6
37141 +       .long   0xb5
37142 +       .uleb128 0x6
37143 +       .long   0x28ec
37144 +       .byte   0x0
37145 +       .uleb128 0x4
37146 +       .byte   0x4
37147 +       .long   0x8380
37148 +       .uleb128 0x11
37149 +       .long   0x83b0
37150 +       .byte   0x1
37151 +       .long   0x28ec
37152 +       .uleb128 0x6
37153 +       .long   0x28ec
37154 +       .byte   0x0
37155 +       .uleb128 0x4
37156 +       .byte   0x4
37157 +       .long   0x83a0
37158 +       .uleb128 0x11
37159 +       .long   0x83cb
37160 +       .byte   0x1
37161 +       .long   0x28ec
37162 +       .uleb128 0x6
37163 +       .long   0x60d1
37164 +       .uleb128 0x6
37165 +       .long   0x160b
37166 +       .byte   0x0
37167 +       .uleb128 0x4
37168 +       .byte   0x4
37169 +       .long   0x83b6
37170 +       .uleb128 0x11
37171 +       .long   0x83f5
37172 +       .byte   0x1
37173 +       .long   0x28ec
37174 +       .uleb128 0x6
37175 +       .long   0x60d1
37176 +       .uleb128 0x6
37177 +       .long   0x160b
37178 +       .uleb128 0x6
37179 +       .long   0x160b
37180 +       .uleb128 0x6
37181 +       .long   0x833a
37182 +       .uleb128 0x6
37183 +       .long   0x160b
37184 +       .byte   0x0
37185 +       .uleb128 0x4
37186 +       .byte   0x4
37187 +       .long   0x83d1
37188 +       .uleb128 0x11
37189 +       .long   0x841f
37190 +       .byte   0x1
37191 +       .long   0x21
37192 +       .uleb128 0x6
37193 +       .long   0x7a6e
37194 +       .uleb128 0x6
37195 +       .long   0x21
37196 +       .uleb128 0x6
37197 +       .long   0x7f2
37198 +       .uleb128 0x6
37199 +       .long   0x160b
37200 +       .uleb128 0x6
37201 +       .long   0x28f8
37202 +       .byte   0x0
37203 +       .uleb128 0x4
37204 +       .byte   0x4
37205 +       .long   0x83fb
37206 +       .uleb128 0x7
37207 +       .long   .LASF889
37208 +       .byte   0x3e
37209 +       .byte   0x19
37210 +       .long   0x8430
37211 +       .uleb128 0x11
37212 +       .long   0x8445
37213 +       .byte   0x1
37214 +       .long   0x21
37215 +       .uleb128 0x6
37216 +       .long   0x160b
37217 +       .uleb128 0x6
37218 +       .long   0x21
37219 +       .byte   0x0
37220 +       .uleb128 0x4
37221 +       .byte   0x4
37222 +       .long   0x8425
37223 +       .uleb128 0x5
37224 +       .long   0x845c
37225 +       .byte   0x1
37226 +       .uleb128 0x6
37227 +       .long   0x4521
37228 +       .uleb128 0x6
37229 +       .long   0x2d82
37230 +       .byte   0x0
37231 +       .uleb128 0x4
37232 +       .byte   0x4
37233 +       .long   0x844b
37234 +       .uleb128 0x9
37235 +       .long   0x8487
37236 +       .byte   0x4
37237 +       .byte   0x19
37238 +       .byte   0x1b
37239 +       .uleb128 0xa
37240 +       .long   .LASF1497
37241 +       .byte   0x19
37242 +       .byte   0x1c
37243 +       .long   0x53
37244 +       .byte   0x2
37245 +       .byte   0x23
37246 +       .uleb128 0x0
37247 +       .uleb128 0xa
37248 +       .long   .LASF1498
37249 +       .byte   0x19
37250 +       .byte   0x1d
37251 +       .long   0x53
37252 +       .byte   0x2
37253 +       .byte   0x23
37254 +       .uleb128 0x2
37255 +       .byte   0x0
37256 +       .uleb128 0xc
37257 +       .long   0x84a0
37258 +       .byte   0x4
37259 +       .byte   0x19
37260 +       .byte   0x16
37261 +       .uleb128 0xe
37262 +       .long   .LASF1499
37263 +       .byte   0x19
37264 +       .byte   0x17
37265 +       .long   0x16c4
37266 +       .uleb128 0xd
37267 +       .long   0x8462
37268 +       .byte   0x0
37269 +       .uleb128 0x9
37270 +       .long   0x84c5
37271 +       .byte   0x8
37272 +       .byte   0x19
37273 +       .byte   0x21
37274 +       .uleb128 0xa
37275 +       .long   .LASF315
37276 +       .byte   0x19
37277 +       .byte   0x22
37278 +       .long   0x2f
37279 +       .byte   0x2
37280 +       .byte   0x23
37281 +       .uleb128 0x0
37282 +       .uleb128 0xa
37283 +       .long   .LASF1500
37284 +       .byte   0x19
37285 +       .byte   0x29
37286 +       .long   0x6e96
37287 +       .byte   0x2
37288 +       .byte   0x23
37289 +       .uleb128 0x4
37290 +       .byte   0x0
37291 +       .uleb128 0x9
37292 +       .long   0x84ea
37293 +       .byte   0x8
37294 +       .byte   0x19
37295 +       .byte   0x34
37296 +       .uleb128 0xa
37297 +       .long   .LASF1501
37298 +       .byte   0x19
37299 +       .byte   0x35
37300 +       .long   0x84ea
37301 +       .byte   0x2
37302 +       .byte   0x23
37303 +       .uleb128 0x0
37304 +       .uleb128 0xa
37305 +       .long   .LASF1502
37306 +       .byte   0x19
37307 +       .byte   0x36
37308 +       .long   0x2ea2
37309 +       .byte   0x2
37310 +       .byte   0x23
37311 +       .uleb128 0x4
37312 +       .byte   0x0
37313 +       .uleb128 0x4
37314 +       .byte   0x4
37315 +       .long   0x160b
37316 +       .uleb128 0x9
37317 +       .long   0x8507
37318 +       .byte   0x4
37319 +       .byte   0x19
37320 +       .byte   0x38
37321 +       .uleb128 0xa
37322 +       .long   .LASF1503
37323 +       .byte   0x19
37324 +       .byte   0x39
37325 +       .long   0x2d82
37326 +       .byte   0x2
37327 +       .byte   0x23
37328 +       .uleb128 0x0
37329 +       .byte   0x0
37330 +       .uleb128 0xc
37331 +       .long   0x852a
37332 +       .byte   0x8
37333 +       .byte   0x19
37334 +       .byte   0x20
37335 +       .uleb128 0xd
37336 +       .long   0x84a0
37337 +       .uleb128 0x26
37338 +       .string "ptl"
37339 +       .byte   0x19
37340 +       .byte   0x32
37341 +       .long   0x1680
37342 +       .uleb128 0xd
37343 +       .long   0x84c5
37344 +       .uleb128 0xd
37345 +       .long   0x84f0
37346 +       .byte   0x0
37347 +       .uleb128 0xc
37348 +       .long   0x8549
37349 +       .byte   0x4
37350 +       .byte   0x19
37351 +       .byte   0x3c
37352 +       .uleb128 0xe
37353 +       .long   .LASF746
37354 +       .byte   0x19
37355 +       .byte   0x3d
37356 +       .long   0x2f
37357 +       .uleb128 0xe
37358 +       .long   .LASF1504
37359 +       .byte   0x19
37360 +       .byte   0x3e
37361 +       .long   0x160b
37362 +       .byte   0x0
37363 +       .uleb128 0x9
37364 +       .long   0x857c
37365 +       .byte   0x10
37366 +       .byte   0x15
37367 +       .byte   0x51
37368 +       .uleb128 0xa
37369 +       .long   .LASF509
37370 +       .byte   0x15
37371 +       .byte   0x52
37372 +       .long   0x17bc
37373 +       .byte   0x2
37374 +       .byte   0x23
37375 +       .uleb128 0x0
37376 +       .uleb128 0xa
37377 +       .long   .LASF205
37378 +       .byte   0x15
37379 +       .byte   0x53
37380 +       .long   0x160b
37381 +       .byte   0x2
37382 +       .byte   0x23
37383 +       .uleb128 0x8
37384 +       .uleb128 0xa
37385 +       .long   .LASF600
37386 +       .byte   0x15
37387 +       .byte   0x54
37388 +       .long   0x3f9c
37389 +       .byte   0x2
37390 +       .byte   0x23
37391 +       .uleb128 0xc
37392 +       .byte   0x0
37393 +       .uleb128 0xc
37394 +       .long   0x859b
37395 +       .byte   0x10
37396 +       .byte   0x15
37397 +       .byte   0x50
37398 +       .uleb128 0xe
37399 +       .long   .LASF1505
37400 +       .byte   0x15
37401 +       .byte   0x55
37402 +       .long   0x8549
37403 +       .uleb128 0xe
37404 +       .long   .LASF1100
37405 +       .byte   0x15
37406 +       .byte   0x57
37407 +       .long   0x5ced
37408 +       .byte   0x0
37409 +       .uleb128 0x21
37410 +       .long   .LASF833
37411 +       .byte   0x1
37412 +       .uleb128 0x4
37413 +       .byte   0x4
37414 +       .long   0x859b
37415 +       .uleb128 0xf
37416 +       .long   0x8608
37417 +       .long   .LASF1506
37418 +       .byte   0x18
37419 +       .byte   0x15
37420 +       .byte   0x64
37421 +       .uleb128 0xa
37422 +       .long   .LASF1160
37423 +       .byte   0x15
37424 +       .byte   0xca
37425 +       .long   0x861a
37426 +       .byte   0x2
37427 +       .byte   0x23
37428 +       .uleb128 0x0
37429 +       .uleb128 0xa
37430 +       .long   .LASF1507
37431 +       .byte   0x15
37432 +       .byte   0xcb
37433 +       .long   0x861a
37434 +       .byte   0x2
37435 +       .byte   0x23
37436 +       .uleb128 0x4
37437 +       .uleb128 0xa
37438 +       .long   .LASF1508
37439 +       .byte   0x15
37440 +       .byte   0xcc
37441 +       .long   0x863a
37442 +       .byte   0x2
37443 +       .byte   0x23
37444 +       .uleb128 0x8
37445 +       .uleb128 0xa
37446 +       .long   .LASF1509
37447 +       .byte   0x15
37448 +       .byte   0xcd
37449 +       .long   0x8655
37450 +       .byte   0x2
37451 +       .byte   0x23
37452 +       .uleb128 0xc
37453 +       .uleb128 0xa
37454 +       .long   .LASF1510
37455 +       .byte   0x15
37456 +       .byte   0xce
37457 +       .long   0x8684
37458 +       .byte   0x2
37459 +       .byte   0x23
37460 +       .uleb128 0x10
37461 +       .uleb128 0xa
37462 +       .long   .LASF1511
37463 +       .byte   0x15
37464 +       .byte   0xd2
37465 +       .long   0x869f
37466 +       .byte   0x2
37467 +       .byte   0x23
37468 +       .uleb128 0x14
37469 +       .byte   0x0
37470 +       .uleb128 0x4
37471 +       .byte   0x4
37472 +       .long   0x85a7
37473 +       .uleb128 0x5
37474 +       .long   0x861a
37475 +       .byte   0x1
37476 +       .uleb128 0x6
37477 +       .long   0x3f9c
37478 +       .byte   0x0
37479 +       .uleb128 0x4
37480 +       .byte   0x4
37481 +       .long   0x860e
37482 +       .uleb128 0x11
37483 +       .long   0x863a
37484 +       .byte   0x1
37485 +       .long   0x2d82
37486 +       .uleb128 0x6
37487 +       .long   0x3f9c
37488 +       .uleb128 0x6
37489 +       .long   0x2f
37490 +       .uleb128 0x6
37491 +       .long   0x4413
37492 +       .byte   0x0
37493 +       .uleb128 0x4
37494 +       .byte   0x4
37495 +       .long   0x8620
37496 +       .uleb128 0x11
37497 +       .long   0x8655
37498 +       .byte   0x1
37499 +       .long   0x2f
37500 +       .uleb128 0x6
37501 +       .long   0x3f9c
37502 +       .uleb128 0x6
37503 +       .long   0x2f
37504 +       .byte   0x0
37505 +       .uleb128 0x4
37506 +       .byte   0x4
37507 +       .long   0x8640
37508 +       .uleb128 0x11
37509 +       .long   0x8684
37510 +       .byte   0x1
37511 +       .long   0x21
37512 +       .uleb128 0x6
37513 +       .long   0x3f9c
37514 +       .uleb128 0x6
37515 +       .long   0x2f
37516 +       .uleb128 0x6
37517 +       .long   0x2f
37518 +       .uleb128 0x6
37519 +       .long   0x36e
37520 +       .uleb128 0x6
37521 +       .long   0x2f
37522 +       .uleb128 0x6
37523 +       .long   0x21
37524 +       .byte   0x0
37525 +       .uleb128 0x4
37526 +       .byte   0x4
37527 +       .long   0x865b
37528 +       .uleb128 0x11
37529 +       .long   0x869f
37530 +       .byte   0x1
37531 +       .long   0x21
37532 +       .uleb128 0x6
37533 +       .long   0x3f9c
37534 +       .uleb128 0x6
37535 +       .long   0x2d82
37536 +       .byte   0x0
37537 +       .uleb128 0x4
37538 +       .byte   0x4
37539 +       .long   0x868a
37540 +       .uleb128 0xf
37541 +       .long   0x86c0
37542 +       .long   .LASF1512
37543 +       .byte   0x7c
37544 +       .byte   0x6e
37545 +       .byte   0x36
37546 +       .uleb128 0xa
37547 +       .long   .LASF367
37548 +       .byte   0x6e
37549 +       .byte   0x37
37550 +       .long   0x86c0
37551 +       .byte   0x2
37552 +       .byte   0x23
37553 +       .uleb128 0x0
37554 +       .byte   0x0
37555 +       .uleb128 0x12
37556 +       .long   0x86d0
37557 +       .long   0x2f
37558 +       .uleb128 0x13
37559 +       .long   0x28
37560 +       .byte   0x1e
37561 +       .byte   0x0
37562 +       .uleb128 0xf
37563 +       .long   0x8723
37564 +       .long   .LASF367
37565 +       .byte   0x18
37566 +       .byte   0x57
37567 +       .byte   0x1d
37568 +       .uleb128 0xa
37569 +       .long   .LASF1360
37570 +       .byte   0x57
37571 +       .byte   0x1e
37572 +       .long   0x17bc
37573 +       .byte   0x2
37574 +       .byte   0x23
37575 +       .uleb128 0x0
37576 +       .uleb128 0xa
37577 +       .long   .LASF1513
37578 +       .byte   0x57
37579 +       .byte   0x1f
37580 +       .long   0x160b
37581 +       .byte   0x2
37582 +       .byte   0x23
37583 +       .uleb128 0x8
37584 +       .uleb128 0xa
37585 +       .long   .LASF322
37586 +       .byte   0x57
37587 +       .byte   0x20
37588 +       .long   0x77
37589 +       .byte   0x2
37590 +       .byte   0x23
37591 +       .uleb128 0xc
37592 +       .uleb128 0xa
37593 +       .long   .LASF1514
37594 +       .byte   0x57
37595 +       .byte   0x21
37596 +       .long   0x77
37597 +       .byte   0x2
37598 +       .byte   0x23
37599 +       .uleb128 0x10
37600 +       .uleb128 0xa
37601 +       .long   .LASF161
37602 +       .byte   0x57
37603 +       .byte   0x22
37604 +       .long   0x15f9
37605 +       .byte   0x2
37606 +       .byte   0x23
37607 +       .uleb128 0x14
37608 +       .byte   0x0
37609 +       .uleb128 0x1a
37610 +       .long   0x873f
37611 +       .long   .LASF1515
37612 +       .value  0x200
37613 +       .byte   0x1
37614 +       .byte   0x3d
37615 +       .uleb128 0xb
37616 +       .string "vec"
37617 +       .byte   0x1
37618 +       .byte   0x3e
37619 +       .long   0x873f
37620 +       .byte   0x2
37621 +       .byte   0x23
37622 +       .uleb128 0x0
37623 +       .byte   0x0
37624 +       .uleb128 0x12
37625 +       .long   0x874f
37626 +       .long   0x17bc
37627 +       .uleb128 0x13
37628 +       .long   0x28
37629 +       .byte   0x3f
37630 +       .byte   0x0
37631 +       .uleb128 0x7
37632 +       .long   .LASF1516
37633 +       .byte   0x1
37634 +       .byte   0x3f
37635 +       .long   0x8723
37636 +       .uleb128 0x1a
37637 +       .long   0x8776
37638 +       .long   .LASF1517
37639 +       .value  0x800
37640 +       .byte   0x1
37641 +       .byte   0x41
37642 +       .uleb128 0xb
37643 +       .string "vec"
37644 +       .byte   0x1
37645 +       .byte   0x42
37646 +       .long   0x8776
37647 +       .byte   0x2
37648 +       .byte   0x23
37649 +       .uleb128 0x0
37650 +       .byte   0x0
37651 +       .uleb128 0x12
37652 +       .long   0x8786
37653 +       .long   0x17bc
37654 +       .uleb128 0x13
37655 +       .long   0x28
37656 +       .byte   0xff
37657 +       .byte   0x0
37658 +       .uleb128 0x7
37659 +       .long   .LASF1518
37660 +       .byte   0x1
37661 +       .byte   0x43
37662 +       .long   0x875a
37663 +       .uleb128 0x4
37664 +       .byte   0x4
37665 +       .long   0x3728
37666 +       .uleb128 0x7
37667 +       .long   .LASF1519
37668 +       .byte   0x1
37669 +       .byte   0x50
37670 +       .long   0x378d
37671 +       .uleb128 0x15
37672 +       .long   0x87eb
37673 +       .long   .LASF1520
37674 +       .byte   0x10
37675 +       .byte   0x1
37676 +       .value  0x3a6
37677 +       .uleb128 0x17
37678 +       .string "pc"
37679 +       .byte   0x1
37680 +       .value  0x3a7
37681 +       .long   0x2f
37682 +       .byte   0x2
37683 +       .byte   0x23
37684 +       .uleb128 0x0
37685 +       .uleb128 0x16
37686 +       .long   .LASF1521
37687 +       .byte   0x1
37688 +       .value  0x3a8
37689 +       .long   0x2f
37690 +       .byte   0x2
37691 +       .byte   0x23
37692 +       .uleb128 0x4
37693 +       .uleb128 0x16
37694 +       .long   .LASF322
37695 +       .byte   0x1
37696 +       .value  0x3a9
37697 +       .long   0x77
37698 +       .byte   0x2
37699 +       .byte   0x23
37700 +       .uleb128 0x8
37701 +       .uleb128 0x16
37702 +       .long   .LASF1522
37703 +       .byte   0x1
37704 +       .value  0x3aa
37705 +       .long   0x77
37706 +       .byte   0x2
37707 +       .byte   0x23
37708 +       .uleb128 0xc
37709 +       .byte   0x0
37710 +       .uleb128 0x32
37711 +       .long   .LASF1548
37712 +       .byte   0x6
37713 +       .byte   0x23
37714 +       .byte   0x1
37715 +       .byte   0x3
37716 +       .uleb128 0x33
37717 +       .long   0x8823
37718 +       .long   .LASF1523
37719 +       .byte   0x2
37720 +       .byte   0x2e
37721 +       .byte   0x1
37722 +       .byte   0x3
37723 +       .uleb128 0x34
37724 +       .string "new"
37725 +       .byte   0x2
37726 +       .byte   0x2b
37727 +       .long   0x17e5
37728 +       .uleb128 0x35
37729 +       .long   .LASF308
37730 +       .byte   0x2
37731 +       .byte   0x2c
37732 +       .long   0x17e5
37733 +       .uleb128 0x35
37734 +       .long   .LASF307
37735 +       .byte   0x2
37736 +       .byte   0x2d
37737 +       .long   0x17e5
37738 +       .byte   0x0
37739 +       .uleb128 0x33
37740 +       .long   0x8847
37741 +       .long   .LASF1524
37742 +       .byte   0x2
37743 +       .byte   0xd9
37744 +       .byte   0x1
37745 +       .byte   0x3
37746 +       .uleb128 0x34
37747 +       .string "old"
37748 +       .byte   0x2
37749 +       .byte   0xd7
37750 +       .long   0x17e5
37751 +       .uleb128 0x34
37752 +       .string "new"
37753 +       .byte   0x2
37754 +       .byte   0xd8
37755 +       .long   0x17e5
37756 +       .byte   0x0
37757 +       .uleb128 0x33
37758 +       .long   0x8860
37759 +       .long   .LASF1525
37760 +       .byte   0x2
37761 +       .byte   0x1f
37762 +       .byte   0x1
37763 +       .byte   0x3
37764 +       .uleb128 0x35
37765 +       .long   .LASF509
37766 +       .byte   0x2
37767 +       .byte   0x1e
37768 +       .long   0x17e5
37769 +       .byte   0x0
37770 +       .uleb128 0x36
37771 +       .long   0x887d
37772 +       .long   .LASF1526
37773 +       .byte   0x1
37774 +       .byte   0x5f
37775 +       .byte   0x1
37776 +       .long   0x77
37777 +       .byte   0x3
37778 +       .uleb128 0x35
37779 +       .long   .LASF735
37780 +       .byte   0x1
37781 +       .byte   0x5e
37782 +       .long   0x887d
37783 +       .byte   0x0
37784 +       .uleb128 0x4
37785 +       .byte   0x4
37786 +       .long   0x8797
37787 +       .uleb128 0x33
37788 +       .long   0x88a8
37789 +       .long   .LASF1527
37790 +       .byte   0x2
37791 +       .byte   0x55
37792 +       .byte   0x1
37793 +       .byte   0x3
37794 +       .uleb128 0x34
37795 +       .string "new"
37796 +       .byte   0x2
37797 +       .byte   0x54
37798 +       .long   0x17e5
37799 +       .uleb128 0x35
37800 +       .long   .LASF600
37801 +       .byte   0x2
37802 +       .byte   0x54
37803 +       .long   0x17e5
37804 +       .uleb128 0x37
37805 +       .byte   0x0
37806 +       .uleb128 0x33
37807 +       .long   0x88cc
37808 +       .long   .LASF1528
37809 +       .byte   0x2
37810 +       .byte   0x9c
37811 +       .byte   0x1
37812 +       .byte   0x3
37813 +       .uleb128 0x35
37814 +       .long   .LASF308
37815 +       .byte   0x2
37816 +       .byte   0x9b
37817 +       .long   0x17e5
37818 +       .uleb128 0x35
37819 +       .long   .LASF307
37820 +       .byte   0x2
37821 +       .byte   0x9b
37822 +       .long   0x17e5
37823 +       .byte   0x0
37824 +       .uleb128 0x33
37825 +       .long   0x88f2
37826 +       .long   .LASF1529
37827 +       .byte   0x2
37828 +       .byte   0xe2
37829 +       .byte   0x1
37830 +       .byte   0x3
37831 +       .uleb128 0x34
37832 +       .string "old"
37833 +       .byte   0x2
37834 +       .byte   0xe0
37835 +       .long   0x17e5
37836 +       .uleb128 0x34
37837 +       .string "new"
37838 +       .byte   0x2
37839 +       .byte   0xe1
37840 +       .long   0x17e5
37841 +       .uleb128 0x37
37842 +       .uleb128 0x37
37843 +       .byte   0x0
37844 +       .uleb128 0x36
37845 +       .long   0x890f
37846 +       .long   .LASF1530
37847 +       .byte   0x1
37848 +       .byte   0x64
37849 +       .byte   0x1
37850 +       .long   0x887d
37851 +       .byte   0x3
37852 +       .uleb128 0x35
37853 +       .long   .LASF735
37854 +       .byte   0x1
37855 +       .byte   0x63
37856 +       .long   0x887d
37857 +       .byte   0x0
37858 +       .uleb128 0x36
37859 +       .long   0x8936
37860 +       .long   .LASF1531
37861 +       .byte   0xc
37862 +       .byte   0xf5
37863 +       .byte   0x1
37864 +       .long   0x21
37865 +       .byte   0x3
37866 +       .uleb128 0x34
37867 +       .string "nr"
37868 +       .byte   0xc
37869 +       .byte   0xf4
37870 +       .long   0x21
37871 +       .uleb128 0x35
37872 +       .long   .LASF1532
37873 +       .byte   0xc
37874 +       .byte   0xf4
37875 +       .long   0x8936
37876 +       .byte   0x0
37877 +       .uleb128 0x4
37878 +       .byte   0x4
37879 +       .long   0x893c
37880 +       .uleb128 0x14
37881 +       .long   0x8941
37882 +       .uleb128 0x2d
37883 +       .long   0x2f
37884 +       .uleb128 0x36
37885 +       .long   0x8978
37886 +       .long   .LASF1533
37887 +       .byte   0xc
37888 +       .byte   0xfa
37889 +       .byte   0x1
37890 +       .long   0x21
37891 +       .byte   0x3
37892 +       .uleb128 0x34
37893 +       .string "nr"
37894 +       .byte   0xc
37895 +       .byte   0xf9
37896 +       .long   0x21
37897 +       .uleb128 0x35
37898 +       .long   .LASF1532
37899 +       .byte   0xc
37900 +       .byte   0xf9
37901 +       .long   0x8936
37902 +       .uleb128 0x38
37903 +       .long   .LASF1538
37904 +       .byte   0xc
37905 +       .byte   0xfb
37906 +       .long   0x21
37907 +       .byte   0x0
37908 +       .uleb128 0x36
37909 +       .long   0x89a9
37910 +       .long   .LASF1534
37911 +       .byte   0xf
37912 +       .byte   0x40
37913 +       .byte   0x1
37914 +       .long   0x21
37915 +       .byte   0x3
37916 +       .uleb128 0x34
37917 +       .string "ti"
37918 +       .byte   0xf
37919 +       .byte   0x3f
37920 +       .long   0x2dd9
37921 +       .uleb128 0x35
37922 +       .long   .LASF1535
37923 +       .byte   0xf
37924 +       .byte   0x3f
37925 +       .long   0x21
37926 +       .uleb128 0x37
37927 +       .uleb128 0x39
37928 +       .uleb128 0x39
37929 +       .uleb128 0x3a
37930 +       .long   0x896c
37931 +       .byte   0x0
37932 +       .byte   0x0
37933 +       .byte   0x0
37934 +       .uleb128 0x3b
37935 +       .long   0x89e0
37936 +       .long   .LASF1536
37937 +       .byte   0xb
37938 +       .value  0x620
37939 +       .byte   0x1
37940 +       .long   0x21
37941 +       .byte   0x3
37942 +       .uleb128 0x3c
37943 +       .string "tsk"
37944 +       .byte   0xb
37945 +       .value  0x61f
37946 +       .long   0x15f9
37947 +       .uleb128 0x3d
37948 +       .long   .LASF1535
37949 +       .byte   0xb
37950 +       .value  0x61f
37951 +       .long   0x21
37952 +       .uleb128 0x39
37953 +       .uleb128 0x37
37954 +       .uleb128 0x39
37955 +       .uleb128 0x39
37956 +       .uleb128 0x3a
37957 +       .long   0x896c
37958 +       .byte   0x0
37959 +       .byte   0x0
37960 +       .byte   0x0
37961 +       .byte   0x0
37962 +       .uleb128 0x36
37963 +       .long   0x8a1a
37964 +       .long   .LASF1537
37965 +       .byte   0x3
37966 +       .byte   0x1d
37967 +       .byte   0x1
37968 +       .long   0x160b
37969 +       .byte   0x3
37970 +       .uleb128 0x35
37971 +       .long   .LASF328
37972 +       .byte   0x3
37973 +       .byte   0x1c
37974 +       .long   0x1fe
37975 +       .uleb128 0x35
37976 +       .long   .LASF53
37977 +       .byte   0x3
37978 +       .byte   0x1c
37979 +       .long   0x240
37980 +       .uleb128 0x3e
37981 +       .long   .LASF1596
37982 +       .byte   0x3
37983 +       .byte   0x2b
37984 +       .uleb128 0x39
37985 +       .uleb128 0x3f
37986 +       .string "i"
37987 +       .byte   0x3
37988 +       .byte   0x1f
37989 +       .long   0x21
37990 +       .byte   0x0
37991 +       .byte   0x0
37992 +       .uleb128 0x36
37993 +       .long   0x8a5d
37994 +       .long   .LASF1539
37995 +       .byte   0x54
37996 +       .byte   0xc3
37997 +       .byte   0x1
37998 +       .long   0x160b
37999 +       .byte   0x3
38000 +       .uleb128 0x35
38001 +       .long   .LASF328
38002 +       .byte   0x54
38003 +       .byte   0xc2
38004 +       .long   0x1fe
38005 +       .uleb128 0x35
38006 +       .long   .LASF53
38007 +       .byte   0x54
38008 +       .byte   0xc2
38009 +       .long   0x240
38010 +       .uleb128 0x35
38011 +       .long   .LASF400
38012 +       .byte   0x54
38013 +       .byte   0xc2
38014 +       .long   0x21
38015 +       .uleb128 0x39
38016 +       .uleb128 0x39
38017 +       .uleb128 0x40
38018 +       .long   0x8a07
38019 +       .uleb128 0x39
38020 +       .uleb128 0x3a
38021 +       .long   0x8a0f
38022 +       .byte   0x0
38023 +       .byte   0x0
38024 +       .byte   0x0
38025 +       .byte   0x0
38026 +       .uleb128 0x3b
38027 +       .long   0x8aaa
38028 +       .long   .LASF1540
38029 +       .byte   0x4
38030 +       .value  0x1a3
38031 +       .byte   0x1
38032 +       .long   0x160b
38033 +       .byte   0x3
38034 +       .uleb128 0x3c
38035 +       .string "s"
38036 +       .byte   0x4
38037 +       .value  0x1a2
38038 +       .long   0x160b
38039 +       .uleb128 0x3d
38040 +       .long   .LASF1541
38041 +       .byte   0x4
38042 +       .value  0x1a2
38043 +       .long   0x2f
38044 +       .uleb128 0x3d
38045 +       .long   .LASF322
38046 +       .byte   0x4
38047 +       .value  0x1a2
38048 +       .long   0x1fe
38049 +       .uleb128 0x39
38050 +       .uleb128 0x41
38051 +       .string "d0"
38052 +       .byte   0x4
38053 +       .value  0x1bd
38054 +       .long   0x21
38055 +       .uleb128 0x41
38056 +       .string "d1"
38057 +       .byte   0x4
38058 +       .value  0x1bd
38059 +       .long   0x21
38060 +       .byte   0x0
38061 +       .byte   0x0
38062 +       .uleb128 0x33
38063 +       .long   0x8ac3
38064 +       .long   .LASF1542
38065 +       .byte   0xa
38066 +       .byte   0x7b
38067 +       .byte   0x1
38068 +       .byte   0x3
38069 +       .uleb128 0x35
38070 +       .long   .LASF760
38071 +       .byte   0xa
38072 +       .byte   0x7a
38073 +       .long   0x8791
38074 +       .byte   0x0
38075 +       .uleb128 0x33
38076 +       .long   0x8ae7
38077 +       .long   .LASF1543
38078 +       .byte   0x1
38079 +       .byte   0xfb
38080 +       .byte   0x1
38081 +       .byte   0x3
38082 +       .uleb128 0x35
38083 +       .long   .LASF735
38084 +       .byte   0x1
38085 +       .byte   0xf9
38086 +       .long   0x887d
38087 +       .uleb128 0x35
38088 +       .long   .LASF760
38089 +       .byte   0x1
38090 +       .byte   0xfa
38091 +       .long   0x8791
38092 +       .byte   0x0
38093 +       .uleb128 0x42
38094 +       .long   0x8b1b
38095 +       .long   .LASF1544
38096 +       .byte   0x1
38097 +       .value  0x153
38098 +       .byte   0x1
38099 +       .byte   0x3
38100 +       .uleb128 0x3d
38101 +       .long   .LASF760
38102 +       .byte   0x1
38103 +       .value  0x151
38104 +       .long   0x8791
38105 +       .uleb128 0x3d
38106 +       .long   .LASF1545
38107 +       .byte   0x1
38108 +       .value  0x152
38109 +       .long   0x21
38110 +       .uleb128 0x43
38111 +       .long   .LASF376
38112 +       .byte   0x1
38113 +       .value  0x154
38114 +       .long   0x17e5
38115 +       .uleb128 0x37
38116 +       .byte   0x0
38117 +       .uleb128 0x33
38118 +       .long   0x8b34
38119 +       .long   .LASF1546
38120 +       .byte   0x5
38121 +       .byte   0x6b
38122 +       .byte   0x1
38123 +       .byte   0x3
38124 +       .uleb128 0x35
38125 +       .long   .LASF285
38126 +       .byte   0x5
38127 +       .byte   0x6a
38128 +       .long   0x8b34
38129 +       .byte   0x0
38130 +       .uleb128 0x4
38131 +       .byte   0x4
38132 +       .long   0x163c
38133 +       .uleb128 0x33
38134 +       .long   0x8b49
38135 +       .long   .LASF1547
38136 +       .byte   0x6
38137 +       .byte   0x47
38138 +       .byte   0x1
38139 +       .byte   0x3
38140 +       .uleb128 0x37
38141 +       .byte   0x0
38142 +       .uleb128 0x44
38143 +       .long   .LASF1549
38144 +       .byte   0x10
38145 +       .byte   0x5c
38146 +       .byte   0x1
38147 +       .long   0x2dd9
38148 +       .byte   0x3
38149 +       .uleb128 0x3b
38150 +       .long   0x8b75
38151 +       .long   .LASF1550
38152 +       .byte   0x2
38153 +       .value  0x12b
38154 +       .byte   0x1
38155 +       .long   0x21
38156 +       .byte   0x3
38157 +       .uleb128 0x3d
38158 +       .long   .LASF600
38159 +       .byte   0x2
38160 +       .value  0x12a
38161 +       .long   0x8b75
38162 +       .byte   0x0
38163 +       .uleb128 0x4
38164 +       .byte   0x4
38165 +       .long   0x8b7b
38166 +       .uleb128 0x14
38167 +       .long   0x17bc
38168 +       .uleb128 0x36
38169 +       .long   0x8ba7
38170 +       .long   .LASF1551
38171 +       .byte   0x7
38172 +       .byte   0x57
38173 +       .byte   0x1
38174 +       .long   0x77
38175 +       .byte   0x3
38176 +       .uleb128 0x34
38177 +       .string "sl"
38178 +       .byte   0x7
38179 +       .byte   0x56
38180 +       .long   0x8ba7
38181 +       .uleb128 0x3f
38182 +       .string "ret"
38183 +       .byte   0x7
38184 +       .byte   0x58
38185 +       .long   0x77
38186 +       .byte   0x0
38187 +       .uleb128 0x4
38188 +       .byte   0x4
38189 +       .long   0x8bad
38190 +       .uleb128 0x14
38191 +       .long   0x170a
38192 +       .uleb128 0x36
38193 +       .long   0x8bd8
38194 +       .long   .LASF1552
38195 +       .byte   0x7
38196 +       .byte   0x66
38197 +       .byte   0x1
38198 +       .long   0x21
38199 +       .byte   0x3
38200 +       .uleb128 0x34
38201 +       .string "sl"
38202 +       .byte   0x7
38203 +       .byte   0x65
38204 +       .long   0x8ba7
38205 +       .uleb128 0x34
38206 +       .string "iv"
38207 +       .byte   0x7
38208 +       .byte   0x65
38209 +       .long   0x77
38210 +       .byte   0x0
38211 +       .uleb128 0x42
38212 +       .long   0x8c11
38213 +       .long   .LASF1553
38214 +       .byte   0x1
38215 +       .value  0x354
38216 +       .byte   0x1
38217 +       .byte   0x3
38218 +       .uleb128 0x3d
38219 +       .long   .LASF1554
38220 +       .byte   0x1
38221 +       .value  0x353
38222 +       .long   0x2f
38223 +       .uleb128 0x43
38224 +       .long   .LASF1555
38225 +       .byte   0x1
38226 +       .value  0x355
38227 +       .long   0x2f
38228 +       .uleb128 0x45
38229 +       .long   .LASF322
38230 +       .byte   0x1
38231 +       .value  0x356
38232 +       .long   0x21
38233 +       .byte   0x5
38234 +       .byte   0x3
38235 +       .long   count.18791
38236 +       .byte   0x0
38237 +       .uleb128 0x46
38238 +       .long   .LASF1556
38239 +       .byte   0x8
38240 +       .value  0x1f0
38241 +       .byte   0x1
38242 +       .byte   0x3
38243 +       .uleb128 0x36
38244 +       .long   0x8c38
38245 +       .long   .LASF1557
38246 +       .byte   0xa
38247 +       .byte   0x3e
38248 +       .byte   0x1
38249 +       .long   0x21
38250 +       .byte   0x3
38251 +       .uleb128 0x35
38252 +       .long   .LASF760
38253 +       .byte   0xa
38254 +       .byte   0x3d
38255 +       .long   0x8c38
38256 +       .byte   0x0
38257 +       .uleb128 0x4
38258 +       .byte   0x4
38259 +       .long   0x8c3e
38260 +       .uleb128 0x14
38261 +       .long   0x3728
38262 +       .uleb128 0x33
38263 +       .long   0x8c5c
38264 +       .long   .LASF1558
38265 +       .byte   0xa
38266 +       .byte   0x7f
38267 +       .byte   0x1
38268 +       .byte   0x3
38269 +       .uleb128 0x35
38270 +       .long   .LASF760
38271 +       .byte   0xa
38272 +       .byte   0x7e
38273 +       .long   0x8791
38274 +       .byte   0x0
38275 +       .uleb128 0x33
38276 +       .long   0x8c81
38277 +       .long   .LASF1559
38278 +       .byte   0x1
38279 +       .byte   0x70
38280 +       .byte   0x1
38281 +       .byte   0x3
38282 +       .uleb128 0x35
38283 +       .long   .LASF760
38284 +       .byte   0x1
38285 +       .byte   0x6f
38286 +       .long   0x8791
38287 +       .uleb128 0x35
38288 +       .long   .LASF1560
38289 +       .byte   0x1
38290 +       .byte   0x6f
38291 +       .long   0x887d
38292 +       .uleb128 0x37
38293 +       .byte   0x0
38294 +       .uleb128 0x36
38295 +       .long   0x8ca0
38296 +       .long   .LASF1561
38297 +       .byte   0x9
38298 +       .byte   0xb
38299 +       .byte   0x1
38300 +       .long   0x15f9
38301 +       .byte   0x3
38302 +       .uleb128 0x39
38303 +       .uleb128 0x38
38304 +       .long   .LASF1562
38305 +       .byte   0x9
38306 +       .byte   0xc
38307 +       .long   0x15f9
38308 +       .byte   0x0
38309 +       .byte   0x0
38310 +       .uleb128 0x33
38311 +       .long   0x8ccf
38312 +       .long   .LASF1563
38313 +       .byte   0xa
38314 +       .byte   0x2d
38315 +       .byte   0x1
38316 +       .byte   0x3
38317 +       .uleb128 0x35
38318 +       .long   .LASF760
38319 +       .byte   0xa
38320 +       .byte   0x2a
38321 +       .long   0x8791
38322 +       .uleb128 0x35
38323 +       .long   .LASF733
38324 +       .byte   0xa
38325 +       .byte   0x2b
38326 +       .long   0x3787
38327 +       .uleb128 0x35
38328 +       .long   .LASF734
38329 +       .byte   0xa
38330 +       .byte   0x2c
38331 +       .long   0x2f
38332 +       .byte   0x0
38333 +       .uleb128 0x47
38334 +       .long   0x8d1f
38335 +       .byte   0x1
38336 +       .long   .LASF1565
38337 +       .byte   0x1
38338 +       .byte   0x8a
38339 +       .byte   0x1
38340 +       .long   0x2f
38341 +       .long   .LFB883
38342 +       .long   .LFE883
38343 +       .long   .LLST0
38344 +       .uleb128 0x48
38345 +       .string "j"
38346 +       .byte   0x1
38347 +       .byte   0x89
38348 +       .long   0x2f
38349 +       .long   .LLST1
38350 +       .uleb128 0x48
38351 +       .string "cpu"
38352 +       .byte   0x1
38353 +       .byte   0x89
38354 +       .long   0x21
38355 +       .long   .LLST2
38356 +       .uleb128 0x3f
38357 +       .string "rem"
38358 +       .byte   0x1
38359 +       .byte   0x8b
38360 +       .long   0x21
38361 +       .uleb128 0x38
38362 +       .long   .LASF1564
38363 +       .byte   0x1
38364 +       .byte   0x8c
38365 +       .long   0x2f
38366 +       .byte   0x0
38367 +       .uleb128 0x49
38368 +       .long   0x8d58
38369 +       .byte   0x1
38370 +       .long   .LASF1566
38371 +       .byte   0x1
38372 +       .byte   0xc3
38373 +       .byte   0x1
38374 +       .long   0x2f
38375 +       .long   .LFB884
38376 +       .long   .LFE884
38377 +       .byte   0x2
38378 +       .byte   0x74
38379 +       .sleb128 4
38380 +       .uleb128 0x48
38381 +       .string "j"
38382 +       .byte   0x1
38383 +       .byte   0xc2
38384 +       .long   0x2f
38385 +       .long   .LLST4
38386 +       .uleb128 0x48
38387 +       .string "cpu"
38388 +       .byte   0x1
38389 +       .byte   0xc2
38390 +       .long   0x21
38391 +       .long   .LLST5
38392 +       .byte   0x0
38393 +       .uleb128 0x49
38394 +       .long   0x8d9b
38395 +       .byte   0x1
38396 +       .long   .LASF1567
38397 +       .byte   0x1
38398 +       .byte   0xde
38399 +       .byte   0x1
38400 +       .long   0x2f
38401 +       .long   .LFB885
38402 +       .long   .LFE885
38403 +       .byte   0x2
38404 +       .byte   0x74
38405 +       .sleb128 4
38406 +       .uleb128 0x48
38407 +       .string "j"
38408 +       .byte   0x1
38409 +       .byte   0xdd
38410 +       .long   0x2f
38411 +       .long   .LLST7
38412 +       .uleb128 0x4a
38413 +       .long   .LBB179
38414 +       .long   .LBE179
38415 +       .uleb128 0x4b
38416 +       .long   .LASF1562
38417 +       .byte   0x1
38418 +       .byte   0xdf
38419 +       .long   0x21
38420 +       .long   .LLST8
38421 +       .byte   0x0
38422 +       .byte   0x0
38423 +       .uleb128 0x49
38424 +       .long   0x8dde
38425 +       .byte   0x1
38426 +       .long   .LASF1568
38427 +       .byte   0x1
38428 +       .byte   0xf3
38429 +       .byte   0x1
38430 +       .long   0x2f
38431 +       .long   .LFB886
38432 +       .long   .LFE886
38433 +       .byte   0x2
38434 +       .byte   0x74
38435 +       .sleb128 4
38436 +       .uleb128 0x48
38437 +       .string "j"
38438 +       .byte   0x1
38439 +       .byte   0xf2
38440 +       .long   0x2f
38441 +       .long   .LLST10
38442 +       .uleb128 0x4a
38443 +       .long   .LBB180
38444 +       .long   .LBE180
38445 +       .uleb128 0x4b
38446 +       .long   .LASF1562
38447 +       .byte   0x1
38448 +       .byte   0xf4
38449 +       .long   0x21
38450 +       .long   .LLST11
38451 +       .byte   0x0
38452 +       .byte   0x0
38453 +       .uleb128 0x4c
38454 +       .long   0x8eda
38455 +       .long   .LASF1569
38456 +       .byte   0x1
38457 +       .value  0x102
38458 +       .byte   0x1
38459 +       .long   .LFB888
38460 +       .long   .LFE888
38461 +       .long   .LLST12
38462 +       .uleb128 0x4d
38463 +       .long   .LASF735
38464 +       .byte   0x1
38465 +       .value  0x101
38466 +       .long   0x887d
38467 +       .long   .LLST13
38468 +       .uleb128 0x4e
38469 +       .long   .LASF760
38470 +       .byte   0x1
38471 +       .value  0x101
38472 +       .long   0x8791
38473 +       .byte   0x1
38474 +       .byte   0x52
38475 +       .uleb128 0x4f
38476 +       .long   .LASF732
38477 +       .byte   0x1
38478 +       .value  0x103
38479 +       .long   0x2f
38480 +       .long   .LLST14
38481 +       .uleb128 0x50
38482 +       .string "idx"
38483 +       .byte   0x1
38484 +       .value  0x104
38485 +       .long   0x2f
38486 +       .long   .LLST15
38487 +       .uleb128 0x50
38488 +       .string "vec"
38489 +       .byte   0x1
38490 +       .value  0x105
38491 +       .long   0x17e5
38492 +       .long   .LLST16
38493 +       .uleb128 0x51
38494 +       .long   0x8e5d
38495 +       .long   .LBB181
38496 +       .long   .LBE181
38497 +       .uleb128 0x41
38498 +       .string "i"
38499 +       .byte   0x1
38500 +       .value  0x10b
38501 +       .long   0x21
38502 +       .byte   0x0
38503 +       .uleb128 0x51
38504 +       .long   0x8e75
38505 +       .long   .LBB182
38506 +       .long   .LBE182
38507 +       .uleb128 0x41
38508 +       .string "i"
38509 +       .byte   0x1
38510 +       .value  0x10e
38511 +       .long   0x21
38512 +       .byte   0x0
38513 +       .uleb128 0x51
38514 +       .long   0x8e8d
38515 +       .long   .LBB183
38516 +       .long   .LBE183
38517 +       .uleb128 0x41
38518 +       .string "i"
38519 +       .byte   0x1
38520 +       .value  0x111
38521 +       .long   0x21
38522 +       .byte   0x0
38523 +       .uleb128 0x51
38524 +       .long   0x8ea5
38525 +       .long   .LBB184
38526 +       .long   .LBE184
38527 +       .uleb128 0x41
38528 +       .string "i"
38529 +       .byte   0x1
38530 +       .value  0x11a
38531 +       .long   0x21
38532 +       .byte   0x0
38533 +       .uleb128 0x52
38534 +       .long   0x8883
38535 +       .long   .Ldebug_ranges0+0x0
38536 +       .byte   0x1
38537 +       .value  0x128
38538 +       .uleb128 0x53
38539 +       .long   0x889b
38540 +       .uleb128 0x53
38541 +       .long   0x8890
38542 +       .uleb128 0x54
38543 +       .long   0x87f4
38544 +       .long   .Ldebug_ranges0+0x18
38545 +       .byte   0x2
38546 +       .byte   0x56
38547 +       .uleb128 0x53
38548 +       .long   0x8817
38549 +       .uleb128 0x55
38550 +       .long   0x880c
38551 +       .byte   0x1
38552 +       .byte   0x50
38553 +       .uleb128 0x53
38554 +       .long   0x8801
38555 +       .byte   0x0
38556 +       .byte   0x0
38557 +       .byte   0x0
38558 +       .uleb128 0x56
38559 +       .long   0x8f30
38560 +       .byte   0x1
38561 +       .long   .LASF1570
38562 +       .byte   0x1
38563 +       .value  0x13f
38564 +       .byte   0x1
38565 +       .long   .LFB889
38566 +       .long   .LFE889
38567 +       .byte   0x2
38568 +       .byte   0x74
38569 +       .sleb128 4
38570 +       .uleb128 0x4e
38571 +       .long   .LASF760
38572 +       .byte   0x1
38573 +       .value  0x13e
38574 +       .long   0x8791
38575 +       .byte   0x1
38576 +       .byte   0x50
38577 +       .uleb128 0x4a
38578 +       .long   .LBB193
38579 +       .long   .LBE193
38580 +       .uleb128 0x43
38581 +       .long   .LASF1571
38582 +       .byte   0x1
38583 +       .value  0x141
38584 +       .long   0x2f
38585 +       .uleb128 0x4a
38586 +       .long   .LBB194
38587 +       .long   .LBE194
38588 +       .uleb128 0x45
38589 +       .long   .LASF1562
38590 +       .byte   0x1
38591 +       .value  0x141
38592 +       .long   0x2f
38593 +       .byte   0x1
38594 +       .byte   0x51
38595 +       .byte   0x0
38596 +       .byte   0x0
38597 +       .byte   0x0
38598 +       .uleb128 0x33
38599 +       .long   0x8f49
38600 +       .long   .LASF1572
38601 +       .byte   0x1
38602 +       .byte   0x69
38603 +       .byte   0x1
38604 +       .byte   0x3
38605 +       .uleb128 0x35
38606 +       .long   .LASF760
38607 +       .byte   0x1
38608 +       .byte   0x68
38609 +       .long   0x8791
38610 +       .byte   0x0
38611 +       .uleb128 0x57
38612 +       .long   0x8f8a
38613 +       .byte   0x1
38614 +       .long   .LASF1573
38615 +       .byte   0x1
38616 +       .value  0x14b
38617 +       .byte   0x1
38618 +       .long   .LFB890
38619 +       .long   .LFE890
38620 +       .long   .LLST18
38621 +       .uleb128 0x4d
38622 +       .long   .LASF760
38623 +       .byte   0x1
38624 +       .value  0x14a
38625 +       .long   0x8791
38626 +       .long   .LLST19
38627 +       .uleb128 0x58
38628 +       .long   0x8f30
38629 +       .long   .LBB197
38630 +       .long   .LBE197
38631 +       .byte   0x1
38632 +       .value  0x14d
38633 +       .uleb128 0x53
38634 +       .long   0x8f3d
38635 +       .byte   0x0
38636 +       .byte   0x0
38637 +       .uleb128 0x59
38638 +       .long   0x909a
38639 +       .long   .LASF1574
38640 +       .byte   0x1
38641 +       .value  0x245
38642 +       .byte   0x1
38643 +       .long   0x21
38644 +       .long   .LFB899
38645 +       .long   .LFE899
38646 +       .long   .LLST20
38647 +       .uleb128 0x4d
38648 +       .long   .LASF735
38649 +       .byte   0x1
38650 +       .value  0x244
38651 +       .long   0x887d
38652 +       .long   .LLST21
38653 +       .uleb128 0x5a
38654 +       .string "tv"
38655 +       .byte   0x1
38656 +       .value  0x244
38657 +       .long   0x909a
38658 +       .long   .LLST22
38659 +       .uleb128 0x4d
38660 +       .long   .LASF746
38661 +       .byte   0x1
38662 +       .value  0x244
38663 +       .long   0x21
38664 +       .long   .LLST23
38665 +       .uleb128 0x4f
38666 +       .long   .LASF760
38667 +       .byte   0x1
38668 +       .value  0x247
38669 +       .long   0x8791
38670 +       .long   .LLST24
38671 +       .uleb128 0x50
38672 +       .string "tmp"
38673 +       .byte   0x1
38674 +       .value  0x247
38675 +       .long   0x8791
38676 +       .long   .LLST25
38677 +       .uleb128 0x45
38678 +       .long   .LASF1575
38679 +       .byte   0x1
38680 +       .value  0x248
38681 +       .long   0x17bc
38682 +       .byte   0x2
38683 +       .byte   0x91
38684 +       .sleb128 -24
38685 +       .uleb128 0x5b
38686 +       .long   0x904f
38687 +       .long   0x88cc
38688 +       .long   .Ldebug_ranges0+0x30
38689 +       .byte   0x1
38690 +       .value  0x24a
38691 +       .uleb128 0x53
38692 +       .long   0x88e4
38693 +       .uleb128 0x53
38694 +       .long   0x88d9
38695 +       .uleb128 0x5c
38696 +       .long   0x9039
38697 +       .long   0x8823
38698 +       .long   .Ldebug_ranges0+0x48
38699 +       .byte   0x2
38700 +       .byte   0xe3
38701 +       .uleb128 0x53
38702 +       .long   0x883b
38703 +       .uleb128 0x53
38704 +       .long   0x8830
38705 +       .byte   0x0
38706 +       .uleb128 0x54
38707 +       .long   0x8847
38708 +       .long   .Ldebug_ranges0+0x60
38709 +       .byte   0x2
38710 +       .byte   0xe4
38711 +       .uleb128 0x5d
38712 +       .long   0x8854
38713 +       .long   .LLST26
38714 +       .byte   0x0
38715 +       .byte   0x0
38716 +       .uleb128 0x51
38717 +       .long   0x9069
38718 +       .long   .LBB207
38719 +       .long   .LBE207
38720 +       .uleb128 0x43
38721 +       .long   .LASF1576
38722 +       .byte   0x1
38723 +       .value  0x250
38724 +       .long   0x8b75
38725 +       .byte   0x0
38726 +       .uleb128 0x51
38727 +       .long   0x9083
38728 +       .long   .LBB212
38729 +       .long   .LBE212
38730 +       .uleb128 0x43
38731 +       .long   .LASF1576
38732 +       .byte   0x1
38733 +       .value  0x250
38734 +       .long   0x8b75
38735 +       .byte   0x0
38736 +       .uleb128 0x4a
38737 +       .long   .LBB213
38738 +       .long   .LBE213
38739 +       .uleb128 0x43
38740 +       .long   .LASF1576
38741 +       .byte   0x1
38742 +       .value  0x250
38743 +       .long   0x8b75
38744 +       .byte   0x0
38745 +       .byte   0x0
38746 +       .uleb128 0x4
38747 +       .byte   0x4
38748 +       .long   0x874f
38749 +       .uleb128 0x3b
38750 +       .long   0x916f
38751 +       .long   .LASF1577
38752 +       .byte   0x1
38753 +       .value  0x4d1
38754 +       .byte   0x1
38755 +       .long   0x21
38756 +       .byte   0x1
38757 +       .uleb128 0x3c
38758 +       .string "cpu"
38759 +       .byte   0x1
38760 +       .value  0x4d0
38761 +       .long   0x21
38762 +       .uleb128 0x41
38763 +       .string "j"
38764 +       .byte   0x1
38765 +       .value  0x4d2
38766 +       .long   0x21
38767 +       .uleb128 0x43
38768 +       .long   .LASF735
38769 +       .byte   0x1
38770 +       .value  0x4d3
38771 +       .long   0x887d
38772 +       .uleb128 0x5e
38773 +       .long   0x9123
38774 +       .uleb128 0x5e
38775 +       .long   0x90eb
38776 +       .uleb128 0x43
38777 +       .long   .LASF1578
38778 +       .byte   0x1
38779 +       .value  0x4e4
38780 +       .long   0x21
38781 +       .byte   0x0
38782 +       .uleb128 0x5e
38783 +       .long   0x90fd
38784 +       .uleb128 0x43
38785 +       .long   .LASF1571
38786 +       .byte   0x1
38787 +       .value  0x4e9
38788 +       .long   0x2f
38789 +       .byte   0x0
38790 +       .uleb128 0x5e
38791 +       .long   0x9113
38792 +       .uleb128 0x39
38793 +       .uleb128 0x39
38794 +       .uleb128 0x40
38795 +       .long   0x8a07
38796 +       .uleb128 0x39
38797 +       .uleb128 0x3a
38798 +       .long   0x8a0f
38799 +       .byte   0x0
38800 +       .byte   0x0
38801 +       .byte   0x0
38802 +       .byte   0x0
38803 +       .uleb128 0x37
38804 +       .uleb128 0x39
38805 +       .uleb128 0x39
38806 +       .uleb128 0x3a
38807 +       .long   0x8a92
38808 +       .uleb128 0x3a
38809 +       .long   0x8a9d
38810 +       .byte   0x0
38811 +       .byte   0x0
38812 +       .byte   0x0
38813 +       .uleb128 0x5e
38814 +       .long   0x9135
38815 +       .uleb128 0x43
38816 +       .long   .LASF1571
38817 +       .byte   0x1
38818 +       .value  0x4f6
38819 +       .long   0x2f
38820 +       .byte   0x0
38821 +       .uleb128 0x37
38822 +       .uleb128 0x37
38823 +       .uleb128 0x37
38824 +       .uleb128 0x37
38825 +       .uleb128 0x37
38826 +       .uleb128 0x45
38827 +       .long   .LASF1579
38828 +       .byte   0x1
38829 +       .value  0x4d7
38830 +       .long   0xbb
38831 +       .byte   0x5
38832 +       .byte   0x3
38833 +       .long   boot_done.19029
38834 +       .uleb128 0x5f
38835 +       .long   .LASF1580
38836 +       .long   0xa316
38837 +       .byte   0x1
38838 +       .byte   0x5
38839 +       .byte   0x3
38840 +       .long   __func__.19031
38841 +       .uleb128 0x45
38842 +       .long   .LASF1581
38843 +       .byte   0x1
38844 +       .value  0x4d4
38845 +       .long   0x46d1
38846 +       .byte   0x5
38847 +       .byte   0x3
38848 +       .long   tvec_base_done.19028
38849 +       .byte   0x0
38850 +       .uleb128 0x59
38851 +       .long   0x933c
38852 +       .long   .LASF1582
38853 +       .byte   0x1
38854 +       .value  0x538
38855 +       .byte   0x1
38856 +       .long   0x21
38857 +       .long   .LFB923
38858 +       .long   .LFE923
38859 +       .long   .LLST27
38860 +       .uleb128 0x4d
38861 +       .long   .LASF1583
38862 +       .byte   0x1
38863 +       .value  0x536
38864 +       .long   0x2e30
38865 +       .long   .LLST28
38866 +       .uleb128 0x4d
38867 +       .long   .LASF840
38868 +       .byte   0x1
38869 +       .value  0x537
38870 +       .long   0x2f
38871 +       .long   .LLST29
38872 +       .uleb128 0x4d
38873 +       .long   .LASF1584
38874 +       .byte   0x1
38875 +       .value  0x537
38876 +       .long   0x160b
38877 +       .long   .LLST30
38878 +       .uleb128 0x41
38879 +       .string "cpu"
38880 +       .byte   0x1
38881 +       .value  0x539
38882 +       .long   0x5a
38883 +       .uleb128 0x58
38884 +       .long   0x90a0
38885 +       .long   .LBB240
38886 +       .long   .LBE240
38887 +       .byte   0x1
38888 +       .value  0x53d
38889 +       .uleb128 0x53
38890 +       .long   0x90b2
38891 +       .uleb128 0x60
38892 +       .long   0x9286
38893 +       .long   .Ldebug_ranges0+0x78
38894 +       .uleb128 0x61
38895 +       .long   0x90be
38896 +       .long   .LLST31
38897 +       .uleb128 0x61
38898 +       .long   0x90c8
38899 +       .long   .LLST32
38900 +       .uleb128 0x62
38901 +       .long   0x9236
38902 +       .long   0x8a1a
38903 +       .long   .LBB243
38904 +       .long   .LBE243
38905 +       .byte   0x1
38906 +       .value  0x4dd
38907 +       .uleb128 0x53
38908 +       .long   0x8a41
38909 +       .uleb128 0x53
38910 +       .long   0x8a36
38911 +       .uleb128 0x53
38912 +       .long   0x8a2b
38913 +       .uleb128 0x63
38914 +       .long   0x89e0
38915 +       .long   .LBB245
38916 +       .long   .LBE245
38917 +       .byte   0x54
38918 +       .byte   0xc4
38919 +       .uleb128 0x53
38920 +       .long   0x89fc
38921 +       .uleb128 0x53
38922 +       .long   0x89f1
38923 +       .byte   0x0
38924 +       .byte   0x0
38925 +       .uleb128 0x51
38926 +       .long   0x9249
38927 +       .long   .LBB246
38928 +       .long   .LBE246
38929 +       .uleb128 0x3a
38930 +       .long   0x90de
38931 +       .byte   0x0
38932 +       .uleb128 0x58
38933 +       .long   0x8a5d
38934 +       .long   .LBB247
38935 +       .long   .LBE247
38936 +       .byte   0x1
38937 +       .value  0x4e8
38938 +       .uleb128 0x53
38939 +       .long   0x8a85
38940 +       .uleb128 0x53
38941 +       .long   0x8a79
38942 +       .uleb128 0x53
38943 +       .long   0x8a6f
38944 +       .uleb128 0x4a
38945 +       .long   .LBB249
38946 +       .long   .LBE249
38947 +       .uleb128 0x61
38948 +       .long   0x8a92
38949 +       .long   .LLST33
38950 +       .uleb128 0x61
38951 +       .long   0x8a9d
38952 +       .long   .LLST34
38953 +       .byte   0x0
38954 +       .byte   0x0
38955 +       .byte   0x0
38956 +       .uleb128 0x51
38957 +       .long   0x9299
38958 +       .long   .LBB252
38959 +       .long   .LBE252
38960 +       .uleb128 0x3a
38961 +       .long   0x90f0
38962 +       .byte   0x0
38963 +       .uleb128 0x51
38964 +       .long   0x92ac
38965 +       .long   .LBB256
38966 +       .long   .LBE256
38967 +       .uleb128 0x3a
38968 +       .long   0x9128
38969 +       .byte   0x0
38970 +       .uleb128 0x5b
38971 +       .long   0x92c6
38972 +       .long   0x8847
38973 +       .long   .Ldebug_ranges0+0xa8
38974 +       .byte   0x1
38975 +       .value  0x4fd
38976 +       .uleb128 0x5d
38977 +       .long   0x8854
38978 +       .long   .LLST35
38979 +       .byte   0x0
38980 +       .uleb128 0x62
38981 +       .long   0x92e4
38982 +       .long   0x8847
38983 +       .long   .LBB262
38984 +       .long   .LBE262
38985 +       .byte   0x1
38986 +       .value  0x4fe
38987 +       .uleb128 0x5d
38988 +       .long   0x8854
38989 +       .long   .LLST36
38990 +       .byte   0x0
38991 +       .uleb128 0x62
38992 +       .long   0x9302
38993 +       .long   0x8847
38994 +       .long   .LBB264
38995 +       .long   .LBE264
38996 +       .byte   0x1
38997 +       .value  0x4ff
38998 +       .uleb128 0x5d
38999 +       .long   0x8854
39000 +       .long   .LLST37
39001 +       .byte   0x0
39002 +       .uleb128 0x62
39003 +       .long   0x9320
39004 +       .long   0x8847
39005 +       .long   .LBB266
39006 +       .long   .LBE266
39007 +       .byte   0x1
39008 +       .value  0x500
39009 +       .uleb128 0x5d
39010 +       .long   0x8854
39011 +       .long   .LLST38
39012 +       .byte   0x0
39013 +       .uleb128 0x58
39014 +       .long   0x8847
39015 +       .long   .LBB268
39016 +       .long   .LBE268
39017 +       .byte   0x1
39018 +       .value  0x503
39019 +       .uleb128 0x5d
39020 +       .long   0x8854
39021 +       .long   .LLST39
39022 +       .byte   0x0
39023 +       .byte   0x0
39024 +       .byte   0x0
39025 +       .uleb128 0x32
39026 +       .long   .LASF1585
39027 +       .byte   0xa
39028 +       .byte   0x77
39029 +       .byte   0x1
39030 +       .byte   0x3
39031 +       .uleb128 0x56
39032 +       .long   0x9389
39033 +       .byte   0x1
39034 +       .long   .LASF1586
39035 +       .byte   0x1
39036 +       .value  0x552
39037 +       .byte   0x1
39038 +       .long   .LFB924
39039 +       .long   .LFE924
39040 +       .byte   0x2
39041 +       .byte   0x74
39042 +       .sleb128 4
39043 +       .uleb128 0x50
39044 +       .string "err"
39045 +       .byte   0x1
39046 +       .value  0x553
39047 +       .long   0x21
39048 +       .long   .LLST41
39049 +       .uleb128 0x4a
39050 +       .long   .LBB273
39051 +       .long   .LBE273
39052 +       .uleb128 0x4f
39053 +       .long   .LASF1562
39054 +       .byte   0x1
39055 +       .value  0x554
39056 +       .long   0x21
39057 +       .long   .LLST42
39058 +       .byte   0x0
39059 +       .byte   0x0
39060 +       .uleb128 0x42
39061 +       .long   0x947c
39062 +       .long   .LASF1587
39063 +       .byte   0x1
39064 +       .value  0x262
39065 +       .byte   0x1
39066 +       .byte   0x3
39067 +       .uleb128 0x3d
39068 +       .long   .LASF735
39069 +       .byte   0x1
39070 +       .value  0x261
39071 +       .long   0x887d
39072 +       .uleb128 0x43
39073 +       .long   .LASF760
39074 +       .byte   0x1
39075 +       .value  0x263
39076 +       .long   0x8791
39077 +       .uleb128 0x5e
39078 +       .long   0x943a
39079 +       .uleb128 0x43
39080 +       .long   .LASF1588
39081 +       .byte   0x1
39082 +       .value  0x267
39083 +       .long   0x17bc
39084 +       .uleb128 0x43
39085 +       .long   .LASF600
39086 +       .byte   0x1
39087 +       .value  0x268
39088 +       .long   0x17e5
39089 +       .uleb128 0x43
39090 +       .long   .LASF746
39091 +       .byte   0x1
39092 +       .value  0x269
39093 +       .long   0x21
39094 +       .uleb128 0x5e
39095 +       .long   0x9430
39096 +       .uleb128 0x41
39097 +       .string "fn"
39098 +       .byte   0x1
39099 +       .value  0x276
39100 +       .long   0x3787
39101 +       .uleb128 0x43
39102 +       .long   .LASF734
39103 +       .byte   0x1
39104 +       .value  0x277
39105 +       .long   0x2f
39106 +       .uleb128 0x5e
39107 +       .long   0x9406
39108 +       .uleb128 0x43
39109 +       .long   .LASF1576
39110 +       .byte   0x1
39111 +       .value  0x279
39112 +       .long   0x8b75
39113 +       .byte   0x0
39114 +       .uleb128 0x5e
39115 +       .long   0x941b
39116 +       .uleb128 0x43
39117 +       .long   .LASF163
39118 +       .byte   0x1
39119 +       .value  0x283
39120 +       .long   0x21
39121 +       .uleb128 0x37
39122 +       .uleb128 0x37
39123 +       .uleb128 0x37
39124 +       .byte   0x0
39125 +       .uleb128 0x37
39126 +       .uleb128 0x37
39127 +       .uleb128 0x5e
39128 +       .long   0x942b
39129 +       .uleb128 0x39
39130 +       .uleb128 0x3a
39131 +       .long   0x8b0d
39132 +       .uleb128 0x37
39133 +       .byte   0x0
39134 +       .byte   0x0
39135 +       .uleb128 0x37
39136 +       .uleb128 0x39
39137 +       .uleb128 0x37
39138 +       .byte   0x0
39139 +       .byte   0x0
39140 +       .uleb128 0x5e
39141 +       .long   0x9438
39142 +       .uleb128 0x37
39143 +       .uleb128 0x37
39144 +       .byte   0x0
39145 +       .uleb128 0x37
39146 +       .byte   0x0
39147 +       .uleb128 0x5e
39148 +       .long   0x9458
39149 +       .uleb128 0x43
39150 +       .long   .LASF1589
39151 +       .byte   0x1
39152 +       .value  0x266
39153 +       .long   0x2f
39154 +       .uleb128 0x43
39155 +       .long   .LASF1590
39156 +       .byte   0x1
39157 +       .value  0x266
39158 +       .long   0x8941
39159 +       .byte   0x0
39160 +       .uleb128 0x5e
39161 +       .long   0x9476
39162 +       .uleb128 0x43
39163 +       .long   .LASF1589
39164 +       .byte   0x1
39165 +       .value  0x266
39166 +       .long   0x2f
39167 +       .uleb128 0x43
39168 +       .long   .LASF1590
39169 +       .byte   0x1
39170 +       .value  0x266
39171 +       .long   0x2f
39172 +       .byte   0x0
39173 +       .uleb128 0x37
39174 +       .uleb128 0x37
39175 +       .uleb128 0x39
39176 +       .uleb128 0x37
39177 +       .byte   0x0
39178 +       .byte   0x0
39179 +       .uleb128 0x4c
39180 +       .long   0x96cb
39181 +       .long   .LASF1591
39182 +       .byte   0x1
39183 +       .value  0x368
39184 +       .byte   0x1
39185 +       .long   .LFB904
39186 +       .long   .LFE904
39187 +       .long   .LLST43
39188 +       .uleb128 0x5a
39189 +       .string "h"
39190 +       .byte   0x1
39191 +       .value  0x367
39192 +       .long   0x5ce1
39193 +       .long   .LLST44
39194 +       .uleb128 0x4f
39195 +       .long   .LASF735
39196 +       .byte   0x1
39197 +       .value  0x369
39198 +       .long   0x887d
39199 +       .long   .LLST45
39200 +       .uleb128 0x60
39201 +       .long   0x94c9
39202 +       .long   .Ldebug_ranges0+0xc0
39203 +       .uleb128 0x43
39204 +       .long   .LASF1571
39205 +       .byte   0x1
39206 +       .value  0x369
39207 +       .long   0x2f
39208 +       .byte   0x0
39209 +       .uleb128 0x51
39210 +       .long   0x94e7
39211 +       .long   .LBB324
39212 +       .long   .LBE324
39213 +       .uleb128 0x4f
39214 +       .long   .LASF1562
39215 +       .byte   0x1
39216 +       .value  0x369
39217 +       .long   0x2f
39218 +       .long   .LLST46
39219 +       .byte   0x0
39220 +       .uleb128 0x58
39221 +       .long   0x9389
39222 +       .long   .LBB325
39223 +       .long   .LBE325
39224 +       .byte   0x1
39225 +       .value  0x36e
39226 +       .uleb128 0x53
39227 +       .long   0x9397
39228 +       .uleb128 0x60
39229 +       .long   0x9527
39230 +       .long   .Ldebug_ranges0+0xd8
39231 +       .uleb128 0x3a
39232 +       .long   0x93a3
39233 +       .uleb128 0x64
39234 +       .long   .Ldebug_ranges0+0x100
39235 +       .uleb128 0x65
39236 +       .long   0x93b4
39237 +       .byte   0x2
39238 +       .byte   0x91
39239 +       .sleb128 -24
39240 +       .uleb128 0x3a
39241 +       .long   0x93c0
39242 +       .uleb128 0x61
39243 +       .long   0x93cc
39244 +       .long   .LLST47
39245 +       .byte   0x0
39246 +       .byte   0x0
39247 +       .uleb128 0x5b
39248 +       .long   0x9558
39249 +       .long   0x88cc
39250 +       .long   .Ldebug_ranges0+0x128
39251 +       .byte   0x1
39252 +       .value  0x274
39253 +       .uleb128 0x53
39254 +       .long   0x88e4
39255 +       .uleb128 0x53
39256 +       .long   0x88d9
39257 +       .uleb128 0x54
39258 +       .long   0x8823
39259 +       .long   .Ldebug_ranges0+0x140
39260 +       .byte   0x2
39261 +       .byte   0xe3
39262 +       .uleb128 0x53
39263 +       .long   0x883b
39264 +       .uleb128 0x53
39265 +       .long   0x8830
39266 +       .byte   0x0
39267 +       .byte   0x0
39268 +       .uleb128 0x66
39269 +       .long   0x9575
39270 +       .long   0x8847
39271 +       .long   .LBB342
39272 +       .long   .LBE342
39273 +       .byte   0x2
39274 +       .byte   0xe4
39275 +       .uleb128 0x5d
39276 +       .long   0x8854
39277 +       .long   .LLST48
39278 +       .byte   0x0
39279 +       .uleb128 0x51
39280 +       .long   0x9656
39281 +       .long   .LBB344
39282 +       .long   .LBE344
39283 +       .uleb128 0x61
39284 +       .long   0x93dd
39285 +       .long   .LLST49
39286 +       .uleb128 0x61
39287 +       .long   0x93e8
39288 +       .long   .LLST50
39289 +       .uleb128 0x62
39290 +       .long   0x95b3
39291 +       .long   0x8ac3
39292 +       .long   .LBB345
39293 +       .long   .LBE345
39294 +       .byte   0x1
39295 +       .value  0x27f
39296 +       .uleb128 0x53
39297 +       .long   0x8adb
39298 +       .uleb128 0x53
39299 +       .long   0x8ad0
39300 +       .byte   0x0
39301 +       .uleb128 0x62
39302 +       .long   0x9604
39303 +       .long   0x8ae7
39304 +       .long   .LBB347
39305 +       .long   .LBE347
39306 +       .byte   0x1
39307 +       .value  0x280
39308 +       .uleb128 0x53
39309 +       .long   0x8b01
39310 +       .uleb128 0x53
39311 +       .long   0x8af5
39312 +       .uleb128 0x4a
39313 +       .long   .LBB348
39314 +       .long   .LBE348
39315 +       .uleb128 0x3a
39316 +       .long   0x8b0d
39317 +       .uleb128 0x58
39318 +       .long   0x88a8
39319 +       .long   .LBB349
39320 +       .long   .LBE349
39321 +       .byte   0x1
39322 +       .value  0x156
39323 +       .uleb128 0x5d
39324 +       .long   0x88c0
39325 +       .long   .LLST51
39326 +       .uleb128 0x5d
39327 +       .long   0x88b5
39328 +       .long   .LLST52
39329 +       .byte   0x0
39330 +       .byte   0x0
39331 +       .byte   0x0
39332 +       .uleb128 0x62
39333 +       .long   0x961e
39334 +       .long   0x8b1b
39335 +       .long   .LBB351
39336 +       .long   .LBE351
39337 +       .byte   0x1
39338 +       .value  0x281
39339 +       .uleb128 0x53
39340 +       .long   0x8b28
39341 +       .byte   0x0
39342 +       .uleb128 0x62
39343 +       .long   0x9642
39344 +       .long   0x8b3a
39345 +       .long   .LBB353
39346 +       .long   .LBE353
39347 +       .byte   0x1
39348 +       .value  0x281
39349 +       .uleb128 0x67
39350 +       .long   0x87eb
39351 +       .long   .LBB355
39352 +       .long   .LBE355
39353 +       .byte   0x6
39354 +       .byte   0x48
39355 +       .byte   0x0
39356 +       .uleb128 0x4a
39357 +       .long   .LBB357
39358 +       .long   .LBE357
39359 +       .uleb128 0x61
39360 +       .long   0x940b
39361 +       .long   .LLST53
39362 +       .byte   0x0
39363 +       .byte   0x0
39364 +       .uleb128 0x62
39365 +       .long   0x9670
39366 +       .long   0x8b56
39367 +       .long   .LBB358
39368 +       .long   .LBE358
39369 +       .byte   0x1
39370 +       .value  0x275
39371 +       .uleb128 0x53
39372 +       .long   0x8b68
39373 +       .byte   0x0
39374 +       .uleb128 0x62
39375 +       .long   0x968f
39376 +       .long   0x8ac3
39377 +       .long   .LBB360
39378 +       .long   .LBE360
39379 +       .byte   0x1
39380 +       .value  0x291
39381 +       .uleb128 0x53
39382 +       .long   0x8adb
39383 +       .uleb128 0x53
39384 +       .long   0x8ad0
39385 +       .byte   0x0
39386 +       .uleb128 0x62
39387 +       .long   0x96a9
39388 +       .long   0x8b1b
39389 +       .long   .LBB362
39390 +       .long   .LBE362
39391 +       .byte   0x1
39392 +       .value  0x292
39393 +       .uleb128 0x53
39394 +       .long   0x8b28
39395 +       .byte   0x0
39396 +       .uleb128 0x58
39397 +       .long   0x8b3a
39398 +       .long   .LBB364
39399 +       .long   .LBE364
39400 +       .byte   0x1
39401 +       .value  0x292
39402 +       .uleb128 0x67
39403 +       .long   0x87eb
39404 +       .long   .LBB366
39405 +       .long   .LBE366
39406 +       .byte   0x6
39407 +       .byte   0x48
39408 +       .byte   0x0
39409 +       .byte   0x0
39410 +       .byte   0x0
39411 +       .uleb128 0x68
39412 +       .long   0x97e4
39413 +       .byte   0x1
39414 +       .long   .LASF1592
39415 +       .byte   0x1
39416 +       .value  0x46c
39417 +       .byte   0x1
39418 +       .long   0x21
39419 +       .long   .LFB920
39420 +       .long   .LFE920
39421 +       .long   .LLST54
39422 +       .uleb128 0x4d
39423 +       .long   .LASF82
39424 +       .byte   0x1
39425 +       .value  0x46b
39426 +       .long   0x97e4
39427 +       .long   .LLST55
39428 +       .uleb128 0x4f
39429 +       .long   .LASF1593
39430 +       .byte   0x1
39431 +       .value  0x46d
39432 +       .long   0x2f
39433 +       .long   .LLST56
39434 +       .uleb128 0x43
39435 +       .long   .LASF1594
39436 +       .byte   0x1
39437 +       .value  0x46d
39438 +       .long   0x2f
39439 +       .uleb128 0x4f
39440 +       .long   .LASF120
39441 +       .byte   0x1
39442 +       .value  0x46e
39443 +       .long   0x77
39444 +       .long   .LLST57
39445 +       .uleb128 0x45
39446 +       .long   .LASF1595
39447 +       .byte   0x1
39448 +       .value  0x46e
39449 +       .long   0x77
39450 +       .byte   0x1
39451 +       .byte   0x51
39452 +       .uleb128 0x41
39453 +       .string "seq"
39454 +       .byte   0x1
39455 +       .value  0x46f
39456 +       .long   0x2f
39457 +       .uleb128 0x69
39458 +       .string "out"
39459 +       .byte   0x1
39460 +       .value  0x4b9
39461 +       .long   .L91
39462 +       .uleb128 0x5b
39463 +       .long   0x9783
39464 +       .long   0x8a5d
39465 +       .long   .Ldebug_ranges0+0x158
39466 +       .byte   0x1
39467 +       .value  0x471
39468 +       .uleb128 0x53
39469 +       .long   0x8a85
39470 +       .uleb128 0x53
39471 +       .long   0x8a79
39472 +       .uleb128 0x53
39473 +       .long   0x8a6f
39474 +       .uleb128 0x64
39475 +       .long   .Ldebug_ranges0+0x170
39476 +       .uleb128 0x61
39477 +       .long   0x8a92
39478 +       .long   .LLST58
39479 +       .uleb128 0x61
39480 +       .long   0x8a9d
39481 +       .long   .LLST59
39482 +       .byte   0x0
39483 +       .byte   0x0
39484 +       .uleb128 0x51
39485 +       .long   0x97c8
39486 +       .long   .LBB374
39487 +       .long   .LBE374
39488 +       .uleb128 0x6a
39489 +       .string "tp"
39490 +       .byte   0x1
39491 +       .value  0x474
39492 +       .long   0x173b
39493 +       .byte   0x2
39494 +       .byte   0x91
39495 +       .sleb128 -20
39496 +       .uleb128 0x58
39497 +       .long   0x8b80
39498 +       .long   .LBB375
39499 +       .long   .LBE375
39500 +       .byte   0x1
39501 +       .value  0x475
39502 +       .uleb128 0x53
39503 +       .long   0x8b91
39504 +       .uleb128 0x4a
39505 +       .long   .LBB376
39506 +       .long   .LBE376
39507 +       .uleb128 0x61
39508 +       .long   0x8b9b
39509 +       .long   .LLST60
39510 +       .byte   0x0
39511 +       .byte   0x0
39512 +       .byte   0x0
39513 +       .uleb128 0x58
39514 +       .long   0x8bb2
39515 +       .long   .LBB377
39516 +       .long   .LBE377
39517 +       .byte   0x1
39518 +       .value  0x48c
39519 +       .uleb128 0x53
39520 +       .long   0x8bcd
39521 +       .uleb128 0x53
39522 +       .long   0x8bc3
39523 +       .byte   0x0
39524 +       .byte   0x0
39525 +       .uleb128 0x4
39526 +       .byte   0x4
39527 +       .long   0x7fd
39528 +       .uleb128 0x68
39529 +       .long   0x9828
39530 +       .byte   0x1
39531 +       .long   .LASF1597
39532 +       .byte   0x1
39533 +       .value  0x4be
39534 +       .byte   0x1
39535 +       .long   0x5a
39536 +       .long   .LFB921
39537 +       .long   .LFE921
39538 +       .long   .LLST61
39539 +       .uleb128 0x4e
39540 +       .long   .LASF82
39541 +       .byte   0x1
39542 +       .value  0x4bd
39543 +       .long   0x97e4
39544 +       .byte   0x2
39545 +       .byte   0x91
39546 +       .sleb128 0
39547 +       .uleb128 0x6a
39548 +       .string "val"
39549 +       .byte   0x1
39550 +       .value  0x4bf
39551 +       .long   0x7fd
39552 +       .byte   0x3
39553 +       .byte   0x91
39554 +       .sleb128 -72
39555 +       .byte   0x0
39556 +       .uleb128 0x6b
39557 +       .long   0x9851
39558 +       .long   .LASF1598
39559 +       .byte   0x1
39560 +       .value  0x401
39561 +       .byte   0x1
39562 +       .long   .LFB915
39563 +       .long   .LFE915
39564 +       .byte   0x2
39565 +       .byte   0x74
39566 +       .sleb128 4
39567 +       .uleb128 0x4d
39568 +       .long   .LASF1599
39569 +       .byte   0x1
39570 +       .value  0x400
39571 +       .long   0x2f
39572 +       .long   .LLST63
39573 +       .byte   0x0
39574 +       .uleb128 0x6c
39575 +       .long   0x987e
39576 +       .byte   0x1
39577 +       .long   .LASF1600
39578 +       .byte   0x1
39579 +       .value  0x397
39580 +       .byte   0x1
39581 +       .long   0x2f
39582 +       .long   .LFB908
39583 +       .long   .LFE908
39584 +       .byte   0x2
39585 +       .byte   0x74
39586 +       .sleb128 4
39587 +       .uleb128 0x4e
39588 +       .long   .LASF1601
39589 +       .byte   0x1
39590 +       .value  0x396
39591 +       .long   0x77
39592 +       .byte   0x2
39593 +       .byte   0x91
39594 +       .sleb128 0
39595 +       .byte   0x0
39596 +       .uleb128 0x42
39597 +       .long   0x98a2
39598 +       .long   .LASF1602
39599 +       .byte   0x1
39600 +       .value  0x37f
39601 +       .byte   0x1
39602 +       .byte   0x3
39603 +       .uleb128 0x3d
39604 +       .long   .LASF1554
39605 +       .byte   0x1
39606 +       .value  0x37e
39607 +       .long   0x2f
39608 +       .uleb128 0x39
39609 +       .uleb128 0x39
39610 +       .uleb128 0x3a
39611 +       .long   0x8bf2
39612 +       .byte   0x0
39613 +       .byte   0x0
39614 +       .byte   0x0
39615 +       .uleb128 0x6d
39616 +       .long   .LASF1603
39617 +       .byte   0x1
39618 +       .value  0x33f
39619 +       .byte   0x1
39620 +       .long   0x2f
39621 +       .byte   0x1
39622 +       .uleb128 0x57
39623 +       .long   0x9926
39624 +       .byte   0x1
39625 +       .long   .LASF1604
39626 +       .byte   0x1
39627 +       .value  0x38b
39628 +       .byte   0x1
39629 +       .long   .LFB907
39630 +       .long   .LFE907
39631 +       .long   .LLST65
39632 +       .uleb128 0x4d
39633 +       .long   .LASF1554
39634 +       .byte   0x1
39635 +       .value  0x38a
39636 +       .long   0x2f
39637 +       .long   .LLST66
39638 +       .uleb128 0x58
39639 +       .long   0x987e
39640 +       .long   .LBB385
39641 +       .long   .LBE385
39642 +       .byte   0x1
39643 +       .value  0x38d
39644 +       .uleb128 0x53
39645 +       .long   0x988c
39646 +       .uleb128 0x58
39647 +       .long   0x8bd8
39648 +       .long   .LBB387
39649 +       .long   .LBE387
39650 +       .byte   0x1
39651 +       .value  0x381
39652 +       .uleb128 0x53
39653 +       .long   0x8be6
39654 +       .uleb128 0x4a
39655 +       .long   .LBB388
39656 +       .long   .LBE388
39657 +       .uleb128 0x61
39658 +       .long   0x8bf2
39659 +       .long   .LLST67
39660 +       .uleb128 0x6e
39661 +       .long   0x98a2
39662 +       .long   .Ldebug_ranges0+0x188
39663 +       .byte   0x1
39664 +       .value  0x35a
39665 +       .byte   0x0
39666 +       .byte   0x0
39667 +       .byte   0x0
39668 +       .byte   0x0
39669 +       .uleb128 0x6f
39670 +       .byte   0x1
39671 +       .long   .LASF1721
39672 +       .byte   0x1
39673 +       .value  0x375
39674 +       .byte   0x1
39675 +       .long   .LFB905
39676 +       .long   .LFE905
39677 +       .byte   0x2
39678 +       .byte   0x74
39679 +       .sleb128 4
39680 +       .uleb128 0x59
39681 +       .long   0x99b3
39682 +       .long   .LASF1605
39683 +       .byte   0x1
39684 +       .value  0x16b
39685 +       .byte   0x1
39686 +       .long   0x887d
39687 +       .long   .LFB892
39688 +       .long   .LFE892
39689 +       .long   .LLST69
39690 +       .uleb128 0x4d
39691 +       .long   .LASF760
39692 +       .byte   0x1
39693 +       .value  0x168
39694 +       .long   0x8791
39695 +       .long   .LLST70
39696 +       .uleb128 0x4d
39697 +       .long   .LASF53
39698 +       .byte   0x1
39699 +       .value  0x169
39700 +       .long   0xdd4
39701 +       .long   .LLST71
39702 +       .uleb128 0x4f
39703 +       .long   .LASF735
39704 +       .byte   0x1
39705 +       .value  0x16c
39706 +       .long   0x887d
39707 +       .long   .LLST72
39708 +       .uleb128 0x4a
39709 +       .long   .LBB392
39710 +       .long   .LBE392
39711 +       .uleb128 0x4f
39712 +       .long   .LASF1606
39713 +       .byte   0x1
39714 +       .value  0x16f
39715 +       .long   0x887d
39716 +       .long   .LLST73
39717 +       .uleb128 0x70
39718 +       .long   0x8c11
39719 +       .long   .LBB393
39720 +       .long   .LBE393
39721 +       .byte   0x1
39722 +       .value  0x178
39723 +       .byte   0x0
39724 +       .byte   0x0
39725 +       .uleb128 0x68
39726 +       .long   0x9a6a
39727 +       .byte   0x1
39728 +       .long   .LASF1607
39729 +       .byte   0x1
39730 +       .value  0x20f
39731 +       .byte   0x1
39732 +       .long   0x21
39733 +       .long   .LFB897
39734 +       .long   .LFE897
39735 +       .long   .LLST74
39736 +       .uleb128 0x4d
39737 +       .long   .LASF760
39738 +       .byte   0x1
39739 +       .value  0x20e
39740 +       .long   0x8791
39741 +       .long   .LLST75
39742 +       .uleb128 0x4f
39743 +       .long   .LASF735
39744 +       .byte   0x1
39745 +       .value  0x210
39746 +       .long   0x887d
39747 +       .long   .LLST76
39748 +       .uleb128 0x45
39749 +       .long   .LASF53
39750 +       .byte   0x1
39751 +       .value  0x211
39752 +       .long   0x2f
39753 +       .byte   0x2
39754 +       .byte   0x91
39755 +       .sleb128 -16
39756 +       .uleb128 0x50
39757 +       .string "ret"
39758 +       .byte   0x1
39759 +       .value  0x212
39760 +       .long   0x21
39761 +       .long   .LLST77
39762 +       .uleb128 0x69
39763 +       .string "out"
39764 +       .byte   0x1
39765 +       .value  0x21e
39766 +       .long   .L133
39767 +       .uleb128 0x58
39768 +       .long   0x8ae7
39769 +       .long   .LBB395
39770 +       .long   .LBE395
39771 +       .byte   0x1
39772 +       .value  0x21b
39773 +       .uleb128 0x53
39774 +       .long   0x8b01
39775 +       .uleb128 0x53
39776 +       .long   0x8af5
39777 +       .uleb128 0x4a
39778 +       .long   .LBB396
39779 +       .long   .LBE396
39780 +       .uleb128 0x3a
39781 +       .long   0x8b0d
39782 +       .uleb128 0x58
39783 +       .long   0x88a8
39784 +       .long   .LBB397
39785 +       .long   .LBE397
39786 +       .byte   0x1
39787 +       .value  0x156
39788 +       .uleb128 0x5d
39789 +       .long   0x88c0
39790 +       .long   .LLST78
39791 +       .uleb128 0x5d
39792 +       .long   0x88b5
39793 +       .long   .LLST79
39794 +       .byte   0x0
39795 +       .byte   0x0
39796 +       .byte   0x0
39797 +       .byte   0x0
39798 +       .uleb128 0x68
39799 +       .long   0x9ac3
39800 +       .byte   0x1
39801 +       .long   .LASF1608
39802 +       .byte   0x1
39803 +       .value  0x238
39804 +       .byte   0x1
39805 +       .long   0x21
39806 +       .long   .LFB898
39807 +       .long   .LFE898
39808 +       .long   .LLST80
39809 +       .uleb128 0x4d
39810 +       .long   .LASF760
39811 +       .byte   0x1
39812 +       .value  0x237
39813 +       .long   0x8791
39814 +       .long   .LLST81
39815 +       .uleb128 0x4a
39816 +       .long   .LBB399
39817 +       .long   .LBE399
39818 +       .uleb128 0x50
39819 +       .string "ret"
39820 +       .byte   0x1
39821 +       .value  0x23a
39822 +       .long   0x21
39823 +       .long   .LLST82
39824 +       .uleb128 0x70
39825 +       .long   0x8c11
39826 +       .long   .LBB400
39827 +       .long   .LBE400
39828 +       .byte   0x1
39829 +       .value  0x23d
39830 +       .byte   0x0
39831 +       .byte   0x0
39832 +       .uleb128 0x68
39833 +       .long   0x9c15
39834 +       .byte   0x1
39835 +       .long   .LASF1609
39836 +       .byte   0x1
39837 +       .value  0x17d
39838 +       .byte   0x1
39839 +       .long   0x21
39840 +       .long   .LFB893
39841 +       .long   .LFE893
39842 +       .long   .LLST83
39843 +       .uleb128 0x4d
39844 +       .long   .LASF760
39845 +       .byte   0x1
39846 +       .value  0x17c
39847 +       .long   0x8791
39848 +       .long   .LLST84
39849 +       .uleb128 0x4d
39850 +       .long   .LASF732
39851 +       .byte   0x1
39852 +       .value  0x17c
39853 +       .long   0x2f
39854 +       .long   .LLST85
39855 +       .uleb128 0x4f
39856 +       .long   .LASF735
39857 +       .byte   0x1
39858 +       .value  0x17e
39859 +       .long   0x887d
39860 +       .long   .LLST86
39861 +       .uleb128 0x4f
39862 +       .long   .LASF1560
39863 +       .byte   0x1
39864 +       .value  0x17e
39865 +       .long   0x887d
39866 +       .long   .LLST87
39867 +       .uleb128 0x45
39868 +       .long   .LASF53
39869 +       .byte   0x1
39870 +       .value  0x17f
39871 +       .long   0x2f
39872 +       .byte   0x2
39873 +       .byte   0x91
39874 +       .sleb128 -24
39875 +       .uleb128 0x6a
39876 +       .string "ret"
39877 +       .byte   0x1
39878 +       .value  0x180
39879 +       .long   0x21
39880 +       .byte   0x2
39881 +       .byte   0x91
39882 +       .sleb128 -28
39883 +       .uleb128 0x62
39884 +       .long   0x9b90
39885 +       .long   0x8ae7
39886 +       .long   .LBB402
39887 +       .long   .LBE402
39888 +       .byte   0x1
39889 +       .value  0x188
39890 +       .uleb128 0x53
39891 +       .long   0x8b01
39892 +       .uleb128 0x53
39893 +       .long   0x8af5
39894 +       .uleb128 0x4a
39895 +       .long   .LBB403
39896 +       .long   .LBE403
39897 +       .uleb128 0x3a
39898 +       .long   0x8b0d
39899 +       .uleb128 0x58
39900 +       .long   0x88a8
39901 +       .long   .LBB404
39902 +       .long   .LBE404
39903 +       .byte   0x1
39904 +       .value  0x156
39905 +       .uleb128 0x5d
39906 +       .long   0x88c0
39907 +       .long   .LLST88
39908 +       .uleb128 0x5d
39909 +       .long   0x88b5
39910 +       .long   .LLST89
39911 +       .byte   0x0
39912 +       .byte   0x0
39913 +       .byte   0x0
39914 +       .uleb128 0x51
39915 +       .long   0x9bc4
39916 +       .long   .LBB406
39917 +       .long   .LBE406
39918 +       .uleb128 0x43
39919 +       .long   .LASF1571
39920 +       .byte   0x1
39921 +       .value  0x18c
39922 +       .long   0x2f
39923 +       .uleb128 0x4a
39924 +       .long   .LBB407
39925 +       .long   .LBE407
39926 +       .uleb128 0x4f
39927 +       .long   .LASF1562
39928 +       .byte   0x1
39929 +       .value  0x18c
39930 +       .long   0x2f
39931 +       .long   .LLST90
39932 +       .byte   0x0
39933 +       .byte   0x0
39934 +       .uleb128 0x62
39935 +       .long   0x9be3
39936 +       .long   0x8c5c
39937 +       .long   .LBB408
39938 +       .long   .LBE408
39939 +       .byte   0x1
39940 +       .value  0x198
39941 +       .uleb128 0x53
39942 +       .long   0x8c74
39943 +       .uleb128 0x53
39944 +       .long   0x8c69
39945 +       .byte   0x0
39946 +       .uleb128 0x62
39947 +       .long   0x9bfd
39948 +       .long   0x8b1b
39949 +       .long   .LBB410
39950 +       .long   .LBE410
39951 +       .byte   0x1
39952 +       .value  0x199
39953 +       .uleb128 0x53
39954 +       .long   0x8b28
39955 +       .byte   0x0
39956 +       .uleb128 0x52
39957 +       .long   0x8c5c
39958 +       .long   .Ldebug_ranges0+0x1a0
39959 +       .byte   0x1
39960 +       .value  0x19c
39961 +       .uleb128 0x53
39962 +       .long   0x8c74
39963 +       .uleb128 0x53
39964 +       .long   0x8c69
39965 +       .byte   0x0
39966 +       .byte   0x0
39967 +       .uleb128 0x68
39968 +       .long   0x9cdb
39969 +       .byte   0x1
39970 +       .long   .LASF1610
39971 +       .byte   0x1
39972 +       .value  0x420
39973 +       .byte   0x1
39974 +       .long   0x5a
39975 +       .long   .LFB916
39976 +       .long   .LFE916
39977 +       .long   .LLST91
39978 +       .uleb128 0x4d
39979 +       .long   .LASF1611
39980 +       .byte   0x1
39981 +       .value  0x41f
39982 +       .long   0x5a
39983 +       .long   .LLST92
39984 +       .uleb128 0x45
39985 +       .long   .LASF760
39986 +       .byte   0x1
39987 +       .value  0x421
39988 +       .long   0x3728
39989 +       .byte   0x2
39990 +       .byte   0x91
39991 +       .sleb128 -36
39992 +       .uleb128 0x4f
39993 +       .long   .LASF1612
39994 +       .byte   0x1
39995 +       .value  0x422
39996 +       .long   0x2f
39997 +       .long   .LLST93
39998 +       .uleb128 0x69
39999 +       .string "out"
40000 +       .byte   0x1
40001 +       .value  0x44a
40002 +       .long   .L158
40003 +       .uleb128 0x62
40004 +       .long   0x9c96
40005 +       .long   0x8c81
40006 +       .long   .LBB416
40007 +       .long   .LBE416
40008 +       .byte   0x1
40009 +       .value  0x43c
40010 +       .uleb128 0x4a
40011 +       .long   .LBB418
40012 +       .long   .LBE418
40013 +       .uleb128 0x61
40014 +       .long   0x8c93
40015 +       .long   .LLST94
40016 +       .byte   0x0
40017 +       .byte   0x0
40018 +       .uleb128 0x5b
40019 +       .long   0x9cb6
40020 +       .long   0x8ca0
40021 +       .long   .Ldebug_ranges0+0x1b8
40022 +       .byte   0x1
40023 +       .value  0x443
40024 +       .uleb128 0x53
40025 +       .long   0x8cc3
40026 +       .uleb128 0x53
40027 +       .long   0x8cb8
40028 +       .uleb128 0x53
40029 +       .long   0x8cad
40030 +       .byte   0x0
40031 +       .uleb128 0x58
40032 +       .long   0x8c81
40033 +       .long   .LBB421
40034 +       .long   .LBE421
40035 +       .byte   0x1
40036 +       .value  0x443
40037 +       .uleb128 0x4a
40038 +       .long   .LBB423
40039 +       .long   .LBE423
40040 +       .uleb128 0x61
40041 +       .long   0x8c93
40042 +       .long   .LLST95
40043 +       .byte   0x0
40044 +       .byte   0x0
40045 +       .byte   0x0
40046 +       .uleb128 0x6c
40047 +       .long   0x9d2d
40048 +       .byte   0x1
40049 +       .long   .LASF1613
40050 +       .byte   0x1
40051 +       .value  0x45b
40052 +       .byte   0x1
40053 +       .long   0x5a
40054 +       .long   .LFB918
40055 +       .long   .LFE918
40056 +       .byte   0x2
40057 +       .byte   0x74
40058 +       .sleb128 4
40059 +       .uleb128 0x4d
40060 +       .long   .LASF1611
40061 +       .byte   0x1
40062 +       .value  0x45a
40063 +       .long   0x5a
40064 +       .long   .LLST97
40065 +       .uleb128 0x58
40066 +       .long   0x8c81
40067 +       .long   .LBB426
40068 +       .long   .LBE426
40069 +       .byte   0x1
40070 +       .value  0x45c
40071 +       .uleb128 0x4a
40072 +       .long   .LBB428
40073 +       .long   .LBE428
40074 +       .uleb128 0x61
40075 +       .long   0x8c93
40076 +       .long   .LLST98
40077 +       .byte   0x0
40078 +       .byte   0x0
40079 +       .byte   0x0
40080 +       .uleb128 0x56
40081 +       .long   0x9d67
40082 +       .byte   0x1
40083 +       .long   .LASF1614
40084 +       .byte   0x1
40085 +       .value  0x61e
40086 +       .byte   0x1
40087 +       .long   .LFB925
40088 +       .long   .LFE925
40089 +       .byte   0x2
40090 +       .byte   0x74
40091 +       .sleb128 4
40092 +       .uleb128 0x4d
40093 +       .long   .LASF1615
40094 +       .byte   0x1
40095 +       .value  0x61d
40096 +       .long   0x77
40097 +       .long   .LLST100
40098 +       .uleb128 0x4f
40099 +       .long   .LASF1611
40100 +       .byte   0x1
40101 +       .value  0x61f
40102 +       .long   0x2f
40103 +       .long   .LLST101
40104 +       .byte   0x0
40105 +       .uleb128 0x6c
40106 +       .long   0x9db9
40107 +       .byte   0x1
40108 +       .long   .LASF1616
40109 +       .byte   0x1
40110 +       .value  0x454
40111 +       .byte   0x1
40112 +       .long   0x5a
40113 +       .long   .LFB917
40114 +       .long   .LFE917
40115 +       .byte   0x2
40116 +       .byte   0x74
40117 +       .sleb128 4
40118 +       .uleb128 0x4d
40119 +       .long   .LASF1611
40120 +       .byte   0x1
40121 +       .value  0x453
40122 +       .long   0x5a
40123 +       .long   .LLST103
40124 +       .uleb128 0x58
40125 +       .long   0x8c81
40126 +       .long   .LBB429
40127 +       .long   .LBE429
40128 +       .byte   0x1
40129 +       .value  0x455
40130 +       .uleb128 0x4a
40131 +       .long   .LBB431
40132 +       .long   .LBE431
40133 +       .uleb128 0x61
40134 +       .long   0x8c93
40135 +       .long   .LLST104
40136 +       .byte   0x0
40137 +       .byte   0x0
40138 +       .byte   0x0
40139 +       .uleb128 0x3b
40140 +       .long   0x9de4
40141 +       .long   .LASF1617
40142 +       .byte   0xb
40143 +       .value  0x62f
40144 +       .byte   0x1
40145 +       .long   0x21
40146 +       .byte   0x3
40147 +       .uleb128 0x3c
40148 +       .string "p"
40149 +       .byte   0xb
40150 +       .value  0x62e
40151 +       .long   0x15f9
40152 +       .uleb128 0x39
40153 +       .uleb128 0x39
40154 +       .uleb128 0x37
40155 +       .uleb128 0x39
40156 +       .uleb128 0x39
40157 +       .uleb128 0x3a
40158 +       .long   0x896c
40159 +       .byte   0x0
40160 +       .byte   0x0
40161 +       .byte   0x0
40162 +       .byte   0x0
40163 +       .byte   0x0
40164 +       .uleb128 0x6c
40165 +       .long   0x9eb0
40166 +       .byte   0x1
40167 +       .long   .LASF1618
40168 +       .byte   0x1
40169 +       .value  0x62c
40170 +       .byte   0x1
40171 +       .long   0x2f
40172 +       .long   .LFB926
40173 +       .long   .LFE926
40174 +       .byte   0x2
40175 +       .byte   0x74
40176 +       .sleb128 4
40177 +       .uleb128 0x4d
40178 +       .long   .LASF1615
40179 +       .byte   0x1
40180 +       .value  0x62b
40181 +       .long   0x77
40182 +       .long   .LLST106
40183 +       .uleb128 0x4f
40184 +       .long   .LASF1611
40185 +       .byte   0x1
40186 +       .value  0x62d
40187 +       .long   0x2f
40188 +       .long   .LLST107
40189 +       .uleb128 0x62
40190 +       .long   0x9e45
40191 +       .long   0x8c81
40192 +       .long   .LBB445
40193 +       .long   .LBE445
40194 +       .byte   0x1
40195 +       .value  0x62f
40196 +       .uleb128 0x4a
40197 +       .long   .LBB447
40198 +       .long   .LBE447
40199 +       .uleb128 0x3a
40200 +       .long   0x8c93
40201 +       .byte   0x0
40202 +       .byte   0x0
40203 +       .uleb128 0x58
40204 +       .long   0x9db9
40205 +       .long   .LBB448
40206 +       .long   .LBE448
40207 +       .byte   0x1
40208 +       .value  0x62f
40209 +       .uleb128 0x53
40210 +       .long   0x9dcb
40211 +       .uleb128 0x58
40212 +       .long   0x89a9
40213 +       .long   .LBB450
40214 +       .long   .LBE450
40215 +       .byte   0xb
40216 +       .value  0x630
40217 +       .uleb128 0x53
40218 +       .long   0x89c7
40219 +       .uleb128 0x5d
40220 +       .long   0x89bb
40221 +       .long   .LLST108
40222 +       .uleb128 0x58
40223 +       .long   0x8978
40224 +       .long   .LBB452
40225 +       .long   .LBE452
40226 +       .byte   0xb
40227 +       .value  0x621
40228 +       .uleb128 0x53
40229 +       .long   0x8993
40230 +       .uleb128 0x53
40231 +       .long   0x8989
40232 +       .uleb128 0x63
40233 +       .long   0x890f
40234 +       .long   .LBB454
40235 +       .long   .LBE454
40236 +       .byte   0xf
40237 +       .byte   0x41
40238 +       .uleb128 0x53
40239 +       .long   0x892a
40240 +       .uleb128 0x53
40241 +       .long   0x8920
40242 +       .byte   0x0
40243 +       .byte   0x0
40244 +       .byte   0x0
40245 +       .byte   0x0
40246 +       .byte   0x0
40247 +       .uleb128 0x57
40248 +       .long   0x9f33
40249 +       .byte   0x1
40250 +       .long   .LASF1619
40251 +       .byte   0x1
40252 +       .value  0x32b
40253 +       .byte   0x1
40254 +       .long   .LFB901
40255 +       .long   .LFE901
40256 +       .long   .LLST109
40257 +       .uleb128 0x4d
40258 +       .long   .LASF1620
40259 +       .byte   0x1
40260 +       .value  0x32a
40261 +       .long   0x21
40262 +       .long   .LLST110
40263 +       .uleb128 0x50
40264 +       .string "p"
40265 +       .byte   0x1
40266 +       .value  0x32c
40267 +       .long   0x15f9
40268 +       .long   .LLST111
40269 +       .uleb128 0x41
40270 +       .string "cpu"
40271 +       .byte   0x1
40272 +       .value  0x32d
40273 +       .long   0x21
40274 +       .uleb128 0x51
40275 +       .long   0x9f12
40276 +       .long   .LBB460
40277 +       .long   .LBE460
40278 +       .uleb128 0x4f
40279 +       .long   .LASF1562
40280 +       .byte   0x1
40281 +       .value  0x32d
40282 +       .long   0x21
40283 +       .long   .LLST112
40284 +       .byte   0x0
40285 +       .uleb128 0x58
40286 +       .long   0x8c81
40287 +       .long   .LBB461
40288 +       .long   .LBE461
40289 +       .byte   0x1
40290 +       .value  0x32c
40291 +       .uleb128 0x4a
40292 +       .long   .LBB463
40293 +       .long   .LBE463
40294 +       .uleb128 0x3a
40295 +       .long   0x8c93
40296 +       .byte   0x0
40297 +       .byte   0x0
40298 +       .byte   0x0
40299 +       .uleb128 0x68
40300 +       .long   0x9fd6
40301 +       .byte   0x1
40302 +       .long   .LASF1621
40303 +       .byte   0x1
40304 +       .value  0x3bd
40305 +       .byte   0x1
40306 +       .long   0x5a
40307 +       .long   .LFB909
40308 +       .long   .LFE909
40309 +       .long   .LLST113
40310 +       .uleb128 0x51
40311 +       .long   0x9fb1
40312 +       .long   .LBB474
40313 +       .long   .LBE474
40314 +       .uleb128 0x45
40315 +       .long   .LASF367
40316 +       .byte   0x1
40317 +       .value  0x3c0
40318 +       .long   0x86d0
40319 +       .byte   0x2
40320 +       .byte   0x91
40321 +       .sleb128 -48
40322 +       .uleb128 0x45
40323 +       .long   .LASF1622
40324 +       .byte   0x1
40325 +       .value  0x3c1
40326 +       .long   0x87a2
40327 +       .byte   0x2
40328 +       .byte   0x91
40329 +       .sleb128 -24
40330 +       .uleb128 0x50
40331 +       .string "eip"
40332 +       .byte   0x1
40333 +       .value  0x3c2
40334 +       .long   0x2f
40335 +       .long   .LLST114
40336 +       .uleb128 0x58
40337 +       .long   0x8c81
40338 +       .long   .LBB475
40339 +       .long   .LBE475
40340 +       .byte   0x1
40341 +       .value  0x3c5
40342 +       .uleb128 0x4a
40343 +       .long   .LBB477
40344 +       .long   .LBE477
40345 +       .uleb128 0x61
40346 +       .long   0x8c93
40347 +       .long   .LLST115
40348 +       .byte   0x0
40349 +       .byte   0x0
40350 +       .byte   0x0
40351 +       .uleb128 0x58
40352 +       .long   0x8c81
40353 +       .long   .LBB478
40354 +       .long   .LBE478
40355 +       .byte   0x1
40356 +       .value  0x3d0
40357 +       .uleb128 0x4a
40358 +       .long   .LBB480
40359 +       .long   .LBE480
40360 +       .uleb128 0x61
40361 +       .long   0x8c93
40362 +       .long   .LLST116
40363 +       .byte   0x0
40364 +       .byte   0x0
40365 +       .byte   0x0
40366 +       .uleb128 0x6c
40367 +       .long   0xa03a
40368 +       .byte   0x1
40369 +       .long   .LASF1623
40370 +       .byte   0x1
40371 +       .value  0x3da
40372 +       .byte   0x1
40373 +       .long   0x5a
40374 +       .long   .LFB910
40375 +       .long   .LFE910
40376 +       .byte   0x2
40377 +       .byte   0x74
40378 +       .sleb128 4
40379 +       .uleb128 0x41
40380 +       .string "pid"
40381 +       .byte   0x1
40382 +       .value  0x3db
40383 +       .long   0x21
40384 +       .uleb128 0x4a
40385 +       .long   .LBB485
40386 +       .long   .LBE485
40387 +       .uleb128 0x43
40388 +       .long   .LASF1624
40389 +       .byte   0x1
40390 +       .value  0x3de
40391 +       .long   0x15f9
40392 +       .uleb128 0x58
40393 +       .long   0x8c81
40394 +       .long   .LBB486
40395 +       .long   .LBE486
40396 +       .byte   0x1
40397 +       .value  0x3de
40398 +       .uleb128 0x4a
40399 +       .long   .LBB488
40400 +       .long   .LBE488
40401 +       .uleb128 0x61
40402 +       .long   0x8c93
40403 +       .long   .LLST118
40404 +       .byte   0x0
40405 +       .byte   0x0
40406 +       .byte   0x0
40407 +       .byte   0x0
40408 +       .uleb128 0x6c
40409 +       .long   0xa07c
40410 +       .byte   0x1
40411 +       .long   .LASF1625
40412 +       .byte   0x1
40413 +       .value  0x3e7
40414 +       .byte   0x1
40415 +       .long   0x5a
40416 +       .long   .LFB911
40417 +       .long   .LFE911
40418 +       .byte   0x2
40419 +       .byte   0x74
40420 +       .sleb128 4
40421 +       .uleb128 0x58
40422 +       .long   0x8c81
40423 +       .long   .LBB492
40424 +       .long   .LBE492
40425 +       .byte   0x1
40426 +       .value  0x3e9
40427 +       .uleb128 0x4a
40428 +       .long   .LBB494
40429 +       .long   .LBE494
40430 +       .uleb128 0x61
40431 +       .long   0x8c93
40432 +       .long   .LLST120
40433 +       .byte   0x0
40434 +       .byte   0x0
40435 +       .byte   0x0
40436 +       .uleb128 0x6c
40437 +       .long   0xa0be
40438 +       .byte   0x1
40439 +       .long   .LASF1626
40440 +       .byte   0x1
40441 +       .value  0x3ed
40442 +       .byte   0x1
40443 +       .long   0x5a
40444 +       .long   .LFB912
40445 +       .long   .LFE912
40446 +       .byte   0x2
40447 +       .byte   0x74
40448 +       .sleb128 4
40449 +       .uleb128 0x58
40450 +       .long   0x8c81
40451 +       .long   .LBB498
40452 +       .long   .LBE498
40453 +       .byte   0x1
40454 +       .value  0x3ef
40455 +       .uleb128 0x4a
40456 +       .long   .LBB500
40457 +       .long   .LBE500
40458 +       .uleb128 0x61
40459 +       .long   0x8c93
40460 +       .long   .LLST122
40461 +       .byte   0x0
40462 +       .byte   0x0
40463 +       .byte   0x0
40464 +       .uleb128 0x6c
40465 +       .long   0xa100
40466 +       .byte   0x1
40467 +       .long   .LASF1627
40468 +       .byte   0x1
40469 +       .value  0x3f3
40470 +       .byte   0x1
40471 +       .long   0x5a
40472 +       .long   .LFB913
40473 +       .long   .LFE913
40474 +       .byte   0x2
40475 +       .byte   0x74
40476 +       .sleb128 4
40477 +       .uleb128 0x58
40478 +       .long   0x8c81
40479 +       .long   .LBB504
40480 +       .long   .LBE504
40481 +       .byte   0x1
40482 +       .value  0x3f5
40483 +       .uleb128 0x4a
40484 +       .long   .LBB506
40485 +       .long   .LBE506
40486 +       .uleb128 0x61
40487 +       .long   0x8c93
40488 +       .long   .LLST124
40489 +       .byte   0x0
40490 +       .byte   0x0
40491 +       .byte   0x0
40492 +       .uleb128 0x6c
40493 +       .long   0xa142
40494 +       .byte   0x1
40495 +       .long   .LASF1628
40496 +       .byte   0x1
40497 +       .value  0x3f9
40498 +       .byte   0x1
40499 +       .long   0x5a
40500 +       .long   .LFB914
40501 +       .long   .LFE914
40502 +       .byte   0x2
40503 +       .byte   0x74
40504 +       .sleb128 4
40505 +       .uleb128 0x58
40506 +       .long   0x8c81
40507 +       .long   .LBB510
40508 +       .long   .LBE510
40509 +       .byte   0x1
40510 +       .value  0x3fb
40511 +       .uleb128 0x4a
40512 +       .long   .LBB512
40513 +       .long   .LBE512
40514 +       .uleb128 0x61
40515 +       .long   0x8c93
40516 +       .long   .LLST126
40517 +       .byte   0x0
40518 +       .byte   0x0
40519 +       .byte   0x0
40520 +       .uleb128 0x6c
40521 +       .long   0xa184
40522 +       .byte   0x1
40523 +       .long   .LASF1629
40524 +       .byte   0x1
40525 +       .value  0x463
40526 +       .byte   0x1
40527 +       .long   0x5a
40528 +       .long   .LFB919
40529 +       .long   .LFE919
40530 +       .byte   0x2
40531 +       .byte   0x74
40532 +       .sleb128 4
40533 +       .uleb128 0x58
40534 +       .long   0x8c81
40535 +       .long   .LBB516
40536 +       .long   .LBE516
40537 +       .byte   0x1
40538 +       .value  0x464
40539 +       .uleb128 0x4a
40540 +       .long   .LBB518
40541 +       .long   .LBE518
40542 +       .uleb128 0x61
40543 +       .long   0x8c93
40544 +       .long   .LLST128
40545 +       .byte   0x0
40546 +       .byte   0x0
40547 +       .byte   0x0
40548 +       .uleb128 0x6c
40549 +       .long   0xa1c2
40550 +       .byte   0x1
40551 +       .long   .LASF1630
40552 +       .byte   0x1
40553 +       .value  0x1d3
40554 +       .byte   0x1
40555 +       .long   0x21
40556 +       .long   .LFB895
40557 +       .long   .LFE895
40558 +       .byte   0x2
40559 +       .byte   0x74
40560 +       .sleb128 4
40561 +       .uleb128 0x4d
40562 +       .long   .LASF760
40563 +       .byte   0x1
40564 +       .value  0x1d2
40565 +       .long   0x8791
40566 +       .long   .LLST130
40567 +       .uleb128 0x4d
40568 +       .long   .LASF732
40569 +       .byte   0x1
40570 +       .value  0x1d2
40571 +       .long   0x2f
40572 +       .long   .LLST131
40573 +       .byte   0x0
40574 +       .uleb128 0x33
40575 +       .long   0xa1db
40576 +       .long   .LASF1631
40577 +       .byte   0xa
40578 +       .byte   0x83
40579 +       .byte   0x1
40580 +       .byte   0x3
40581 +       .uleb128 0x35
40582 +       .long   .LASF760
40583 +       .byte   0xa
40584 +       .byte   0x82
40585 +       .long   0x8791
40586 +       .byte   0x0
40587 +       .uleb128 0x68
40588 +       .long   0xa286
40589 +       .byte   0x1
40590 +       .long   .LASF1632
40591 +       .byte   0x1
40592 +       .value  0x1f0
40593 +       .byte   0x1
40594 +       .long   0x21
40595 +       .long   .LFB896
40596 +       .long   .LFE896
40597 +       .long   .LLST132
40598 +       .uleb128 0x4d
40599 +       .long   .LASF760
40600 +       .byte   0x1
40601 +       .value  0x1ef
40602 +       .long   0x8791
40603 +       .long   .LLST133
40604 +       .uleb128 0x4f
40605 +       .long   .LASF735
40606 +       .byte   0x1
40607 +       .value  0x1f1
40608 +       .long   0x887d
40609 +       .long   .LLST134
40610 +       .uleb128 0x45
40611 +       .long   .LASF53
40612 +       .byte   0x1
40613 +       .value  0x1f2
40614 +       .long   0x2f
40615 +       .byte   0x2
40616 +       .byte   0x91
40617 +       .sleb128 -16
40618 +       .uleb128 0x50
40619 +       .string "ret"
40620 +       .byte   0x1
40621 +       .value  0x1f3
40622 +       .long   0x21
40623 +       .long   .LLST135
40624 +       .uleb128 0x58
40625 +       .long   0x8ae7
40626 +       .long   .LBB533
40627 +       .long   .LBE533
40628 +       .byte   0x1
40629 +       .value  0x1f9
40630 +       .uleb128 0x53
40631 +       .long   0x8b01
40632 +       .uleb128 0x53
40633 +       .long   0x8af5
40634 +       .uleb128 0x4a
40635 +       .long   .LBB534
40636 +       .long   .LBE534
40637 +       .uleb128 0x3a
40638 +       .long   0x8b0d
40639 +       .uleb128 0x58
40640 +       .long   0x88a8
40641 +       .long   .LBB535
40642 +       .long   .LBE535
40643 +       .byte   0x1
40644 +       .value  0x156
40645 +       .uleb128 0x5d
40646 +       .long   0x88c0
40647 +       .long   .LLST136
40648 +       .uleb128 0x5d
40649 +       .long   0x88b5
40650 +       .long   .LLST137
40651 +       .byte   0x0
40652 +       .byte   0x0
40653 +       .byte   0x0
40654 +       .byte   0x0
40655 +       .uleb128 0x57
40656 +       .long   0xa316
40657 +       .byte   0x1
40658 +       .long   .LASF1633
40659 +       .byte   0x1
40660 +       .value  0x1b1
40661 +       .byte   0x1
40662 +       .long   .LFB894
40663 +       .long   .LFE894
40664 +       .long   .LLST138
40665 +       .uleb128 0x4d
40666 +       .long   .LASF760
40667 +       .byte   0x1
40668 +       .value  0x1b0
40669 +       .long   0x8791
40670 +       .long   .LLST139
40671 +       .uleb128 0x5a
40672 +       .string "cpu"
40673 +       .byte   0x1
40674 +       .value  0x1b0
40675 +       .long   0x21
40676 +       .long   .LLST140
40677 +       .uleb128 0x4f
40678 +       .long   .LASF735
40679 +       .byte   0x1
40680 +       .value  0x1b2
40681 +       .long   0x887d
40682 +       .long   .LLST141
40683 +       .uleb128 0x4f
40684 +       .long   .LASF53
40685 +       .byte   0x1
40686 +       .value  0x1b3
40687 +       .long   0x2f
40688 +       .long   .LLST142
40689 +       .uleb128 0x51
40690 +       .long   0xa2fa
40691 +       .long   .LBB546
40692 +       .long   .LBE546
40693 +       .uleb128 0x43
40694 +       .long   .LASF1571
40695 +       .byte   0x1
40696 +       .value  0x1b2
40697 +       .long   0x2f
40698 +       .byte   0x0
40699 +       .uleb128 0x58
40700 +       .long   0x8c5c
40701 +       .long   .LBB547
40702 +       .long   .LBE547
40703 +       .byte   0x1
40704 +       .value  0x1b8
40705 +       .uleb128 0x53
40706 +       .long   0x8c74
40707 +       .uleb128 0x53
40708 +       .long   0x8c69
40709 +       .byte   0x0
40710 +       .byte   0x0
40711 +       .uleb128 0x14
40712 +       .long   0x967
40713 +       .uleb128 0x12
40714 +       .long   0xa32b
40715 +       .long   0xbb
40716 +       .uleb128 0x13
40717 +       .long   0x28
40718 +       .byte   0xa
40719 +       .byte   0x0
40720 +       .uleb128 0x71
40721 +       .long   .LASF1634
40722 +       .byte   0x1
40723 +       .byte   0x31
40724 +       .long   0xa33c
40725 +       .byte   0x5
40726 +       .byte   0x3
40727 +       .long   __kstrtab_jiffies_64
40728 +       .uleb128 0x14
40729 +       .long   0xa31b
40730 +       .uleb128 0x71
40731 +       .long   .LASF1635
40732 +       .byte   0x1
40733 +       .byte   0x31
40734 +       .long   0x4fe5
40735 +       .byte   0x5
40736 +       .byte   0x3
40737 +       .long   __ksymtab_jiffies_64
40738 +       .uleb128 0x12
40739 +       .long   0xa362
40740 +       .long   0xbb
40741 +       .uleb128 0x13
40742 +       .long   0x28
40743 +       .byte   0xf
40744 +       .byte   0x0
40745 +       .uleb128 0x71
40746 +       .long   .LASF1636
40747 +       .byte   0x1
40748 +       .byte   0x53
40749 +       .long   0xa373
40750 +       .byte   0x5
40751 +       .byte   0x3
40752 +       .long   __kstrtab_boot_tvec_bases
40753 +       .uleb128 0x14
40754 +       .long   0xa352
40755 +       .uleb128 0x71
40756 +       .long   .LASF1637
40757 +       .byte   0x1
40758 +       .byte   0x53
40759 +       .long   0x4fe5
40760 +       .byte   0x5
40761 +       .byte   0x3
40762 +       .long   __ksymtab_boot_tvec_bases
40763 +       .uleb128 0x71
40764 +       .long   .LASF1638
40765 +       .byte   0x1
40766 +       .byte   0x54
40767 +       .long   0x887d
40768 +       .byte   0x5
40769 +       .byte   0x3
40770 +       .long   per_cpu__tvec_bases
40771 +       .uleb128 0x12
40772 +       .long   0xa3aa
40773 +       .long   0xbb
40774 +       .uleb128 0x13
40775 +       .long   0x28
40776 +       .byte   0xf
40777 +       .byte   0x0
40778 +       .uleb128 0x71
40779 +       .long   .LASF1639
40780 +       .byte   0x1
40781 +       .byte   0xac
40782 +       .long   0xa3bb
40783 +       .byte   0x5
40784 +       .byte   0x3
40785 +       .long   __kstrtab___round_jiffies
40786 +       .uleb128 0x14
40787 +       .long   0xa39a
40788 +       .uleb128 0x71
40789 +       .long   .LASF1640
40790 +       .byte   0x1
40791 +       .byte   0xac
40792 +       .long   0x4fe5
40793 +       .byte   0x5
40794 +       .byte   0x3
40795 +       .long   __ksymtab___round_jiffies
40796 +       .uleb128 0x12
40797 +       .long   0xa3e1
40798 +       .long   0xbb
40799 +       .uleb128 0x13
40800 +       .long   0x28
40801 +       .byte   0x18
40802 +       .byte   0x0
40803 +       .uleb128 0x71
40804 +       .long   .LASF1641
40805 +       .byte   0x1
40806 +       .byte   0xcc
40807 +       .long   0xa3f2
40808 +       .byte   0x5
40809 +       .byte   0x3
40810 +       .long   __kstrtab___round_jiffies_relative
40811 +       .uleb128 0x14
40812 +       .long   0xa3d1
40813 +       .uleb128 0x71
40814 +       .long   .LASF1642
40815 +       .byte   0x1
40816 +       .byte   0xcc
40817 +       .long   0x4fe5
40818 +       .byte   0x5
40819 +       .byte   0x3
40820 +       .long   __ksymtab___round_jiffies_relative
40821 +       .uleb128 0x12
40822 +       .long   0xa418
40823 +       .long   0xbb
40824 +       .uleb128 0x13
40825 +       .long   0x28
40826 +       .byte   0xd
40827 +       .byte   0x0
40828 +       .uleb128 0x71
40829 +       .long   .LASF1643
40830 +       .byte   0x1
40831 +       .byte   0xe1
40832 +       .long   0xa429
40833 +       .byte   0x5
40834 +       .byte   0x3
40835 +       .long   __kstrtab_round_jiffies
40836 +       .uleb128 0x14
40837 +       .long   0xa408
40838 +       .uleb128 0x71
40839 +       .long   .LASF1644
40840 +       .byte   0x1
40841 +       .byte   0xe1
40842 +       .long   0x4fe5
40843 +       .byte   0x5
40844 +       .byte   0x3
40845 +       .long   __ksymtab_round_jiffies
40846 +       .uleb128 0x12
40847 +       .long   0xa44f
40848 +       .long   0xbb
40849 +       .uleb128 0x13
40850 +       .long   0x28
40851 +       .byte   0x16
40852 +       .byte   0x0
40853 +       .uleb128 0x71
40854 +       .long   .LASF1645
40855 +       .byte   0x1
40856 +       .byte   0xf6
40857 +       .long   0xa460
40858 +       .byte   0x5
40859 +       .byte   0x3
40860 +       .long   __kstrtab_round_jiffies_relative
40861 +       .uleb128 0x14
40862 +       .long   0xa43f
40863 +       .uleb128 0x71
40864 +       .long   .LASF1646
40865 +       .byte   0x1
40866 +       .byte   0xf6
40867 +       .long   0x4fe5
40868 +       .byte   0x5
40869 +       .byte   0x3
40870 +       .long   __ksymtab_round_jiffies_relative
40871 +       .uleb128 0x12
40872 +       .long   0xa486
40873 +       .long   0xbb
40874 +       .uleb128 0x13
40875 +       .long   0x28
40876 +       .byte   0xa
40877 +       .byte   0x0
40878 +       .uleb128 0x45
40879 +       .long   .LASF1647
40880 +       .byte   0x1
40881 +       .value  0x148
40882 +       .long   0xa498
40883 +       .byte   0x5
40884 +       .byte   0x3
40885 +       .long   __kstrtab_init_timer
40886 +       .uleb128 0x14
40887 +       .long   0xa476
40888 +       .uleb128 0x45
40889 +       .long   .LASF1648
40890 +       .byte   0x1
40891 +       .value  0x148
40892 +       .long   0x4fe5
40893 +       .byte   0x5
40894 +       .byte   0x3
40895 +       .long   __ksymtab_init_timer
40896 +       .uleb128 0x12
40897 +       .long   0xa4bf
40898 +       .long   0xbb
40899 +       .uleb128 0x13
40900 +       .long   0x28
40901 +       .byte   0x15
40902 +       .byte   0x0
40903 +       .uleb128 0x45
40904 +       .long   .LASF1649
40905 +       .byte   0x1
40906 +       .value  0x14f
40907 +       .long   0xa4d1
40908 +       .byte   0x5
40909 +       .byte   0x3
40910 +       .long   __kstrtab_init_timer_deferrable
40911 +       .uleb128 0x14
40912 +       .long   0xa4af
40913 +       .uleb128 0x45
40914 +       .long   .LASF1650
40915 +       .byte   0x1
40916 +       .value  0x14f
40917 +       .long   0x4fe5
40918 +       .byte   0x5
40919 +       .byte   0x3
40920 +       .long   __ksymtab_init_timer_deferrable
40921 +       .uleb128 0x12
40922 +       .long   0xa4f8
40923 +       .long   0xbb
40924 +       .uleb128 0x13
40925 +       .long   0x28
40926 +       .byte   0xb
40927 +       .byte   0x0
40928 +       .uleb128 0x45
40929 +       .long   .LASF1651
40930 +       .byte   0x1
40931 +       .value  0x1a7
40932 +       .long   0xa50a
40933 +       .byte   0x5
40934 +       .byte   0x3
40935 +       .long   __kstrtab___mod_timer
40936 +       .uleb128 0x14
40937 +       .long   0xa4e8
40938 +       .uleb128 0x45
40939 +       .long   .LASF1652
40940 +       .byte   0x1
40941 +       .value  0x1a7
40942 +       .long   0x4fe5
40943 +       .byte   0x5
40944 +       .byte   0x3
40945 +       .long   __ksymtab___mod_timer
40946 +       .uleb128 0x12
40947 +       .long   0xa531
40948 +       .long   0xbb
40949 +       .uleb128 0x13
40950 +       .long   0x28
40951 +       .byte   0x9
40952 +       .byte   0x0
40953 +       .uleb128 0x45
40954 +       .long   .LASF1653
40955 +       .byte   0x1
40956 +       .value  0x1e2
40957 +       .long   0xa543
40958 +       .byte   0x5
40959 +       .byte   0x3
40960 +       .long   __kstrtab_mod_timer
40961 +       .uleb128 0x14
40962 +       .long   0xa521
40963 +       .uleb128 0x45
40964 +       .long   .LASF1654
40965 +       .byte   0x1
40966 +       .value  0x1e2
40967 +       .long   0x4fe5
40968 +       .byte   0x5
40969 +       .byte   0x3
40970 +       .long   __ksymtab_mod_timer
40971 +       .uleb128 0x12
40972 +       .long   0xa56a
40973 +       .long   0xbb
40974 +       .uleb128 0x13
40975 +       .long   0x28
40976 +       .byte   0x9
40977 +       .byte   0x0
40978 +       .uleb128 0x45
40979 +       .long   .LASF1655
40980 +       .byte   0x1
40981 +       .value  0x202
40982 +       .long   0xa57c
40983 +       .byte   0x5
40984 +       .byte   0x3
40985 +       .long   __kstrtab_del_timer
40986 +       .uleb128 0x14
40987 +       .long   0xa55a
40988 +       .uleb128 0x45
40989 +       .long   .LASF1656
40990 +       .byte   0x1
40991 +       .value  0x202
40992 +       .long   0x4fe5
40993 +       .byte   0x5
40994 +       .byte   0x3
40995 +       .long   __ksymtab_del_timer
40996 +       .uleb128 0x12
40997 +       .long   0xa5a3
40998 +       .long   0xbb
40999 +       .uleb128 0x13
41000 +       .long   0x28
41001 +       .byte   0x15
41002 +       .byte   0x0
41003 +       .uleb128 0x45
41004 +       .long   .LASF1657
41005 +       .byte   0x1
41006 +       .value  0x224
41007 +       .long   0xa5b5
41008 +       .byte   0x5
41009 +       .byte   0x3
41010 +       .long   __kstrtab_try_to_del_timer_sync
41011 +       .uleb128 0x14
41012 +       .long   0xa593
41013 +       .uleb128 0x45
41014 +       .long   .LASF1658
41015 +       .byte   0x1
41016 +       .value  0x224
41017 +       .long   0x4fe5
41018 +       .byte   0x5
41019 +       .byte   0x3
41020 +       .long   __ksymtab_try_to_del_timer_sync
41021 +       .uleb128 0x12
41022 +       .long   0xa5dc
41023 +       .long   0xbb
41024 +       .uleb128 0x13
41025 +       .long   0x28
41026 +       .byte   0xe
41027 +       .byte   0x0
41028 +       .uleb128 0x45
41029 +       .long   .LASF1659
41030 +       .byte   0x1
41031 +       .value  0x241
41032 +       .long   0xa5ee
41033 +       .byte   0x5
41034 +       .byte   0x3
41035 +       .long   __kstrtab_del_timer_sync
41036 +       .uleb128 0x14
41037 +       .long   0xa5cc
41038 +       .uleb128 0x45
41039 +       .long   .LASF1660
41040 +       .byte   0x1
41041 +       .value  0x241
41042 +       .long   0x4fe5
41043 +       .byte   0x5
41044 +       .byte   0x3
41045 +       .long   __ksymtab_del_timer_sync
41046 +       .uleb128 0x12
41047 +       .long   0xa615
41048 +       .long   0xbb
41049 +       .uleb128 0x13
41050 +       .long   0x28
41051 +       .byte   0x7
41052 +       .byte   0x0
41053 +       .uleb128 0x45
41054 +       .long   .LASF1661
41055 +       .byte   0x1
41056 +       .value  0x34d
41057 +       .long   0xa627
41058 +       .byte   0x5
41059 +       .byte   0x3
41060 +       .long   __kstrtab_avenrun
41061 +       .uleb128 0x14
41062 +       .long   0xa605
41063 +       .uleb128 0x45
41064 +       .long   .LASF1662
41065 +       .byte   0x1
41066 +       .value  0x34d
41067 +       .long   0x4fe5
41068 +       .byte   0x5
41069 +       .byte   0x3
41070 +       .long   __ksymtab_avenrun
41071 +       .uleb128 0x12
41072 +       .long   0xa64e
41073 +       .long   0xbb
41074 +       .uleb128 0x13
41075 +       .long   0x28
41076 +       .byte   0x10
41077 +       .byte   0x0
41078 +       .uleb128 0x45
41079 +       .long   .LASF1663
41080 +       .byte   0x1
41081 +       .value  0x44d
41082 +       .long   0xa660
41083 +       .byte   0x5
41084 +       .byte   0x3
41085 +       .long   __kstrtab_schedule_timeout
41086 +       .uleb128 0x14
41087 +       .long   0xa63e
41088 +       .uleb128 0x45
41089 +       .long   .LASF1664
41090 +       .byte   0x1
41091 +       .value  0x44d
41092 +       .long   0x4fe5
41093 +       .byte   0x5
41094 +       .byte   0x3
41095 +       .long   __ksymtab_schedule_timeout
41096 +       .uleb128 0x12
41097 +       .long   0xa687
41098 +       .long   0xbb
41099 +       .uleb128 0x13
41100 +       .long   0x28
41101 +       .byte   0x1e
41102 +       .byte   0x0
41103 +       .uleb128 0x45
41104 +       .long   .LASF1665
41105 +       .byte   0x1
41106 +       .value  0x458
41107 +       .long   0xa699
41108 +       .byte   0x5
41109 +       .byte   0x3
41110 +       .long   __kstrtab_schedule_timeout_interruptible
41111 +       .uleb128 0x14
41112 +       .long   0xa677
41113 +       .uleb128 0x45
41114 +       .long   .LASF1666
41115 +       .byte   0x1
41116 +       .value  0x458
41117 +       .long   0x4fe5
41118 +       .byte   0x5
41119 +       .byte   0x3
41120 +       .long   __ksymtab_schedule_timeout_interruptible
41121 +       .uleb128 0x12
41122 +       .long   0xa6c0
41123 +       .long   0xbb
41124 +       .uleb128 0x13
41125 +       .long   0x28
41126 +       .byte   0x20
41127 +       .byte   0x0
41128 +       .uleb128 0x45
41129 +       .long   .LASF1667
41130 +       .byte   0x1
41131 +       .value  0x45f
41132 +       .long   0xa6d2
41133 +       .byte   0x5
41134 +       .byte   0x3
41135 +       .long   __kstrtab_schedule_timeout_uninterruptible
41136 +       .uleb128 0x14
41137 +       .long   0xa6b0
41138 +       .uleb128 0x45
41139 +       .long   .LASF1668
41140 +       .byte   0x1
41141 +       .value  0x45f
41142 +       .long   0x4fe5
41143 +       .byte   0x5
41144 +       .byte   0x3
41145 +       .long   __ksymtab_schedule_timeout_uninterruptible
41146 +       .uleb128 0x12
41147 +       .long   0xa6f9
41148 +       .long   0x161c
41149 +       .uleb128 0x13
41150 +       .long   0x28
41151 +       .byte   0x1f
41152 +       .byte   0x0
41153 +       .uleb128 0x43
41154 +       .long   .LASF1669
41155 +       .byte   0x1
41156 +       .value  0x4ce
41157 +       .long   0xa6e9
41158 +       .uleb128 0x45
41159 +       .long   .LASF1670
41160 +       .byte   0x1
41161 +       .value  0x54c
41162 +       .long   0x2ddf
41163 +       .byte   0x5
41164 +       .byte   0x3
41165 +       .long   timers_nb
41166 +       .uleb128 0x12
41167 +       .long   0xa727
41168 +       .long   0xbb
41169 +       .uleb128 0x13
41170 +       .long   0x28
41171 +       .byte   0x6
41172 +       .byte   0x0
41173 +       .uleb128 0x45
41174 +       .long   .LASF1671
41175 +       .byte   0x1
41176 +       .value  0x625
41177 +       .long   0xa739
41178 +       .byte   0x5
41179 +       .byte   0x3
41180 +       .long   __kstrtab_msleep
41181 +       .uleb128 0x14
41182 +       .long   0xa717
41183 +       .uleb128 0x45
41184 +       .long   .LASF1672
41185 +       .byte   0x1
41186 +       .value  0x625
41187 +       .long   0x4fe5
41188 +       .byte   0x5
41189 +       .byte   0x3
41190 +       .long   __ksymtab_msleep
41191 +       .uleb128 0x12
41192 +       .long   0xa760
41193 +       .long   0xbb
41194 +       .uleb128 0x13
41195 +       .long   0x28
41196 +       .byte   0x14
41197 +       .byte   0x0
41198 +       .uleb128 0x45
41199 +       .long   .LASF1673
41200 +       .byte   0x1
41201 +       .value  0x634
41202 +       .long   0xa772
41203 +       .byte   0x5
41204 +       .byte   0x3
41205 +       .long   __kstrtab_msleep_interruptible
41206 +       .uleb128 0x14
41207 +       .long   0xa750
41208 +       .uleb128 0x45
41209 +       .long   .LASF1674
41210 +       .byte   0x1
41211 +       .value  0x634
41212 +       .long   0x4fe5
41213 +       .byte   0x5
41214 +       .byte   0x3
41215 +       .long   __ksymtab_msleep_interruptible
41216 +       .uleb128 0x12
41217 +       .long   0xa794
41218 +       .long   0x21
41219 +       .uleb128 0x72
41220 +       .byte   0x0
41221 +       .uleb128 0x73
41222 +       .long   .LASF1675
41223 +       .byte   0x38
41224 +       .byte   0x3f
41225 +       .long   0xa789
41226 +       .byte   0x1
41227 +       .byte   0x1
41228 +       .uleb128 0x12
41229 +       .long   0xa7ac
41230 +       .long   0x2f
41231 +       .uleb128 0x72
41232 +       .byte   0x0
41233 +       .uleb128 0x73
41234 +       .long   .LASF1676
41235 +       .byte   0x58
41236 +       .byte   0x30
41237 +       .long   0xa7a1
41238 +       .byte   0x1
41239 +       .byte   0x1
41240 +       .uleb128 0x73
41241 +       .long   .LASF1677
41242 +       .byte   0x58
41243 +       .byte   0x3a
41244 +       .long   0x2f
41245 +       .byte   0x1
41246 +       .byte   0x1
41247 +       .uleb128 0x73
41248 +       .long   .LASF1678
41249 +       .byte   0x59
41250 +       .byte   0x77
41251 +       .long   0x923
41252 +       .byte   0x1
41253 +       .byte   0x1
41254 +       .uleb128 0x73
41255 +       .long   .LASF1679
41256 +       .byte   0x8
41257 +       .byte   0x97
41258 +       .long   0x2f
41259 +       .byte   0x1
41260 +       .byte   0x1
41261 +       .uleb128 0x74
41262 +       .long   .LASF1680
41263 +       .byte   0x10
41264 +       .byte   0x58
41265 +       .long   0x2f
41266 +       .byte   0x1
41267 +       .byte   0x1
41268 +       .byte   0x54
41269 +       .uleb128 0x73
41270 +       .long   .LASF1681
41271 +       .byte   0x9
41272 +       .byte   0x9
41273 +       .long   0x15f9
41274 +       .byte   0x1
41275 +       .byte   0x1
41276 +       .uleb128 0x73
41277 +       .long   .LASF1682
41278 +       .byte   0x1f
41279 +       .byte   0x5b
41280 +       .long   0x173b
41281 +       .byte   0x1
41282 +       .byte   0x1
41283 +       .uleb128 0x73
41284 +       .long   .LASF1683
41285 +       .byte   0x1f
41286 +       .byte   0x5c
41287 +       .long   0x173b
41288 +       .byte   0x1
41289 +       .byte   0x1
41290 +       .uleb128 0x73
41291 +       .long   .LASF1684
41292 +       .byte   0x1f
41293 +       .byte   0x5d
41294 +       .long   0x170a
41295 +       .byte   0x1
41296 +       .byte   0x1
41297 +       .uleb128 0x73
41298 +       .long   .LASF1685
41299 +       .byte   0x5a
41300 +       .byte   0xc9
41301 +       .long   0x21
41302 +       .byte   0x1
41303 +       .byte   0x1
41304 +       .uleb128 0x74
41305 +       .long   .LASF1686
41306 +       .byte   0x1
41307 +       .byte   0x2f
41308 +       .long   0x189
41309 +       .byte   0x1
41310 +       .byte   0x5
41311 +       .byte   0x3
41312 +       .long   jiffies_64
41313 +       .uleb128 0x73
41314 +       .long   .LASF1687
41315 +       .byte   0x5b
41316 +       .byte   0x52
41317 +       .long   0x8941
41318 +       .byte   0x1
41319 +       .byte   0x1
41320 +       .uleb128 0x11
41321 +       .long   0xa863
41322 +       .byte   0x1
41323 +       .long   0x21
41324 +       .uleb128 0x6
41325 +       .long   0x1e7d
41326 +       .uleb128 0x6
41327 +       .long   0x21
41328 +       .byte   0x0
41329 +       .uleb128 0x75
41330 +       .long   .LASF1688
41331 +       .byte   0x5c
41332 +       .value  0x132
41333 +       .long   0xa871
41334 +       .byte   0x1
41335 +       .byte   0x1
41336 +       .uleb128 0x4
41337 +       .byte   0x4
41338 +       .long   0xa84e
41339 +       .uleb128 0x73
41340 +       .long   .LASF1689
41341 +       .byte   0x60
41342 +       .byte   0x16
41343 +       .long   0x2f
41344 +       .byte   0x1
41345 +       .byte   0x1
41346 +       .uleb128 0x73
41347 +       .long   .LASF1690
41348 +       .byte   0x61
41349 +       .byte   0x5d
41350 +       .long   0x21
41351 +       .byte   0x1
41352 +       .byte   0x1
41353 +       .uleb128 0x73
41354 +       .long   .LASF1691
41355 +       .byte   0x61
41356 +       .byte   0x5f
41357 +       .long   0x21
41358 +       .byte   0x1
41359 +       .byte   0x1
41360 +       .uleb128 0x73
41361 +       .long   .LASF1692
41362 +       .byte   0x61
41363 +       .byte   0x60
41364 +       .long   0x21
41365 +       .byte   0x1
41366 +       .byte   0x1
41367 +       .uleb128 0x73
41368 +       .long   .LASF1693
41369 +       .byte   0x61
41370 +       .byte   0x61
41371 +       .long   0x21
41372 +       .byte   0x1
41373 +       .byte   0x1
41374 +       .uleb128 0x73
41375 +       .long   .LASF1694
41376 +       .byte   0x62
41377 +       .byte   0x7b
41378 +       .long   0x21
41379 +       .byte   0x1
41380 +       .byte   0x1
41381 +       .uleb128 0x73
41382 +       .long   .LASF405
41383 +       .byte   0x59
41384 +       .byte   0x41
41385 +       .long   0x1e83
41386 +       .byte   0x1
41387 +       .byte   0x1
41388 +       .uleb128 0x73
41389 +       .long   .LASF1695
41390 +       .byte   0x59
41391 +       .byte   0x72
41392 +       .long   0x21
41393 +       .byte   0x1
41394 +       .byte   0x1
41395 +       .uleb128 0x73
41396 +       .long   .LASF1696
41397 +       .byte   0x59
41398 +       .byte   0x75
41399 +       .long   0x923
41400 +       .byte   0x1
41401 +       .byte   0x1
41402 +       .uleb128 0x73
41403 +       .long   .LASF413
41404 +       .byte   0x63
41405 +       .byte   0x7d
41406 +       .long   0xa8f9
41407 +       .byte   0x1
41408 +       .byte   0x1
41409 +       .uleb128 0x4
41410 +       .byte   0x4
41411 +       .long   0x1f51
41412 +       .uleb128 0x75
41413 +       .long   .LASF1697
41414 +       .byte   0x18
41415 +       .value  0x19e
41416 +       .long   0x2d82
41417 +       .byte   0x1
41418 +       .byte   0x1
41419 +       .uleb128 0x75
41420 +       .long   .LASF1698
41421 +       .byte   0x18
41422 +       .value  0x241
41423 +       .long   0x2bf1
41424 +       .byte   0x1
41425 +       .byte   0x1
41426 +       .uleb128 0x12
41427 +       .long   0xa926
41428 +       .long   0x2e6b
41429 +       .uleb128 0x72
41430 +       .byte   0x0
41431 +       .uleb128 0x73
41432 +       .long   .LASF1699
41433 +       .byte   0x3
41434 +       .byte   0x1a
41435 +       .long   0xa91b
41436 +       .byte   0x1
41437 +       .byte   0x1
41438 +       .uleb128 0x73
41439 +       .long   .LASF1700
41440 +       .byte   0x25
41441 +       .byte   0x71
41442 +       .long   0x2ee9
41443 +       .byte   0x1
41444 +       .byte   0x1
41445 +       .uleb128 0x73
41446 +       .long   .LASF1701
41447 +       .byte   0x25
41448 +       .byte   0x72
41449 +       .long   0x2ee9
41450 +       .byte   0x1
41451 +       .byte   0x1
41452 +       .uleb128 0x74
41453 +       .long   .LASF1702
41454 +       .byte   0x1
41455 +       .byte   0x53
41456 +       .long   0x378d
41457 +       .byte   0x1
41458 +       .byte   0x5
41459 +       .byte   0x3
41460 +       .long   boot_tvec_bases
41461 +       .uleb128 0x76
41462 +       .long   .LASF1703
41463 +       .byte   0x1
41464 +       .value  0x34d
41465 +       .long   0x8dc
41466 +       .byte   0x1
41467 +       .byte   0x5
41468 +       .byte   0x3
41469 +       .long   avenrun
41470 +       .uleb128 0x73
41471 +       .long   .LASF1704
41472 +       .byte   0xb
41473 +       .byte   0x7c
41474 +       .long   0x21
41475 +       .byte   0x1
41476 +       .byte   0x1
41477 +       .uleb128 0x75
41478 +       .long   .LASF1705
41479 +       .byte   0xb
41480 +       .value  0x47d
41481 +       .long   0x3070
41482 +       .byte   0x1
41483 +       .byte   0x1
41484 +       .uleb128 0x73
41485 +       .long   .LASF1706
41486 +       .byte   0x66
41487 +       .byte   0x21
41488 +       .long   0x45d4
41489 +       .byte   0x1
41490 +       .byte   0x1
41491 +       .uleb128 0x73
41492 +       .long   .LASF1707
41493 +       .byte   0x67
41494 +       .byte   0x19
41495 +       .long   0x46e1
41496 +       .byte   0x1
41497 +       .byte   0x1
41498 +       .uleb128 0x12
41499 +       .long   0xa9b2
41500 +       .long   0x942
41501 +       .uleb128 0x72
41502 +       .byte   0x0
41503 +       .uleb128 0x73
41504 +       .long   .LASF1708
41505 +       .byte   0x67
41506 +       .byte   0x21
41507 +       .long   0xa9a7
41508 +       .byte   0x1
41509 +       .byte   0x1
41510 +       .uleb128 0x73
41511 +       .long   .LASF1709
41512 +       .byte   0x68
41513 +       .byte   0xc
41514 +       .long   0x674
41515 +       .byte   0x1
41516 +       .byte   0x1
41517 +       .uleb128 0x12
41518 +       .long   0xa9dc
41519 +       .long   0x507e
41520 +       .uleb128 0x13
41521 +       .long   0x28
41522 +       .byte   0xdf
41523 +       .byte   0x0
41524 +       .uleb128 0x73
41525 +       .long   .LASF1016
41526 +       .byte   0x6a
41527 +       .byte   0xb2
41528 +       .long   0xa9cc
41529 +       .byte   0x1
41530 +       .byte   0x1
41531 +       .uleb128 0x73
41532 +       .long   .LASF1710
41533 +       .byte   0x6c
41534 +       .byte   0xd
41535 +       .long   0x21
41536 +       .byte   0x1
41537 +       .byte   0x1
41538 +       .uleb128 0x73
41539 +       .long   .LASF1711
41540 +       .byte   0x6d
41541 +       .byte   0x62
41542 +       .long   0x534f
41543 +       .byte   0x1
41544 +       .byte   0x1
41545 +       .uleb128 0x73
41546 +       .long   .LASF1712
41547 +       .byte   0x24
41548 +       .byte   0xb4
41549 +       .long   0x1680
41550 +       .byte   0x1
41551 +       .byte   0x1
41552 +       .uleb128 0x12
41553 +       .long   0xaa20
41554 +       .long   0x36e
41555 +       .uleb128 0x13
41556 +       .long   0x28
41557 +       .byte   0xf
41558 +       .byte   0x0
41559 +       .uleb128 0x73
41560 +       .long   .LASF1713
41561 +       .byte   0x15
41562 +       .byte   0xc1
41563 +       .long   0xaa10
41564 +       .byte   0x1
41565 +       .byte   0x1
41566 +       .uleb128 0x73
41567 +       .long   .LASF1714
41568 +       .byte   0x6e
41569 +       .byte   0x3a
41570 +       .long   0x86a5
41571 +       .byte   0x1
41572 +       .byte   0x1
41573 +       .uleb128 0x73
41574 +       .long   .LASF563
41575 +       .byte   0x6e
41576 +       .byte   0x7a
41577 +       .long   0x2bdb
41578 +       .byte   0x1
41579 +       .byte   0x1
41580 +       .uleb128 0x73
41581 +       .long   .LASF1715
41582 +       .byte   0x51
41583 +       .byte   0xe2
41584 +       .long   0x6e9c
41585 +       .byte   0x1
41586 +       .byte   0x1
41587 +       .uleb128 0x75
41588 +       .long   .LASF1716
41589 +       .byte   0x51
41590 +       .value  0x106
41591 +       .long   0x36ad
41592 +       .byte   0x1
41593 +       .byte   0x1
41594 +       .uleb128 0x5
41595 +       .long   0xaa73
41596 +       .byte   0x1
41597 +       .uleb128 0x6
41598 +       .long   0x160b
41599 +       .uleb128 0x6
41600 +       .long   0x77
41601 +       .byte   0x0
41602 +       .uleb128 0x76
41603 +       .long   .LASF1717
41604 +       .byte   0x1
41605 +       .value  0x3b9
41606 +       .long   0xaa86
41607 +       .byte   0x1
41608 +       .byte   0x5
41609 +       .byte   0x3
41610 +       .long   rec_event
41611 +       .uleb128 0x4
41612 +       .byte   0x4
41613 +       .long   0xaa62
41614 +       .byte   0x0
41615 +       .section        .debug_abbrev
41616 +       .uleb128 0x1
41617 +       .uleb128 0x11
41618 +       .byte   0x1
41619 +       .uleb128 0x10
41620 +       .uleb128 0x6
41621 +       .uleb128 0x52
41622 +       .uleb128 0x1
41623 +       .uleb128 0x25
41624 +       .uleb128 0xe
41625 +       .uleb128 0x13
41626 +       .uleb128 0xb
41627 +       .uleb128 0x3
41628 +       .uleb128 0xe
41629 +       .uleb128 0x1b
41630 +       .uleb128 0xe
41631 +       .byte   0x0
41632 +       .byte   0x0
41633 +       .uleb128 0x2
41634 +       .uleb128 0x24
41635 +       .byte   0x0
41636 +       .uleb128 0x3
41637 +       .uleb128 0x8
41638 +       .uleb128 0xb
41639 +       .uleb128 0xb
41640 +       .uleb128 0x3e
41641 +       .uleb128 0xb
41642 +       .byte   0x0
41643 +       .byte   0x0
41644 +       .uleb128 0x3
41645 +       .uleb128 0x24
41646 +       .byte   0x0
41647 +       .uleb128 0x3
41648 +       .uleb128 0xe
41649 +       .uleb128 0xb
41650 +       .uleb128 0xb
41651 +       .uleb128 0x3e
41652 +       .uleb128 0xb
41653 +       .byte   0x0
41654 +       .byte   0x0
41655 +       .uleb128 0x4
41656 +       .uleb128 0xf
41657 +       .byte   0x0
41658 +       .uleb128 0xb
41659 +       .uleb128 0xb
41660 +       .uleb128 0x49
41661 +       .uleb128 0x13
41662 +       .byte   0x0
41663 +       .byte   0x0
41664 +       .uleb128 0x5
41665 +       .uleb128 0x15
41666 +       .byte   0x1
41667 +       .uleb128 0x1
41668 +       .uleb128 0x13
41669 +       .uleb128 0x27
41670 +       .uleb128 0xc
41671 +       .byte   0x0
41672 +       .byte   0x0
41673 +       .uleb128 0x6
41674 +       .uleb128 0x5
41675 +       .byte   0x0
41676 +       .uleb128 0x49
41677 +       .uleb128 0x13
41678 +       .byte   0x0
41679 +       .byte   0x0
41680 +       .uleb128 0x7
41681 +       .uleb128 0x16
41682 +       .byte   0x0
41683 +       .uleb128 0x3
41684 +       .uleb128 0xe
41685 +       .uleb128 0x3a
41686 +       .uleb128 0xb
41687 +       .uleb128 0x3b
41688 +       .uleb128 0xb
41689 +       .uleb128 0x49
41690 +       .uleb128 0x13
41691 +       .byte   0x0
41692 +       .byte   0x0
41693 +       .uleb128 0x8
41694 +       .uleb128 0x16
41695 +       .byte   0x0
41696 +       .uleb128 0x3
41697 +       .uleb128 0x8
41698 +       .uleb128 0x3a
41699 +       .uleb128 0xb
41700 +       .uleb128 0x3b
41701 +       .uleb128 0xb
41702 +       .uleb128 0x49
41703 +       .uleb128 0x13
41704 +       .byte   0x0
41705 +       .byte   0x0
41706 +       .uleb128 0x9
41707 +       .uleb128 0x13
41708 +       .byte   0x1
41709 +       .uleb128 0x1
41710 +       .uleb128 0x13
41711 +       .uleb128 0xb
41712 +       .uleb128 0xb
41713 +       .uleb128 0x3a
41714 +       .uleb128 0xb
41715 +       .uleb128 0x3b
41716 +       .uleb128 0xb
41717 +       .byte   0x0
41718 +       .byte   0x0
41719 +       .uleb128 0xa
41720 +       .uleb128 0xd
41721 +       .byte   0x0
41722 +       .uleb128 0x3
41723 +       .uleb128 0xe
41724 +       .uleb128 0x3a
41725 +       .uleb128 0xb
41726 +       .uleb128 0x3b
41727 +       .uleb128 0xb
41728 +       .uleb128 0x49
41729 +       .uleb128 0x13
41730 +       .uleb128 0x38
41731 +       .uleb128 0xa
41732 +       .byte   0x0
41733 +       .byte   0x0
41734 +       .uleb128 0xb
41735 +       .uleb128 0xd
41736 +       .byte   0x0
41737 +       .uleb128 0x3
41738 +       .uleb128 0x8
41739 +       .uleb128 0x3a
41740 +       .uleb128 0xb
41741 +       .uleb128 0x3b
41742 +       .uleb128 0xb
41743 +       .uleb128 0x49
41744 +       .uleb128 0x13
41745 +       .uleb128 0x38
41746 +       .uleb128 0xa
41747 +       .byte   0x0
41748 +       .byte   0x0
41749 +       .uleb128 0xc
41750 +       .uleb128 0x17
41751 +       .byte   0x1
41752 +       .uleb128 0x1
41753 +       .uleb128 0x13
41754 +       .uleb128 0xb
41755 +       .uleb128 0xb
41756 +       .uleb128 0x3a
41757 +       .uleb128 0xb
41758 +       .uleb128 0x3b
41759 +       .uleb128 0xb
41760 +       .byte   0x0
41761 +       .byte   0x0
41762 +       .uleb128 0xd
41763 +       .uleb128 0xd
41764 +       .byte   0x0
41765 +       .uleb128 0x49
41766 +       .uleb128 0x13
41767 +       .byte   0x0
41768 +       .byte   0x0
41769 +       .uleb128 0xe
41770 +       .uleb128 0xd
41771 +       .byte   0x0
41772 +       .uleb128 0x3
41773 +       .uleb128 0xe
41774 +       .uleb128 0x3a
41775 +       .uleb128 0xb
41776 +       .uleb128 0x3b
41777 +       .uleb128 0xb
41778 +       .uleb128 0x49
41779 +       .uleb128 0x13
41780 +       .byte   0x0
41781 +       .byte   0x0
41782 +       .uleb128 0xf
41783 +       .uleb128 0x13
41784 +       .byte   0x1
41785 +       .uleb128 0x1
41786 +       .uleb128 0x13
41787 +       .uleb128 0x3
41788 +       .uleb128 0xe
41789 +       .uleb128 0xb
41790 +       .uleb128 0xb
41791 +       .uleb128 0x3a
41792 +       .uleb128 0xb
41793 +       .uleb128 0x3b
41794 +       .uleb128 0xb
41795 +       .byte   0x0
41796 +       .byte   0x0
41797 +       .uleb128 0x10
41798 +       .uleb128 0xd
41799 +       .byte   0x0
41800 +       .uleb128 0x49
41801 +       .uleb128 0x13
41802 +       .uleb128 0x38
41803 +       .uleb128 0xa
41804 +       .byte   0x0
41805 +       .byte   0x0
41806 +       .uleb128 0x11
41807 +       .uleb128 0x15
41808 +       .byte   0x1
41809 +       .uleb128 0x1
41810 +       .uleb128 0x13
41811 +       .uleb128 0x27
41812 +       .uleb128 0xc
41813 +       .uleb128 0x49
41814 +       .uleb128 0x13
41815 +       .byte   0x0
41816 +       .byte   0x0
41817 +       .uleb128 0x12
41818 +       .uleb128 0x1
41819 +       .byte   0x1
41820 +       .uleb128 0x1
41821 +       .uleb128 0x13
41822 +       .uleb128 0x49
41823 +       .uleb128 0x13
41824 +       .byte   0x0
41825 +       .byte   0x0
41826 +       .uleb128 0x13
41827 +       .uleb128 0x21
41828 +       .byte   0x0
41829 +       .uleb128 0x49
41830 +       .uleb128 0x13
41831 +       .uleb128 0x2f
41832 +       .uleb128 0xb
41833 +       .byte   0x0
41834 +       .byte   0x0
41835 +       .uleb128 0x14
41836 +       .uleb128 0x26
41837 +       .byte   0x0
41838 +       .uleb128 0x49
41839 +       .uleb128 0x13
41840 +       .byte   0x0
41841 +       .byte   0x0
41842 +       .uleb128 0x15
41843 +       .uleb128 0x13
41844 +       .byte   0x1
41845 +       .uleb128 0x1
41846 +       .uleb128 0x13
41847 +       .uleb128 0x3
41848 +       .uleb128 0xe
41849 +       .uleb128 0xb
41850 +       .uleb128 0xb
41851 +       .uleb128 0x3a
41852 +       .uleb128 0xb
41853 +       .uleb128 0x3b
41854 +       .uleb128 0x5
41855 +       .byte   0x0
41856 +       .byte   0x0
41857 +       .uleb128 0x16
41858 +       .uleb128 0xd
41859 +       .byte   0x0
41860 +       .uleb128 0x3
41861 +       .uleb128 0xe
41862 +       .uleb128 0x3a
41863 +       .uleb128 0xb
41864 +       .uleb128 0x3b
41865 +       .uleb128 0x5
41866 +       .uleb128 0x49
41867 +       .uleb128 0x13
41868 +       .uleb128 0x38
41869 +       .uleb128 0xa
41870 +       .byte   0x0
41871 +       .byte   0x0
41872 +       .uleb128 0x17
41873 +       .uleb128 0xd
41874 +       .byte   0x0
41875 +       .uleb128 0x3
41876 +       .uleb128 0x8
41877 +       .uleb128 0x3a
41878 +       .uleb128 0xb
41879 +       .uleb128 0x3b
41880 +       .uleb128 0x5
41881 +       .uleb128 0x49
41882 +       .uleb128 0x13
41883 +       .uleb128 0x38
41884 +       .uleb128 0xa
41885 +       .byte   0x0
41886 +       .byte   0x0
41887 +       .uleb128 0x18
41888 +       .uleb128 0x15
41889 +       .byte   0x0
41890 +       .uleb128 0x27
41891 +       .uleb128 0xc
41892 +       .uleb128 0x49
41893 +       .uleb128 0x13
41894 +       .byte   0x0
41895 +       .byte   0x0
41896 +       .uleb128 0x19
41897 +       .uleb128 0x15
41898 +       .byte   0x0
41899 +       .uleb128 0x27
41900 +       .uleb128 0xc
41901 +       .byte   0x0
41902 +       .byte   0x0
41903 +       .uleb128 0x1a
41904 +       .uleb128 0x13
41905 +       .byte   0x1
41906 +       .uleb128 0x1
41907 +       .uleb128 0x13
41908 +       .uleb128 0x3
41909 +       .uleb128 0xe
41910 +       .uleb128 0xb
41911 +       .uleb128 0x5
41912 +       .uleb128 0x3a
41913 +       .uleb128 0xb
41914 +       .uleb128 0x3b
41915 +       .uleb128 0xb
41916 +       .byte   0x0
41917 +       .byte   0x0
41918 +       .uleb128 0x1b
41919 +       .uleb128 0x17
41920 +       .byte   0x1
41921 +       .uleb128 0x1
41922 +       .uleb128 0x13
41923 +       .uleb128 0x3
41924 +       .uleb128 0xe
41925 +       .uleb128 0xb
41926 +       .uleb128 0x5
41927 +       .uleb128 0x3a
41928 +       .uleb128 0xb
41929 +       .uleb128 0x3b
41930 +       .uleb128 0x5
41931 +       .byte   0x0
41932 +       .byte   0x0
41933 +       .uleb128 0x1c
41934 +       .uleb128 0xd
41935 +       .byte   0x0
41936 +       .uleb128 0x3
41937 +       .uleb128 0xe
41938 +       .uleb128 0x3a
41939 +       .uleb128 0xb
41940 +       .uleb128 0x3b
41941 +       .uleb128 0x5
41942 +       .uleb128 0x49
41943 +       .uleb128 0x13
41944 +       .byte   0x0
41945 +       .byte   0x0
41946 +       .uleb128 0x1d
41947 +       .uleb128 0x13
41948 +       .byte   0x1
41949 +       .uleb128 0x1
41950 +       .uleb128 0x13
41951 +       .uleb128 0xb
41952 +       .uleb128 0xb
41953 +       .uleb128 0x3a
41954 +       .uleb128 0xb
41955 +       .uleb128 0x3b
41956 +       .uleb128 0x5
41957 +       .byte   0x0
41958 +       .byte   0x0
41959 +       .uleb128 0x1e
41960 +       .uleb128 0x16
41961 +       .byte   0x0
41962 +       .uleb128 0x3
41963 +       .uleb128 0xe
41964 +       .uleb128 0x3a
41965 +       .uleb128 0xb
41966 +       .uleb128 0x3b
41967 +       .uleb128 0x5
41968 +       .uleb128 0x49
41969 +       .uleb128 0x13
41970 +       .byte   0x0
41971 +       .byte   0x0
41972 +       .uleb128 0x1f
41973 +       .uleb128 0x13
41974 +       .byte   0x1
41975 +       .uleb128 0x1
41976 +       .uleb128 0x13
41977 +       .uleb128 0x3
41978 +       .uleb128 0xe
41979 +       .uleb128 0xb
41980 +       .uleb128 0x5
41981 +       .uleb128 0x3a
41982 +       .uleb128 0xb
41983 +       .uleb128 0x3b
41984 +       .uleb128 0x5
41985 +       .byte   0x0
41986 +       .byte   0x0
41987 +       .uleb128 0x20
41988 +       .uleb128 0xd
41989 +       .byte   0x0
41990 +       .uleb128 0x3
41991 +       .uleb128 0xe
41992 +       .uleb128 0x3a
41993 +       .uleb128 0xb
41994 +       .uleb128 0x3b
41995 +       .uleb128 0x5
41996 +       .uleb128 0x49
41997 +       .uleb128 0x13
41998 +       .uleb128 0xb
41999 +       .uleb128 0xb
42000 +       .uleb128 0xd
42001 +       .uleb128 0xb
42002 +       .uleb128 0xc
42003 +       .uleb128 0xb
42004 +       .uleb128 0x38
42005 +       .uleb128 0xa
42006 +       .byte   0x0
42007 +       .byte   0x0
42008 +       .uleb128 0x21
42009 +       .uleb128 0x13
42010 +       .byte   0x0
42011 +       .uleb128 0x3
42012 +       .uleb128 0xe
42013 +       .uleb128 0x3c
42014 +       .uleb128 0xc
42015 +       .byte   0x0
42016 +       .byte   0x0
42017 +       .uleb128 0x22
42018 +       .uleb128 0xf
42019 +       .byte   0x0
42020 +       .uleb128 0xb
42021 +       .uleb128 0xb
42022 +       .byte   0x0
42023 +       .byte   0x0
42024 +       .uleb128 0x23
42025 +       .uleb128 0x21
42026 +       .byte   0x0
42027 +       .uleb128 0x49
42028 +       .uleb128 0x13
42029 +       .byte   0x0
42030 +       .byte   0x0
42031 +       .uleb128 0x24
42032 +       .uleb128 0x13
42033 +       .byte   0x0
42034 +       .uleb128 0x3
42035 +       .uleb128 0xe
42036 +       .uleb128 0xb
42037 +       .uleb128 0xb
42038 +       .uleb128 0x3a
42039 +       .uleb128 0xb
42040 +       .uleb128 0x3b
42041 +       .uleb128 0x5
42042 +       .byte   0x0
42043 +       .byte   0x0
42044 +       .uleb128 0x25
42045 +       .uleb128 0x17
42046 +       .byte   0x1
42047 +       .uleb128 0x1
42048 +       .uleb128 0x13
42049 +       .uleb128 0x3
42050 +       .uleb128 0xe
42051 +       .uleb128 0xb
42052 +       .uleb128 0xb
42053 +       .uleb128 0x3a
42054 +       .uleb128 0xb
42055 +       .uleb128 0x3b
42056 +       .uleb128 0xb
42057 +       .byte   0x0
42058 +       .byte   0x0
42059 +       .uleb128 0x26
42060 +       .uleb128 0xd
42061 +       .byte   0x0
42062 +       .uleb128 0x3
42063 +       .uleb128 0x8
42064 +       .uleb128 0x3a
42065 +       .uleb128 0xb
42066 +       .uleb128 0x3b
42067 +       .uleb128 0xb
42068 +       .uleb128 0x49
42069 +       .uleb128 0x13
42070 +       .byte   0x0
42071 +       .byte   0x0
42072 +       .uleb128 0x27
42073 +       .uleb128 0x4
42074 +       .byte   0x1
42075 +       .uleb128 0x1
42076 +       .uleb128 0x13
42077 +       .uleb128 0x3
42078 +       .uleb128 0xe
42079 +       .uleb128 0xb
42080 +       .uleb128 0xb
42081 +       .uleb128 0x3a
42082 +       .uleb128 0xb
42083 +       .uleb128 0x3b
42084 +       .uleb128 0xb
42085 +       .byte   0x0
42086 +       .byte   0x0
42087 +       .uleb128 0x28
42088 +       .uleb128 0x28
42089 +       .byte   0x0
42090 +       .uleb128 0x3
42091 +       .uleb128 0xe
42092 +       .uleb128 0x1c
42093 +       .uleb128 0xd
42094 +       .byte   0x0
42095 +       .byte   0x0
42096 +       .uleb128 0x29
42097 +       .uleb128 0x13
42098 +       .byte   0x1
42099 +       .uleb128 0x1
42100 +       .uleb128 0x13
42101 +       .uleb128 0x3
42102 +       .uleb128 0x8
42103 +       .uleb128 0xb
42104 +       .uleb128 0xb
42105 +       .uleb128 0x3a
42106 +       .uleb128 0xb
42107 +       .uleb128 0x3b
42108 +       .uleb128 0xb
42109 +       .byte   0x0
42110 +       .byte   0x0
42111 +       .uleb128 0x2a
42112 +       .uleb128 0x13
42113 +       .byte   0x0
42114 +       .uleb128 0x3
42115 +       .uleb128 0xe
42116 +       .uleb128 0xb
42117 +       .uleb128 0xb
42118 +       .uleb128 0x3a
42119 +       .uleb128 0xb
42120 +       .uleb128 0x3b
42121 +       .uleb128 0xb
42122 +       .byte   0x0
42123 +       .byte   0x0
42124 +       .uleb128 0x2b
42125 +       .uleb128 0x17
42126 +       .byte   0x1
42127 +       .uleb128 0x1
42128 +       .uleb128 0x13
42129 +       .uleb128 0xb
42130 +       .uleb128 0xb
42131 +       .uleb128 0x3a
42132 +       .uleb128 0xb
42133 +       .uleb128 0x3b
42134 +       .uleb128 0x5
42135 +       .byte   0x0
42136 +       .byte   0x0
42137 +       .uleb128 0x2c
42138 +       .uleb128 0x4
42139 +       .byte   0x1
42140 +       .uleb128 0x1
42141 +       .uleb128 0x13
42142 +       .uleb128 0x3
42143 +       .uleb128 0xe
42144 +       .uleb128 0xb
42145 +       .uleb128 0xb
42146 +       .uleb128 0x3a
42147 +       .uleb128 0xb
42148 +       .uleb128 0x3b
42149 +       .uleb128 0x5
42150 +       .byte   0x0
42151 +       .byte   0x0
42152 +       .uleb128 0x2d
42153 +       .uleb128 0x35
42154 +       .byte   0x0
42155 +       .uleb128 0x49
42156 +       .uleb128 0x13
42157 +       .byte   0x0
42158 +       .byte   0x0
42159 +       .uleb128 0x2e
42160 +       .uleb128 0x13
42161 +       .byte   0x0
42162 +       .uleb128 0x3
42163 +       .uleb128 0x8
42164 +       .uleb128 0x3c
42165 +       .uleb128 0xc
42166 +       .byte   0x0
42167 +       .byte   0x0
42168 +       .uleb128 0x2f
42169 +       .uleb128 0xd
42170 +       .byte   0x0
42171 +       .uleb128 0x3
42172 +       .uleb128 0xe
42173 +       .uleb128 0x3a
42174 +       .uleb128 0xb
42175 +       .uleb128 0x3b
42176 +       .uleb128 0xb
42177 +       .uleb128 0x49
42178 +       .uleb128 0x13
42179 +       .uleb128 0xb
42180 +       .uleb128 0xb
42181 +       .uleb128 0xd
42182 +       .uleb128 0xb
42183 +       .uleb128 0xc
42184 +       .uleb128 0xb
42185 +       .uleb128 0x38
42186 +       .uleb128 0xa
42187 +       .byte   0x0
42188 +       .byte   0x0
42189 +       .uleb128 0x30
42190 +       .uleb128 0xd
42191 +       .byte   0x0
42192 +       .uleb128 0x3
42193 +       .uleb128 0x8
42194 +       .uleb128 0x3a
42195 +       .uleb128 0xb
42196 +       .uleb128 0x3b
42197 +       .uleb128 0x5
42198 +       .uleb128 0x49
42199 +       .uleb128 0x13
42200 +       .byte   0x0
42201 +       .byte   0x0
42202 +       .uleb128 0x31
42203 +       .uleb128 0x26
42204 +       .byte   0x0
42205 +       .byte   0x0
42206 +       .byte   0x0
42207 +       .uleb128 0x32
42208 +       .uleb128 0x2e
42209 +       .byte   0x0
42210 +       .uleb128 0x3
42211 +       .uleb128 0xe
42212 +       .uleb128 0x3a
42213 +       .uleb128 0xb
42214 +       .uleb128 0x3b
42215 +       .uleb128 0xb
42216 +       .uleb128 0x27
42217 +       .uleb128 0xc
42218 +       .uleb128 0x20
42219 +       .uleb128 0xb
42220 +       .byte   0x0
42221 +       .byte   0x0
42222 +       .uleb128 0x33
42223 +       .uleb128 0x2e
42224 +       .byte   0x1
42225 +       .uleb128 0x1
42226 +       .uleb128 0x13
42227 +       .uleb128 0x3
42228 +       .uleb128 0xe
42229 +       .uleb128 0x3a
42230 +       .uleb128 0xb
42231 +       .uleb128 0x3b
42232 +       .uleb128 0xb
42233 +       .uleb128 0x27
42234 +       .uleb128 0xc
42235 +       .uleb128 0x20
42236 +       .uleb128 0xb
42237 +       .byte   0x0
42238 +       .byte   0x0
42239 +       .uleb128 0x34
42240 +       .uleb128 0x5
42241 +       .byte   0x0
42242 +       .uleb128 0x3
42243 +       .uleb128 0x8
42244 +       .uleb128 0x3a
42245 +       .uleb128 0xb
42246 +       .uleb128 0x3b
42247 +       .uleb128 0xb
42248 +       .uleb128 0x49
42249 +       .uleb128 0x13
42250 +       .byte   0x0
42251 +       .byte   0x0
42252 +       .uleb128 0x35
42253 +       .uleb128 0x5
42254 +       .byte   0x0
42255 +       .uleb128 0x3
42256 +       .uleb128 0xe
42257 +       .uleb128 0x3a
42258 +       .uleb128 0xb
42259 +       .uleb128 0x3b
42260 +       .uleb128 0xb
42261 +       .uleb128 0x49
42262 +       .uleb128 0x13
42263 +       .byte   0x0
42264 +       .byte   0x0
42265 +       .uleb128 0x36
42266 +       .uleb128 0x2e
42267 +       .byte   0x1
42268 +       .uleb128 0x1
42269 +       .uleb128 0x13
42270 +       .uleb128 0x3
42271 +       .uleb128 0xe
42272 +       .uleb128 0x3a
42273 +       .uleb128 0xb
42274 +       .uleb128 0x3b
42275 +       .uleb128 0xb
42276 +       .uleb128 0x27
42277 +       .uleb128 0xc
42278 +       .uleb128 0x49
42279 +       .uleb128 0x13
42280 +       .uleb128 0x20
42281 +       .uleb128 0xb
42282 +       .byte   0x0
42283 +       .byte   0x0
42284 +       .uleb128 0x37
42285 +       .uleb128 0xb
42286 +       .byte   0x0
42287 +       .byte   0x0
42288 +       .byte   0x0
42289 +       .uleb128 0x38
42290 +       .uleb128 0x34
42291 +       .byte   0x0
42292 +       .uleb128 0x3
42293 +       .uleb128 0xe
42294 +       .uleb128 0x3a
42295 +       .uleb128 0xb
42296 +       .uleb128 0x3b
42297 +       .uleb128 0xb
42298 +       .uleb128 0x49
42299 +       .uleb128 0x13
42300 +       .byte   0x0
42301 +       .byte   0x0
42302 +       .uleb128 0x39
42303 +       .uleb128 0xb
42304 +       .byte   0x1
42305 +       .byte   0x0
42306 +       .byte   0x0
42307 +       .uleb128 0x3a
42308 +       .uleb128 0x34
42309 +       .byte   0x0
42310 +       .uleb128 0x31
42311 +       .uleb128 0x13
42312 +       .byte   0x0
42313 +       .byte   0x0
42314 +       .uleb128 0x3b
42315 +       .uleb128 0x2e
42316 +       .byte   0x1
42317 +       .uleb128 0x1
42318 +       .uleb128 0x13
42319 +       .uleb128 0x3
42320 +       .uleb128 0xe
42321 +       .uleb128 0x3a
42322 +       .uleb128 0xb
42323 +       .uleb128 0x3b
42324 +       .uleb128 0x5
42325 +       .uleb128 0x27
42326 +       .uleb128 0xc
42327 +       .uleb128 0x49
42328 +       .uleb128 0x13
42329 +       .uleb128 0x20
42330 +       .uleb128 0xb
42331 +       .byte   0x0
42332 +       .byte   0x0
42333 +       .uleb128 0x3c
42334 +       .uleb128 0x5
42335 +       .byte   0x0
42336 +       .uleb128 0x3
42337 +       .uleb128 0x8
42338 +       .uleb128 0x3a
42339 +       .uleb128 0xb
42340 +       .uleb128 0x3b
42341 +       .uleb128 0x5
42342 +       .uleb128 0x49
42343 +       .uleb128 0x13
42344 +       .byte   0x0
42345 +       .byte   0x0
42346 +       .uleb128 0x3d
42347 +       .uleb128 0x5
42348 +       .byte   0x0
42349 +       .uleb128 0x3
42350 +       .uleb128 0xe
42351 +       .uleb128 0x3a
42352 +       .uleb128 0xb
42353 +       .uleb128 0x3b
42354 +       .uleb128 0x5
42355 +       .uleb128 0x49
42356 +       .uleb128 0x13
42357 +       .byte   0x0
42358 +       .byte   0x0
42359 +       .uleb128 0x3e
42360 +       .uleb128 0xa
42361 +       .byte   0x0
42362 +       .uleb128 0x3
42363 +       .uleb128 0xe
42364 +       .uleb128 0x3a
42365 +       .uleb128 0xb
42366 +       .uleb128 0x3b
42367 +       .uleb128 0xb
42368 +       .byte   0x0
42369 +       .byte   0x0
42370 +       .uleb128 0x3f
42371 +       .uleb128 0x34
42372 +       .byte   0x0
42373 +       .uleb128 0x3
42374 +       .uleb128 0x8
42375 +       .uleb128 0x3a
42376 +       .uleb128 0xb
42377 +       .uleb128 0x3b
42378 +       .uleb128 0xb
42379 +       .uleb128 0x49
42380 +       .uleb128 0x13
42381 +       .byte   0x0
42382 +       .byte   0x0
42383 +       .uleb128 0x40
42384 +       .uleb128 0xa
42385 +       .byte   0x0
42386 +       .uleb128 0x31
42387 +       .uleb128 0x13
42388 +       .byte   0x0
42389 +       .byte   0x0
42390 +       .uleb128 0x41
42391 +       .uleb128 0x34
42392 +       .byte   0x0
42393 +       .uleb128 0x3
42394 +       .uleb128 0x8
42395 +       .uleb128 0x3a
42396 +       .uleb128 0xb
42397 +       .uleb128 0x3b
42398 +       .uleb128 0x5
42399 +       .uleb128 0x49
42400 +       .uleb128 0x13
42401 +       .byte   0x0
42402 +       .byte   0x0
42403 +       .uleb128 0x42
42404 +       .uleb128 0x2e
42405 +       .byte   0x1
42406 +       .uleb128 0x1
42407 +       .uleb128 0x13
42408 +       .uleb128 0x3
42409 +       .uleb128 0xe
42410 +       .uleb128 0x3a
42411 +       .uleb128 0xb
42412 +       .uleb128 0x3b
42413 +       .uleb128 0x5
42414 +       .uleb128 0x27
42415 +       .uleb128 0xc
42416 +       .uleb128 0x20
42417 +       .uleb128 0xb
42418 +       .byte   0x0
42419 +       .byte   0x0
42420 +       .uleb128 0x43
42421 +       .uleb128 0x34
42422 +       .byte   0x0
42423 +       .uleb128 0x3
42424 +       .uleb128 0xe
42425 +       .uleb128 0x3a
42426 +       .uleb128 0xb
42427 +       .uleb128 0x3b
42428 +       .uleb128 0x5
42429 +       .uleb128 0x49
42430 +       .uleb128 0x13
42431 +       .byte   0x0
42432 +       .byte   0x0
42433 +       .uleb128 0x44
42434 +       .uleb128 0x2e
42435 +       .byte   0x0
42436 +       .uleb128 0x3
42437 +       .uleb128 0xe
42438 +       .uleb128 0x3a
42439 +       .uleb128 0xb
42440 +       .uleb128 0x3b
42441 +       .uleb128 0xb
42442 +       .uleb128 0x27
42443 +       .uleb128 0xc
42444 +       .uleb128 0x49
42445 +       .uleb128 0x13
42446 +       .uleb128 0x20
42447 +       .uleb128 0xb
42448 +       .byte   0x0
42449 +       .byte   0x0
42450 +       .uleb128 0x45
42451 +       .uleb128 0x34
42452 +       .byte   0x0
42453 +       .uleb128 0x3
42454 +       .uleb128 0xe
42455 +       .uleb128 0x3a
42456 +       .uleb128 0xb
42457 +       .uleb128 0x3b
42458 +       .uleb128 0x5
42459 +       .uleb128 0x49
42460 +       .uleb128 0x13
42461 +       .uleb128 0x2
42462 +       .uleb128 0xa
42463 +       .byte   0x0
42464 +       .byte   0x0
42465 +       .uleb128 0x46
42466 +       .uleb128 0x2e
42467 +       .byte   0x0
42468 +       .uleb128 0x3
42469 +       .uleb128 0xe
42470 +       .uleb128 0x3a
42471 +       .uleb128 0xb
42472 +       .uleb128 0x3b
42473 +       .uleb128 0x5
42474 +       .uleb128 0x27
42475 +       .uleb128 0xc
42476 +       .uleb128 0x20
42477 +       .uleb128 0xb
42478 +       .byte   0x0
42479 +       .byte   0x0
42480 +       .uleb128 0x47
42481 +       .uleb128 0x2e
42482 +       .byte   0x1
42483 +       .uleb128 0x1
42484 +       .uleb128 0x13
42485 +       .uleb128 0x3f
42486 +       .uleb128 0xc
42487 +       .uleb128 0x3
42488 +       .uleb128 0xe
42489 +       .uleb128 0x3a
42490 +       .uleb128 0xb
42491 +       .uleb128 0x3b
42492 +       .uleb128 0xb
42493 +       .uleb128 0x27
42494 +       .uleb128 0xc
42495 +       .uleb128 0x49
42496 +       .uleb128 0x13
42497 +       .uleb128 0x11
42498 +       .uleb128 0x1
42499 +       .uleb128 0x12
42500 +       .uleb128 0x1
42501 +       .uleb128 0x40
42502 +       .uleb128 0x6
42503 +       .byte   0x0
42504 +       .byte   0x0
42505 +       .uleb128 0x48
42506 +       .uleb128 0x5
42507 +       .byte   0x0
42508 +       .uleb128 0x3
42509 +       .uleb128 0x8
42510 +       .uleb128 0x3a
42511 +       .uleb128 0xb
42512 +       .uleb128 0x3b
42513 +       .uleb128 0xb
42514 +       .uleb128 0x49
42515 +       .uleb128 0x13
42516 +       .uleb128 0x2
42517 +       .uleb128 0x6
42518 +       .byte   0x0
42519 +       .byte   0x0
42520 +       .uleb128 0x49
42521 +       .uleb128 0x2e
42522 +       .byte   0x1
42523 +       .uleb128 0x1
42524 +       .uleb128 0x13
42525 +       .uleb128 0x3f
42526 +       .uleb128 0xc
42527 +       .uleb128 0x3
42528 +       .uleb128 0xe
42529 +       .uleb128 0x3a
42530 +       .uleb128 0xb
42531 +       .uleb128 0x3b
42532 +       .uleb128 0xb
42533 +       .uleb128 0x27
42534 +       .uleb128 0xc
42535 +       .uleb128 0x49
42536 +       .uleb128 0x13
42537 +       .uleb128 0x11
42538 +       .uleb128 0x1
42539 +       .uleb128 0x12
42540 +       .uleb128 0x1
42541 +       .uleb128 0x40
42542 +       .uleb128 0xa
42543 +       .byte   0x0
42544 +       .byte   0x0
42545 +       .uleb128 0x4a
42546 +       .uleb128 0xb
42547 +       .byte   0x1
42548 +       .uleb128 0x11
42549 +       .uleb128 0x1
42550 +       .uleb128 0x12
42551 +       .uleb128 0x1
42552 +       .byte   0x0
42553 +       .byte   0x0
42554 +       .uleb128 0x4b
42555 +       .uleb128 0x34
42556 +       .byte   0x0
42557 +       .uleb128 0x3
42558 +       .uleb128 0xe
42559 +       .uleb128 0x3a
42560 +       .uleb128 0xb
42561 +       .uleb128 0x3b
42562 +       .uleb128 0xb
42563 +       .uleb128 0x49
42564 +       .uleb128 0x13
42565 +       .uleb128 0x2
42566 +       .uleb128 0x6
42567 +       .byte   0x0
42568 +       .byte   0x0
42569 +       .uleb128 0x4c
42570 +       .uleb128 0x2e
42571 +       .byte   0x1
42572 +       .uleb128 0x1
42573 +       .uleb128 0x13
42574 +       .uleb128 0x3
42575 +       .uleb128 0xe
42576 +       .uleb128 0x3a
42577 +       .uleb128 0xb
42578 +       .uleb128 0x3b
42579 +       .uleb128 0x5
42580 +       .uleb128 0x27
42581 +       .uleb128 0xc
42582 +       .uleb128 0x11
42583 +       .uleb128 0x1
42584 +       .uleb128 0x12
42585 +       .uleb128 0x1
42586 +       .uleb128 0x40
42587 +       .uleb128 0x6
42588 +       .byte   0x0
42589 +       .byte   0x0
42590 +       .uleb128 0x4d
42591 +       .uleb128 0x5
42592 +       .byte   0x0
42593 +       .uleb128 0x3
42594 +       .uleb128 0xe
42595 +       .uleb128 0x3a
42596 +       .uleb128 0xb
42597 +       .uleb128 0x3b
42598 +       .uleb128 0x5
42599 +       .uleb128 0x49
42600 +       .uleb128 0x13
42601 +       .uleb128 0x2
42602 +       .uleb128 0x6
42603 +       .byte   0x0
42604 +       .byte   0x0
42605 +       .uleb128 0x4e
42606 +       .uleb128 0x5
42607 +       .byte   0x0
42608 +       .uleb128 0x3
42609 +       .uleb128 0xe
42610 +       .uleb128 0x3a
42611 +       .uleb128 0xb
42612 +       .uleb128 0x3b
42613 +       .uleb128 0x5
42614 +       .uleb128 0x49
42615 +       .uleb128 0x13
42616 +       .uleb128 0x2
42617 +       .uleb128 0xa
42618 +       .byte   0x0
42619 +       .byte   0x0
42620 +       .uleb128 0x4f
42621 +       .uleb128 0x34
42622 +       .byte   0x0
42623 +       .uleb128 0x3
42624 +       .uleb128 0xe
42625 +       .uleb128 0x3a
42626 +       .uleb128 0xb
42627 +       .uleb128 0x3b
42628 +       .uleb128 0x5
42629 +       .uleb128 0x49
42630 +       .uleb128 0x13
42631 +       .uleb128 0x2
42632 +       .uleb128 0x6
42633 +       .byte   0x0
42634 +       .byte   0x0
42635 +       .uleb128 0x50
42636 +       .uleb128 0x34
42637 +       .byte   0x0
42638 +       .uleb128 0x3
42639 +       .uleb128 0x8
42640 +       .uleb128 0x3a
42641 +       .uleb128 0xb
42642 +       .uleb128 0x3b
42643 +       .uleb128 0x5
42644 +       .uleb128 0x49
42645 +       .uleb128 0x13
42646 +       .uleb128 0x2
42647 +       .uleb128 0x6
42648 +       .byte   0x0
42649 +       .byte   0x0
42650 +       .uleb128 0x51
42651 +       .uleb128 0xb
42652 +       .byte   0x1
42653 +       .uleb128 0x1
42654 +       .uleb128 0x13
42655 +       .uleb128 0x11
42656 +       .uleb128 0x1
42657 +       .uleb128 0x12
42658 +       .uleb128 0x1
42659 +       .byte   0x0
42660 +       .byte   0x0
42661 +       .uleb128 0x52
42662 +       .uleb128 0x1d
42663 +       .byte   0x1
42664 +       .uleb128 0x31
42665 +       .uleb128 0x13
42666 +       .uleb128 0x55
42667 +       .uleb128 0x6
42668 +       .uleb128 0x58
42669 +       .uleb128 0xb
42670 +       .uleb128 0x59
42671 +       .uleb128 0x5
42672 +       .byte   0x0
42673 +       .byte   0x0
42674 +       .uleb128 0x53
42675 +       .uleb128 0x5
42676 +       .byte   0x0
42677 +       .uleb128 0x31
42678 +       .uleb128 0x13
42679 +       .byte   0x0
42680 +       .byte   0x0
42681 +       .uleb128 0x54
42682 +       .uleb128 0x1d
42683 +       .byte   0x1
42684 +       .uleb128 0x31
42685 +       .uleb128 0x13
42686 +       .uleb128 0x55
42687 +       .uleb128 0x6
42688 +       .uleb128 0x58
42689 +       .uleb128 0xb
42690 +       .uleb128 0x59
42691 +       .uleb128 0xb
42692 +       .byte   0x0
42693 +       .byte   0x0
42694 +       .uleb128 0x55
42695 +       .uleb128 0x5
42696 +       .byte   0x0
42697 +       .uleb128 0x31
42698 +       .uleb128 0x13
42699 +       .uleb128 0x2
42700 +       .uleb128 0xa
42701 +       .byte   0x0
42702 +       .byte   0x0
42703 +       .uleb128 0x56
42704 +       .uleb128 0x2e
42705 +       .byte   0x1
42706 +       .uleb128 0x1
42707 +       .uleb128 0x13
42708 +       .uleb128 0x3f
42709 +       .uleb128 0xc
42710 +       .uleb128 0x3
42711 +       .uleb128 0xe
42712 +       .uleb128 0x3a
42713 +       .uleb128 0xb
42714 +       .uleb128 0x3b
42715 +       .uleb128 0x5
42716 +       .uleb128 0x27
42717 +       .uleb128 0xc
42718 +       .uleb128 0x11
42719 +       .uleb128 0x1
42720 +       .uleb128 0x12
42721 +       .uleb128 0x1
42722 +       .uleb128 0x40
42723 +       .uleb128 0xa
42724 +       .byte   0x0
42725 +       .byte   0x0
42726 +       .uleb128 0x57
42727 +       .uleb128 0x2e
42728 +       .byte   0x1
42729 +       .uleb128 0x1
42730 +       .uleb128 0x13
42731 +       .uleb128 0x3f
42732 +       .uleb128 0xc
42733 +       .uleb128 0x3
42734 +       .uleb128 0xe
42735 +       .uleb128 0x3a
42736 +       .uleb128 0xb
42737 +       .uleb128 0x3b
42738 +       .uleb128 0x5
42739 +       .uleb128 0x27
42740 +       .uleb128 0xc
42741 +       .uleb128 0x11
42742 +       .uleb128 0x1
42743 +       .uleb128 0x12
42744 +       .uleb128 0x1
42745 +       .uleb128 0x40
42746 +       .uleb128 0x6
42747 +       .byte   0x0
42748 +       .byte   0x0
42749 +       .uleb128 0x58
42750 +       .uleb128 0x1d
42751 +       .byte   0x1
42752 +       .uleb128 0x31
42753 +       .uleb128 0x13
42754 +       .uleb128 0x11
42755 +       .uleb128 0x1
42756 +       .uleb128 0x12
42757 +       .uleb128 0x1
42758 +       .uleb128 0x58
42759 +       .uleb128 0xb
42760 +       .uleb128 0x59
42761 +       .uleb128 0x5
42762 +       .byte   0x0
42763 +       .byte   0x0
42764 +       .uleb128 0x59
42765 +       .uleb128 0x2e
42766 +       .byte   0x1
42767 +       .uleb128 0x1
42768 +       .uleb128 0x13
42769 +       .uleb128 0x3
42770 +       .uleb128 0xe
42771 +       .uleb128 0x3a
42772 +       .uleb128 0xb
42773 +       .uleb128 0x3b
42774 +       .uleb128 0x5
42775 +       .uleb128 0x27
42776 +       .uleb128 0xc
42777 +       .uleb128 0x49
42778 +       .uleb128 0x13
42779 +       .uleb128 0x11
42780 +       .uleb128 0x1
42781 +       .uleb128 0x12
42782 +       .uleb128 0x1
42783 +       .uleb128 0x40
42784 +       .uleb128 0x6
42785 +       .byte   0x0
42786 +       .byte   0x0
42787 +       .uleb128 0x5a
42788 +       .uleb128 0x5
42789 +       .byte   0x0
42790 +       .uleb128 0x3
42791 +       .uleb128 0x8
42792 +       .uleb128 0x3a
42793 +       .uleb128 0xb
42794 +       .uleb128 0x3b
42795 +       .uleb128 0x5
42796 +       .uleb128 0x49
42797 +       .uleb128 0x13
42798 +       .uleb128 0x2
42799 +       .uleb128 0x6
42800 +       .byte   0x0
42801 +       .byte   0x0
42802 +       .uleb128 0x5b
42803 +       .uleb128 0x1d
42804 +       .byte   0x1
42805 +       .uleb128 0x1
42806 +       .uleb128 0x13
42807 +       .uleb128 0x31
42808 +       .uleb128 0x13
42809 +       .uleb128 0x55
42810 +       .uleb128 0x6
42811 +       .uleb128 0x58
42812 +       .uleb128 0xb
42813 +       .uleb128 0x59
42814 +       .uleb128 0x5
42815 +       .byte   0x0
42816 +       .byte   0x0
42817 +       .uleb128 0x5c
42818 +       .uleb128 0x1d
42819 +       .byte   0x1
42820 +       .uleb128 0x1
42821 +       .uleb128 0x13
42822 +       .uleb128 0x31
42823 +       .uleb128 0x13
42824 +       .uleb128 0x55
42825 +       .uleb128 0x6
42826 +       .uleb128 0x58
42827 +       .uleb128 0xb
42828 +       .uleb128 0x59
42829 +       .uleb128 0xb
42830 +       .byte   0x0
42831 +       .byte   0x0
42832 +       .uleb128 0x5d
42833 +       .uleb128 0x5
42834 +       .byte   0x0
42835 +       .uleb128 0x31
42836 +       .uleb128 0x13
42837 +       .uleb128 0x2
42838 +       .uleb128 0x6
42839 +       .byte   0x0
42840 +       .byte   0x0
42841 +       .uleb128 0x5e
42842 +       .uleb128 0xb
42843 +       .byte   0x1
42844 +       .uleb128 0x1
42845 +       .uleb128 0x13
42846 +       .byte   0x0
42847 +       .byte   0x0
42848 +       .uleb128 0x5f
42849 +       .uleb128 0x34
42850 +       .byte   0x0
42851 +       .uleb128 0x3
42852 +       .uleb128 0xe
42853 +       .uleb128 0x49
42854 +       .uleb128 0x13
42855 +       .uleb128 0x34
42856 +       .uleb128 0xc
42857 +       .uleb128 0x2
42858 +       .uleb128 0xa
42859 +       .byte   0x0
42860 +       .byte   0x0
42861 +       .uleb128 0x60
42862 +       .uleb128 0xb
42863 +       .byte   0x1
42864 +       .uleb128 0x1
42865 +       .uleb128 0x13
42866 +       .uleb128 0x55
42867 +       .uleb128 0x6
42868 +       .byte   0x0
42869 +       .byte   0x0
42870 +       .uleb128 0x61
42871 +       .uleb128 0x34
42872 +       .byte   0x0
42873 +       .uleb128 0x31
42874 +       .uleb128 0x13
42875 +       .uleb128 0x2
42876 +       .uleb128 0x6
42877 +       .byte   0x0
42878 +       .byte   0x0
42879 +       .uleb128 0x62
42880 +       .uleb128 0x1d
42881 +       .byte   0x1
42882 +       .uleb128 0x1
42883 +       .uleb128 0x13
42884 +       .uleb128 0x31
42885 +       .uleb128 0x13
42886 +       .uleb128 0x11
42887 +       .uleb128 0x1
42888 +       .uleb128 0x12
42889 +       .uleb128 0x1
42890 +       .uleb128 0x58
42891 +       .uleb128 0xb
42892 +       .uleb128 0x59
42893 +       .uleb128 0x5
42894 +       .byte   0x0
42895 +       .byte   0x0
42896 +       .uleb128 0x63
42897 +       .uleb128 0x1d
42898 +       .byte   0x1
42899 +       .uleb128 0x31
42900 +       .uleb128 0x13
42901 +       .uleb128 0x11
42902 +       .uleb128 0x1
42903 +       .uleb128 0x12
42904 +       .uleb128 0x1
42905 +       .uleb128 0x58
42906 +       .uleb128 0xb
42907 +       .uleb128 0x59
42908 +       .uleb128 0xb
42909 +       .byte   0x0
42910 +       .byte   0x0
42911 +       .uleb128 0x64
42912 +       .uleb128 0xb
42913 +       .byte   0x1
42914 +       .uleb128 0x55
42915 +       .uleb128 0x6
42916 +       .byte   0x0
42917 +       .byte   0x0
42918 +       .uleb128 0x65
42919 +       .uleb128 0x34
42920 +       .byte   0x0
42921 +       .uleb128 0x31
42922 +       .uleb128 0x13
42923 +       .uleb128 0x2
42924 +       .uleb128 0xa
42925 +       .byte   0x0
42926 +       .byte   0x0
42927 +       .uleb128 0x66
42928 +       .uleb128 0x1d
42929 +       .byte   0x1
42930 +       .uleb128 0x1
42931 +       .uleb128 0x13
42932 +       .uleb128 0x31
42933 +       .uleb128 0x13
42934 +       .uleb128 0x11
42935 +       .uleb128 0x1
42936 +       .uleb128 0x12
42937 +       .uleb128 0x1
42938 +       .uleb128 0x58
42939 +       .uleb128 0xb
42940 +       .uleb128 0x59
42941 +       .uleb128 0xb
42942 +       .byte   0x0
42943 +       .byte   0x0
42944 +       .uleb128 0x67
42945 +       .uleb128 0x1d
42946 +       .byte   0x0
42947 +       .uleb128 0x31
42948 +       .uleb128 0x13
42949 +       .uleb128 0x11
42950 +       .uleb128 0x1
42951 +       .uleb128 0x12
42952 +       .uleb128 0x1
42953 +       .uleb128 0x58
42954 +       .uleb128 0xb
42955 +       .uleb128 0x59
42956 +       .uleb128 0xb
42957 +       .byte   0x0
42958 +       .byte   0x0
42959 +       .uleb128 0x68
42960 +       .uleb128 0x2e
42961 +       .byte   0x1
42962 +       .uleb128 0x1
42963 +       .uleb128 0x13
42964 +       .uleb128 0x3f
42965 +       .uleb128 0xc
42966 +       .uleb128 0x3
42967 +       .uleb128 0xe
42968 +       .uleb128 0x3a
42969 +       .uleb128 0xb
42970 +       .uleb128 0x3b
42971 +       .uleb128 0x5
42972 +       .uleb128 0x27
42973 +       .uleb128 0xc
42974 +       .uleb128 0x49
42975 +       .uleb128 0x13
42976 +       .uleb128 0x11
42977 +       .uleb128 0x1
42978 +       .uleb128 0x12
42979 +       .uleb128 0x1
42980 +       .uleb128 0x40
42981 +       .uleb128 0x6
42982 +       .byte   0x0
42983 +       .byte   0x0
42984 +       .uleb128 0x69
42985 +       .uleb128 0xa
42986 +       .byte   0x0
42987 +       .uleb128 0x3
42988 +       .uleb128 0x8
42989 +       .uleb128 0x3a
42990 +       .uleb128 0xb
42991 +       .uleb128 0x3b
42992 +       .uleb128 0x5
42993 +       .uleb128 0x11
42994 +       .uleb128 0x1
42995 +       .byte   0x0
42996 +       .byte   0x0
42997 +       .uleb128 0x6a
42998 +       .uleb128 0x34
42999 +       .byte   0x0
43000 +       .uleb128 0x3
43001 +       .uleb128 0x8
43002 +       .uleb128 0x3a
43003 +       .uleb128 0xb
43004 +       .uleb128 0x3b
43005 +       .uleb128 0x5
43006 +       .uleb128 0x49
43007 +       .uleb128 0x13
43008 +       .uleb128 0x2
43009 +       .uleb128 0xa
43010 +       .byte   0x0
43011 +       .byte   0x0
43012 +       .uleb128 0x6b
43013 +       .uleb128 0x2e
43014 +       .byte   0x1
43015 +       .uleb128 0x1
43016 +       .uleb128 0x13
43017 +       .uleb128 0x3
43018 +       .uleb128 0xe
43019 +       .uleb128 0x3a
43020 +       .uleb128 0xb
43021 +       .uleb128 0x3b
43022 +       .uleb128 0x5
43023 +       .uleb128 0x27
43024 +       .uleb128 0xc
43025 +       .uleb128 0x11
43026 +       .uleb128 0x1
43027 +       .uleb128 0x12
43028 +       .uleb128 0x1
43029 +       .uleb128 0x40
43030 +       .uleb128 0xa
43031 +       .byte   0x0
43032 +       .byte   0x0
43033 +       .uleb128 0x6c
43034 +       .uleb128 0x2e
43035 +       .byte   0x1
43036 +       .uleb128 0x1
43037 +       .uleb128 0x13
43038 +       .uleb128 0x3f
43039 +       .uleb128 0xc
43040 +       .uleb128 0x3
43041 +       .uleb128 0xe
43042 +       .uleb128 0x3a
43043 +       .uleb128 0xb
43044 +       .uleb128 0x3b
43045 +       .uleb128 0x5
43046 +       .uleb128 0x27
43047 +       .uleb128 0xc
43048 +       .uleb128 0x49
43049 +       .uleb128 0x13
43050 +       .uleb128 0x11
43051 +       .uleb128 0x1
43052 +       .uleb128 0x12
43053 +       .uleb128 0x1
43054 +       .uleb128 0x40
43055 +       .uleb128 0xa
43056 +       .byte   0x0
43057 +       .byte   0x0
43058 +       .uleb128 0x6d
43059 +       .uleb128 0x2e
43060 +       .byte   0x0
43061 +       .uleb128 0x3
43062 +       .uleb128 0xe
43063 +       .uleb128 0x3a
43064 +       .uleb128 0xb
43065 +       .uleb128 0x3b
43066 +       .uleb128 0x5
43067 +       .uleb128 0x27
43068 +       .uleb128 0xc
43069 +       .uleb128 0x49
43070 +       .uleb128 0x13
43071 +       .uleb128 0x20
43072 +       .uleb128 0xb
43073 +       .byte   0x0
43074 +       .byte   0x0
43075 +       .uleb128 0x6e
43076 +       .uleb128 0x1d
43077 +       .byte   0x0
43078 +       .uleb128 0x31
43079 +       .uleb128 0x13
43080 +       .uleb128 0x55
43081 +       .uleb128 0x6
43082 +       .uleb128 0x58
43083 +       .uleb128 0xb
43084 +       .uleb128 0x59
43085 +       .uleb128 0x5
43086 +       .byte   0x0
43087 +       .byte   0x0
43088 +       .uleb128 0x6f
43089 +       .uleb128 0x2e
43090 +       .byte   0x0
43091 +       .uleb128 0x3f
43092 +       .uleb128 0xc
43093 +       .uleb128 0x3
43094 +       .uleb128 0xe
43095 +       .uleb128 0x3a
43096 +       .uleb128 0xb
43097 +       .uleb128 0x3b
43098 +       .uleb128 0x5
43099 +       .uleb128 0x27
43100 +       .uleb128 0xc
43101 +       .uleb128 0x11
43102 +       .uleb128 0x1
43103 +       .uleb128 0x12
43104 +       .uleb128 0x1
43105 +       .uleb128 0x40
43106 +       .uleb128 0xa
43107 +       .byte   0x0
43108 +       .byte   0x0
43109 +       .uleb128 0x70
43110 +       .uleb128 0x1d
43111 +       .byte   0x0
43112 +       .uleb128 0x31
43113 +       .uleb128 0x13
43114 +       .uleb128 0x11
43115 +       .uleb128 0x1
43116 +       .uleb128 0x12
43117 +       .uleb128 0x1
43118 +       .uleb128 0x58
43119 +       .uleb128 0xb
43120 +       .uleb128 0x59
43121 +       .uleb128 0x5
43122 +       .byte   0x0
43123 +       .byte   0x0
43124 +       .uleb128 0x71
43125 +       .uleb128 0x34
43126 +       .byte   0x0
43127 +       .uleb128 0x3
43128 +       .uleb128 0xe
43129 +       .uleb128 0x3a
43130 +       .uleb128 0xb
43131 +       .uleb128 0x3b
43132 +       .uleb128 0xb
43133 +       .uleb128 0x49
43134 +       .uleb128 0x13
43135 +       .uleb128 0x2
43136 +       .uleb128 0xa
43137 +       .byte   0x0
43138 +       .byte   0x0
43139 +       .uleb128 0x72
43140 +       .uleb128 0x21
43141 +       .byte   0x0
43142 +       .byte   0x0
43143 +       .byte   0x0
43144 +       .uleb128 0x73
43145 +       .uleb128 0x34
43146 +       .byte   0x0
43147 +       .uleb128 0x3
43148 +       .uleb128 0xe
43149 +       .uleb128 0x3a
43150 +       .uleb128 0xb
43151 +       .uleb128 0x3b
43152 +       .uleb128 0xb
43153 +       .uleb128 0x49
43154 +       .uleb128 0x13
43155 +       .uleb128 0x3f
43156 +       .uleb128 0xc
43157 +       .uleb128 0x3c
43158 +       .uleb128 0xc
43159 +       .byte   0x0
43160 +       .byte   0x0
43161 +       .uleb128 0x74
43162 +       .uleb128 0x34
43163 +       .byte   0x0
43164 +       .uleb128 0x3
43165 +       .uleb128 0xe
43166 +       .uleb128 0x3a
43167 +       .uleb128 0xb
43168 +       .uleb128 0x3b
43169 +       .uleb128 0xb
43170 +       .uleb128 0x49
43171 +       .uleb128 0x13
43172 +       .uleb128 0x3f
43173 +       .uleb128 0xc
43174 +       .uleb128 0x2
43175 +       .uleb128 0xa
43176 +       .byte   0x0
43177 +       .byte   0x0
43178 +       .uleb128 0x75
43179 +       .uleb128 0x34
43180 +       .byte   0x0
43181 +       .uleb128 0x3
43182 +       .uleb128 0xe
43183 +       .uleb128 0x3a
43184 +       .uleb128 0xb
43185 +       .uleb128 0x3b
43186 +       .uleb128 0x5
43187 +       .uleb128 0x49
43188 +       .uleb128 0x13
43189 +       .uleb128 0x3f
43190 +       .uleb128 0xc
43191 +       .uleb128 0x3c
43192 +       .uleb128 0xc
43193 +       .byte   0x0
43194 +       .byte   0x0
43195 +       .uleb128 0x76
43196 +       .uleb128 0x34
43197 +       .byte   0x0
43198 +       .uleb128 0x3
43199 +       .uleb128 0xe
43200 +       .uleb128 0x3a
43201 +       .uleb128 0xb
43202 +       .uleb128 0x3b
43203 +       .uleb128 0x5
43204 +       .uleb128 0x49
43205 +       .uleb128 0x13
43206 +       .uleb128 0x3f
43207 +       .uleb128 0xc
43208 +       .uleb128 0x2
43209 +       .uleb128 0xa
43210 +       .byte   0x0
43211 +       .byte   0x0
43212 +       .byte   0x0
43213 +       .section        .debug_pubnames,"",@progbits
43214 +       .long   0x2bb
43215 +       .value  0x2
43216 +       .long   .Ldebug_info0
43217 +       .long   0xaa8d
43218 +       .long   0x8ccf
43219 +       .string "__round_jiffies"
43220 +       .long   0x8d1f
43221 +       .string "__round_jiffies_relative"
43222 +       .long   0x8d58
43223 +       .string "round_jiffies"
43224 +       .long   0x8d9b
43225 +       .string "round_jiffies_relative"
43226 +       .long   0x8eda
43227 +       .string "init_timer"
43228 +       .long   0x8f49
43229 +       .string "init_timer_deferrable"
43230 +       .long   0x9345
43231 +       .string "init_timers"
43232 +       .long   0x96cb
43233 +       .string "do_sysinfo"
43234 +       .long   0x97ea
43235 +       .string "sys_sysinfo"
43236 +       .long   0x9851
43237 +       .string "sys_alarm"
43238 +       .long   0x98b0
43239 +       .string "do_timer"
43240 +       .long   0x9926
43241 +       .string "run_local_timers"
43242 +       .long   0x99b3
43243 +       .string "try_to_del_timer_sync"
43244 +       .long   0x9a6a
43245 +       .string "del_timer_sync"
43246 +       .long   0x9ac3
43247 +       .string "__mod_timer"
43248 +       .long   0x9c15
43249 +       .string "schedule_timeout"
43250 +       .long   0x9cdb
43251 +       .string "schedule_timeout_uninterruptible"
43252 +       .long   0x9d2d
43253 +       .string "msleep"
43254 +       .long   0x9d67
43255 +       .string "schedule_timeout_interruptible"
43256 +       .long   0x9de4
43257 +       .string "msleep_interruptible"
43258 +       .long   0x9eb0
43259 +       .string "update_process_times"
43260 +       .long   0x9f33
43261 +       .string "sys_getpid"
43262 +       .long   0x9fd6
43263 +       .string "sys_getppid"
43264 +       .long   0xa03a
43265 +       .string "sys_getuid"
43266 +       .long   0xa07c
43267 +       .string "sys_geteuid"
43268 +       .long   0xa0be
43269 +       .string "sys_getgid"
43270 +       .long   0xa100
43271 +       .string "sys_getegid"
43272 +       .long   0xa142
43273 +       .string "sys_gettid"
43274 +       .long   0xa184
43275 +       .string "mod_timer"
43276 +       .long   0xa1db
43277 +       .string "del_timer"
43278 +       .long   0xa286
43279 +       .string "add_timer_on"
43280 +       .long   0xa7e0
43281 +       .string "current_stack_pointer"
43282 +       .long   0xa82f
43283 +       .string "jiffies_64"
43284 +       .long   0xa94d
43285 +       .string "boot_tvec_bases"
43286 +       .long   0xa95f
43287 +       .string "avenrun"
43288 +       .long   0xaa73
43289 +       .string "rec_event"
43290 +       .long   0x0
43291 +       .section        .debug_aranges,"",@progbits
43292 +       .long   0x44
43293 +       .value  0x2
43294 +       .long   .Ldebug_info0
43295 +       .byte   0x4
43296 +       .byte   0x0
43297 +       .value  0x0
43298 +       .value  0x0
43299 +       .long   .Ltext0
43300 +       .long   .Letext0-.Ltext0
43301 +       .long   .LFB923
43302 +       .long   .LFE923-.LFB923
43303 +       .long   .LFB924
43304 +       .long   .LFE924-.LFB924
43305 +       .long   .LFB916
43306 +       .long   .LFE916-.LFB916
43307 +       .long   .LFB918
43308 +       .long   .LFE918-.LFB918
43309 +       .long   .LFB917
43310 +       .long   .LFE917-.LFB917
43311 +       .long   0x0
43312 +       .long   0x0
43313 +       .section        .debug_ranges,"",@progbits
43314 +.Ldebug_ranges0:
43315 +       .long   .LBB185
43316 +       .long   .LBE185
43317 +       .long   .LBB189
43318 +       .long   .LBE189
43319 +       .long   0x0
43320 +       .long   0x0
43321 +       .long   .LBB187
43322 +       .long   .LBE187
43323 +       .long   .LBB191
43324 +       .long   .LBE191
43325 +       .long   0x0
43326 +       .long   0x0
43327 +       .long   .LBB199
43328 +       .long   .LBE199
43329 +       .long   .LBB208
43330 +       .long   .LBE208
43331 +       .long   0x0
43332 +       .long   0x0
43333 +       .long   .LBB201
43334 +       .long   .LBE201
43335 +       .long   .LBB205
43336 +       .long   .LBE205
43337 +       .long   0x0
43338 +       .long   0x0
43339 +       .long   .LBB203
43340 +       .long   .LBE203
43341 +       .long   .LBB210
43342 +       .long   .LBE210
43343 +       .long   0x0
43344 +       .long   0x0
43345 +       .long   .LBB241
43346 +       .long   .LBE241
43347 +       .long   .LBB257
43348 +       .long   .LBE257
43349 +       .long   .LBB255
43350 +       .long   .LBE255
43351 +       .long   .LBB253
43352 +       .long   .LBE253
43353 +       .long   .LBB250
43354 +       .long   .LBE250
43355 +       .long   0x0
43356 +       .long   0x0
43357 +       .long   .LBB258
43358 +       .long   .LBE258
43359 +       .long   .LBB260
43360 +       .long   .LBE260
43361 +       .long   0x0
43362 +       .long   0x0
43363 +       .long   .LBB322
43364 +       .long   .LBE322
43365 +       .long   .LBB323
43366 +       .long   .LBE323
43367 +       .long   0x0
43368 +       .long   0x0
43369 +       .long   .LBB326
43370 +       .long   .LBE326
43371 +       .long   .LBB332
43372 +       .long   .LBE332
43373 +       .long   .LBB330
43374 +       .long   .LBE330
43375 +       .long   .LBB328
43376 +       .long   .LBE328
43377 +       .long   0x0
43378 +       .long   0x0
43379 +       .long   .LBB327
43380 +       .long   .LBE327
43381 +       .long   .LBB333
43382 +       .long   .LBE333
43383 +       .long   .LBB331
43384 +       .long   .LBE331
43385 +       .long   .LBB329
43386 +       .long   .LBE329
43387 +       .long   0x0
43388 +       .long   0x0
43389 +       .long   .LBB334
43390 +       .long   .LBE334
43391 +       .long   .LBB338
43392 +       .long   .LBE338
43393 +       .long   0x0
43394 +       .long   0x0
43395 +       .long   .LBB336
43396 +       .long   .LBE336
43397 +       .long   .LBB340
43398 +       .long   .LBE340
43399 +       .long   0x0
43400 +       .long   0x0
43401 +       .long   .LBB368
43402 +       .long   .LBE368
43403 +       .long   .LBB371
43404 +       .long   .LBE371
43405 +       .long   0x0
43406 +       .long   0x0
43407 +       .long   .LBB370
43408 +       .long   .LBE370
43409 +       .long   .LBB373
43410 +       .long   .LBE373
43411 +       .long   0x0
43412 +       .long   0x0
43413 +       .long   .LBB389
43414 +       .long   .LBE389
43415 +       .long   .LBB390
43416 +       .long   .LBE390
43417 +       .long   0x0
43418 +       .long   0x0
43419 +       .long   .LBB412
43420 +       .long   .LBE412
43421 +       .long   .LBB414
43422 +       .long   .LBE414
43423 +       .long   0x0
43424 +       .long   0x0
43425 +       .long   .LBB419
43426 +       .long   .LBE419
43427 +       .long   .LBB424
43428 +       .long   .LBE424
43429 +       .long   0x0
43430 +       .long   0x0
43431 +       .section        .debug_str,"MS",@progbits,1
43432 +.LASF16:
43433 +       .string "long long int"
43434 +.LASF610:
43435 +       .string "qs_pending"
43436 +.LASF28:
43437 +       .string "__u64"
43438 +.LASF1708:
43439 +       .string "idt_table"
43440 +.LASF596:
43441 +       .string "notifier_call"
43442 +.LASF768:
43443 +       .string "ki_flags"
43444 +.LASF107:
43445 +       .string "line"
43446 +.LASF1360:
43447 +       .string "link"
43448 +.LASF1675:
43449 +       .string "console_printk"
43450 +.LASF828:
43451 +       .string "vm_page_prot"
43452 +.LASF694:
43453 +       .string "shared_vm"
43454 +.LASF547:
43455 +       .string "vm_stat_diff"
43456 +.LASF496:
43457 +       .string "si_errno"
43458 +.LASF1381:
43459 +       .string "read"
43460 +.LASF687:
43461 +       .string "mmlist"
43462 +.LASF1505:
43463 +       .string "vm_set"
43464 +.LASF1609:
43465 +       .string "__mod_timer"
43466 +.LASF1636:
43467 +       .string "__kstrtab_boot_tvec_bases"
43468 +.LASF1:
43469 +       .string "long unsigned int"
43470 +.LASF264:
43471 +       .string "pi_lock"
43472 +.LASF315:
43473 +       .string "private"
43474 +.LASF552:
43475 +       .string "lowmem_reserve"
43476 +.LASF1498:
43477 +       .string "offset"
43478 +.LASF1168:
43479 +       .string "ia_valid"
43480 +.LASF1101:
43481 +       .string "last"
43482 +.LASF711:
43483 +       .string "cpu_vm_mask"
43484 +.LASF468:
43485 +       .string "sa_flags"
43486 +.LASF1687:
43487 +       .string "jiffies"
43488 +.LASF684:
43489 +       .string "map_count"
43490 +.LASF406:
43491 +       .string "smp_prepare_boot_cpu"
43492 +.LASF681:
43493 +       .string "free_area_cache"
43494 +.LASF1331:
43495 +       .string "assoc_mapping"
43496 +.LASF139:
43497 +       .string "fsave"
43498 +.LASF404:
43499 +       .string "release"
43500 +.LASF678:
43501 +       .string "mmap_base"
43502 +.LASF207:
43503 +       .string "sibling"
43504 +.LASF1562:
43505 +       .string "ret__"
43506 +.LASF1431:
43507 +       .string "file_lock_operations"
43508 +.LASF1463:
43509 +       .string "read_inode"
43510 +.LASF1623:
43511 +       .string "sys_getppid"
43512 +.LASF394:
43513 +       .string "coherent_dma_mask"
43514 +.LASF356:
43515 +       .string "mpc_config_translation"
43516 +.LASF718:
43517 +       .string "core_startup_done"
43518 +.LASF455:
43519 +       .string "semadj"
43520 +.LASF1558:
43521 +       .string "timer_stats_timer_set_start_info"
43522 +.LASF95:
43523 +       .string "___eip"
43524 +.LASF1125:
43525 +       .string "s_qcop"
43526 +.LASF14:
43527 +       .string "__kernel_gid32_t"
43528 +.LASF905:
43529 +       .string "kstat"
43530 +.LASF222:
43531 +       .string "it_prof_expires"
43532 +.LASF1645:
43533 +       .string "__kstrtab_round_jiffies_relative"
43534 +.LASF1138:
43535 +       .string "s_dirty"
43536 +.LASF1464:
43537 +       .string "dirty_inode"
43538 +.LASF830:
43539 +       .string "vm_rb"
43540 +.LASF214:
43541 +       .string "rt_priority"
43542 +.LASF1295:
43543 +       .string "set_xquota"
43544 +.LASF881:
43545 +       .string "SLEEP_INTERRUPTED"
43546 +.LASF874:
43547 +       .string "ngroups"
43548 +.LASF1163:
43549 +       .string "height"
43550 +.LASF1016:
43551 +       .string "irq_desc"
43552 +.LASF1565:
43553 +       .string "__round_jiffies"
43554 +.LASF1699:
43555 +       .string "malloc_sizes"
43556 +.LASF17:
43557 +       .string "umode_t"
43558 +.LASF197:
43559 +       .string "exit_state"
43560 +.LASF1596:
43561 +       .string "found"
43562 +.LASF703:
43563 +       .string "end_data"
43564 +.LASF164:
43565 +       .string "addr_limit"
43566 +.LASF895:
43567 +       .string "cpu_usage_stat"
43568 +.LASF1126:
43569 +       .string "s_export_op"
43570 +.LASF748:
43571 +       .string "resolution"
43572 +.LASF662:
43573 +       .string "i_cindex"
43574 +.LASF1015:
43575 +       .string "irq_flow_handler_t"
43576 +.LASF1298:
43577 +       .string "dqonoff_mutex"
43578 +.LASF216:
43579 +       .string "stime"
43580 +.LASF509:
43581 +       .string "list"
43582 +.LASF1172:
43583 +       .string "ia_size"
43584 +.LASF359:
43585 +       .string "trans_quad"
43586 +.LASF1586:
43587 +       .string "init_timers"
43588 +.LASF284:
43589 +       .string "raw_spinlock_t"
43590 +.LASF407:
43591 +       .string "smp_prepare_cpus"
43592 +.LASF414:
43593 +       .string "name"
43594 +.LASF276:
43595 +       .string "ioac"
43596 +.LASF1203:
43597 +       .string "d_icount"
43598 +.LASF471:
43599 +       .string "k_sigaction"
43600 +.LASF692:
43601 +       .string "total_vm"
43602 +.LASF1455:
43603 +       .string "fs_flags"
43604 +.LASF1473:
43605 +       .string "unlockfs"
43606 +.LASF317:
43607 +       .string "task_list"
43608 +.LASF1131:
43609 +       .string "s_lock"
43610 +.LASF39:
43611 +       .string "loff_t"
43612 +.LASF1404:
43613 +       .string "fl_owner"
43614 +.LASF549:
43615 +       .string "pages_min"
43616 +.LASF1567:
43617 +       .string "round_jiffies"
43618 +.LASF1631:
43619 +       .string "timer_stats_timer_clear_start_info"
43620 +.LASF535:
43621 +       .string "vfsmount"
43622 +.LASF515:
43623 +       .string "pwdmnt"
43624 +.LASF1332:
43625 +       .string "block_device"
43626 +.LASF650:
43627 +       .string "i_bytes"
43628 +.LASF1337:
43629 +       .string "bd_mount_sem"
43630 +.LASF1077:
43631 +       .string "device_attribute"
43632 +.LASF765:
43633 +       .string "iov_len"
43634 +.LASF966:
43635 +       .string "symtab"
43636 +.LASF76:
43637 +       .string "regs"
43638 +.LASF162:
43639 +       .string "exec_domain"
43640 +.LASF1167:
43641 +       .string "iattr"
43642 +.LASF1075:
43643 +       .string "resume"
43644 +.LASF174:
43645 +       .string "load_weight"
43646 +.LASF1523:
43647 +       .string "__list_add"
43648 +.LASF545:
43649 +       .string "per_cpu_pageset"
43650 +.LASF986:
43651 +       .string "kset_uevent_ops"
43652 +.LASF1237:
43653 +       .string "dqi_free_entry"
43654 +.LASF143:
43655 +       .string "thread_struct"
43656 +.LASF1072:
43657 +       .string "suspend"
43658 +.LASF1398:
43659 +       .string "splice_write"
43660 +.LASF670:
43661 +       .string "i_writecount"
43662 +.LASF1500:
43663 +       .string "mapping"
43664 +.LASF305:
43665 +       .string "rb_root"
43666 +.LASF1178:
43667 +       .string "qsize_t"
43668 +.LASF1394:
43669 +       .string "sendpage"
43670 +.LASF232:
43671 +       .string "group_info"
43672 +.LASF677:
43673 +       .string "unmap_area"
43674 +.LASF518:
43675 +       .string "d_count"
43676 +.LASF982:
43677 +       .string "list_lock"
43678 +.LASF153:
43679 +       .string "v86mask"
43680 +.LASF1348:
43681 +       .string "bd_list"
43682 +.LASF543:
43683 +       .string "high"
43684 +.LASF469:
43685 +       .string "sa_restorer"
43686 +.LASF1423:
43687 +       .string "ahead_start"
43688 +.LASF689:
43689 +       .string "_anon_rss"
43690 +.LASF1228:
43691 +       .string "qs_btimelimit"
43692 +.LASF1258:
43693 +       .string "dq_id"
43694 +.LASF1438:
43695 +       .string "fl_notify"
43696 +.LASF582:
43697 +       .string "node_id"
43698 +.LASF821:
43699 +       .string "internal_pages"
43700 +.LASF1027:
43701 +       .string "pending_mask"
43702 +.LASF120:
43703 +       .string "mem_unit"
43704 +.LASF1223:
43705 +       .string "qs_flags"
43706 +.LASF1530:
43707 +       .string "tbase_get_base"
43708 +.LASF361:
43709 +       .string "trans_local"
43710 +.LASF1227:
43711 +       .string "qs_incoredqs"
43712 +.LASF1595:
43713 +       .string "bitcount"
43714 +.LASF466:
43715 +       .string "sigaction"
43716 +.LASF853:
43717 +       .string "group_stop_count"
43718 +.LASF1458:
43719 +       .string "fs_supers"
43720 +.LASF1679:
43721 +       .string "mmu_cr4_features"
43722 +.LASF1666:
43723 +       .string "__ksymtab_schedule_timeout_interruptible"
43724 +.LASF474:
43725 +       .string "sival_int"
43726 +.LASF201:
43727 +       .string "personality"
43728 +.LASF1703:
43729 +       .string "avenrun"
43730 +.LASF1418:
43731 +       .string "fown_struct"
43732 +.LASF1640:
43733 +       .string "__ksymtab___round_jiffies"
43734 +.LASF565:
43735 +       .string "_pad2_"
43736 +.LASF351:
43737 +       .string "mpc_featureflag"
43738 +.LASF1364:
43739 +       .string "rmdir"
43740 +.LASF278:
43741 +       .string "pi_state_list"
43742 +.LASF899:
43743 +       .string "idle"
43744 +.LASF438:
43745 +       .string "phys_pkg_id"
43746 +.LASF1406:
43747 +       .string "fl_wait"
43748 +.LASF1311:
43749 +       .string "releasepage"
43750 +.LASF1155:
43751 +       .string "last_type"
43752 +.LASF814:
43753 +       .string "ring_info"
43754 +.LASF31:
43755 +       .string "dev_t"
43756 +.LASF1577:
43757 +       .string "init_timers_cpu"
43758 +.LASF564:
43759 +       .string "prev_priority"
43760 +.LASF323:
43761 +       .string "wait_lock"
43762 +.LASF717:
43763 +       .string "core_waiters"
43764 +.LASF1424:
43765 +       .string "ahead_size"
43766 +.LASF604:
43767 +       .string "cs_cachep"
43768 +.LASF326:
43769 +       .string "sleepers"
43770 +.LASF516:
43771 +       .string "altrootmnt"
43772 +.LASF1477:
43773 +       .string "umount_begin"
43774 +.LASF1020:
43775 +       .string "handler_data"
43776 +.LASF301:
43777 +       .string "rb_node"
43778 +.LASF998:
43779 +       .string "module_kobject"
43780 +.LASF1444:
43781 +       .string "nlm_lockowner"
43782 +.LASF387:
43783 +       .string "uevent_attr"
43784 +.LASF271:
43785 +       .string "backing_dev_info"
43786 +.LASF386:
43787 +       .string "uevent_suppress"
43788 +.LASF864:
43789 +       .string "cnvcsw"
43790 +.LASF379:
43791 +       .string "knode_parent"
43792 +.LASF1055:
43793 +       .string "dev_archdata"
43794 +.LASF536:
43795 +       .string "completion"
43796 +.LASF739:
43797 +       .string "pid_type"
43798 +.LASF1646:
43799 +       .string "__ksymtab_round_jiffies_relative"
43800 +.LASF1004:
43801 +       .string "MODULE_STATE_GOING"
43802 +.LASF838:
43803 +       .string "vm_truncate_count"
43804 +.LASF87:
43805 +       .string "___esi"
43806 +.LASF487:
43807 +       .string "_addr"
43808 +.LASF98:
43809 +       .string "___esp"
43810 +.LASF941:
43811 +       .string "unused_gpl_syms"
43812 +.LASF67:
43813 +       .string "eflags"
43814 +.LASF731:
43815 +       .string "timer_list"
43816 +.LASF1250:
43817 +       .string "dq_hash"
43818 +.LASF1285:
43819 +       .string "quota_on"
43820 +.LASF940:
43821 +       .string "unused_crcs"
43822 +.LASF1341:
43823 +       .string "bd_holder_list"
43824 +.LASF1384:
43825 +       .string "aio_write"
43826 +.LASF766:
43827 +       .string "kiocb"
43828 +.LASF888:
43829 +       .string "capabilities"
43830 +.LASF1047:
43831 +       .string "klist"
43832 +.LASF1062:
43833 +       .string "klist_devices"
43834 +.LASF1185:
43835 +       .string "dqb_curinodes"
43836 +.LASF1248:
43837 +       .string "qf_next"
43838 +.LASF659:
43839 +       .string "i_mapping"
43840 +.LASF157:
43841 +       .string "io_bitmap_ptr"
43842 +.LASF1280:
43843 +       .string "acquire_dquot"
43844 +.LASF328:
43845 +       .string "size"
43846 +.LASF644:
43847 +       .string "i_size_seqcount"
43848 +.LASF252:
43849 +       .string "pending"
43850 +.LASF862:
43851 +       .string "cutime"
43852 +.LASF104:
43853 +       .string "bug_entry"
43854 +.LASF954:
43855 +       .string "init_text_size"
43856 +.LASF1395:
43857 +       .string "check_flags"
43858 +.LASF919:
43859 +       .string "st_size"
43860 +.LASF368:
43861 +       .string "pm_message_t"
43862 +.LASF15:
43863 +       .string "__kernel_loff_t"
43864 +.LASF402:
43865 +       .string "devt"
43866 +.LASF310:
43867 +       .string "first"
43868 +.LASF909:
43869 +       .string "mtime"
43870 +.LASF619:
43871 +       .string "barrier"
43872 +.LASF132:
43873 +       .string "i387_soft_struct"
43874 +.LASF1448:
43875 +       .string "nfs4_fl"
43876 +.LASF1056:
43877 +       .string "acpi_handle"
43878 +.LASF363:
43879 +       .string "physid_mask"
43880 +.LASF1091:
43881 +       .string "class_data"
43882 +.LASF190:
43883 +       .string "time_slice"
43884 +.LASF432:
43885 +       .string "cpu_present_to_apicid"
43886 +.LASF1175:
43887 +       .string "ia_ctime"
43888 +.LASF580:
43889 +       .string "node_present_pages"
43890 +.LASF419:
43891 +       .string "int_dest_mode"
43892 +.LASF738:
43893 +       .string "timer_jiffies"
43894 +.LASF1003:
43895 +       .string "MODULE_STATE_COMING"
43896 +.LASF1615:
43897 +       .string "msecs"
43898 +.LASF679:
43899 +       .string "task_size"
43900 +.LASF1120:
43901 +       .string "s_dirt"
43902 +.LASF151:
43903 +       .string "vm86_info"
43904 +.LASF617:
43905 +       .string "donetail"
43906 +.LASF1225:
43907 +       .string "qs_uquota"
43908 +.LASF40:
43909 +       .string "size_t"
43910 +.LASF598:
43911 +       .string "blocking_notifier_head"
43912 +.LASF449:
43913 +       .string "kref"
43914 +.LASF1319:
43915 +       .string "page_tree"
43916 +.LASF1409:
43917 +       .string "fl_type"
43918 +.LASF1480:
43919 +       .string "export_operations"
43920 +.LASF1474:
43921 +       .string "statfs"
43922 +.LASF1590:
43923 +       .string "__dummy2"
43924 +.LASF1608:
43925 +       .string "del_timer_sync"
43926 +.LASF1541:
43927 +       .string "pattern"
43928 +.LASF886:
43929 +       .string "reclaimed_slab"
43930 +.LASF791:
43931 +       .string "f_path"
43932 +.LASF1413:
43933 +       .string "fl_break_time"
43934 +.LASF1117:
43935 +       .string "s_dev"
43936 +.LASF962:
43937 +       .string "num_bugs"
43938 +.LASF1033:
43939 +       .string "mask_ack"
43940 +.LASF882:
43941 +       .string "prio_array"
43942 +.LASF1684:
43943 +       .string "xtime_lock"
43944 +.LASF444:
43945 +       .string "apic_id_mask"
43946 +.LASF691:
43947 +       .string "hiwater_vm"
43948 +.LASF762:
43949 +       .string "res2"
43950 +.LASF978:
43951 +       .string "poll"
43952 +.LASF1605:
43953 +       .string "lock_timer_base"
43954 +.LASF844:
43955 +       .string "__session"
43956 +.LASF158:
43957 +       .string "iopl"
43958 +.LASF367:
43959 +       .string "event"
43960 +.LASF42:
43961 +       .string "time_t"
43962 +.LASF296:
43963 +       .string "seqcount"
43964 +.LASF857:
43965 +       .string "it_prof_incr"
43966 +.LASF108:
43967 +       .string "sysinfo"
43968 +.LASF846:
43969 +       .string "live"
43970 +.LASF325:
43971 +       .string "semaphore"
43972 +.LASF1257:
43973 +       .string "dq_sb"
43974 +.LASF685:
43975 +       .string "mmap_sem"
43976 +.LASF1218:
43977 +       .string "qfs_nblks"
43978 +.LASF299:
43979 +       .string "tv_sec"
43980 +.LASF1333:
43981 +       .string "bd_dev"
43982 +.LASF295:
43983 +       .string "seqlock_t"
43984 +.LASF930:
43985 +       .string "srcversion"
43986 +.LASF1692:
43987 +       .string "acpi_ht"
43988 +.LASF431:
43989 +       .string "cpu_to_logical_apicid"
43990 +.LASF56:
43991 +       .string "pgd_t"
43992 +.LASF1034:
43993 +       .string "unmask"
43994 +.LASF345:
43995 +       .string "mpc_config_processor"
43996 +.LASF1097:
43997 +       .string "raw_prio_tree_node"
43998 +.LASF427:
43999 +       .string "ioapic_phys_id_map"
44000 +.LASF1425:
44001 +       .string "mmap_hit"
44002 +.LASF927:
44003 +       .string "param_attrs"
44004 +.LASF1032:
44005 +       .string "disable"
44006 +.LASF556:
44007 +       .string "active_list"
44008 +.LASF1548:
44009 +       .string "native_irq_enable"
44010 +.LASF1422:
44011 +       .string "prev_index"
44012 +.LASF1036:
44013 +       .string "retrigger"
44014 +.LASF1271:
44015 +       .string "dquot_operations"
44016 +.LASF855:
44017 +       .string "real_timer"
44018 +.LASF274:
44019 +       .string "last_siginfo"
44020 +.LASF802:
44021 +       .string "private_data"
44022 +.LASF554:
44023 +       .string "_pad1_"
44024 +.LASF546:
44025 +       .string "stat_threshold"
44026 +.LASF654:
44027 +       .string "i_alloc_sem"
44028 +.LASF1718:
44029 +       .string "GNU C 4.1.1 (Gentoo 4.1.1-r3)"
44030 +.LASF1385:
44031 +       .string "readdir"
44032 +.LASF889:
44033 +       .string "congested_fn"
44034 +.LASF576:
44035 +       .string "nr_zones"
44036 +.LASF1088:
44037 +       .string "class_attribute"
44038 +.LASF788:
44039 +       .string "ki_cur_seg"
44040 +.LASF720:
44041 +       .string "ioctx_list_lock"
44042 +.LASF1507:
44043 +       .string "close"
44044 +.LASF1439:
44045 +       .string "fl_grant"
44046 +.LASF396:
44047 +       .string "dma_mem"
44048 +.LASF1151:
44049 +       .string "s_time_gran"
44050 +.LASF1343:
44051 +       .string "bd_block_size"
44052 +.LASF258:
44053 +       .string "security"
44054 +.LASF1657:
44055 +       .string "__kstrtab_try_to_del_timer_sync"
44056 +.LASF1509:
44057 +       .string "nopfn"
44058 +.LASF1249:
44059 +       .string "dquot"
44060 +.LASF453:
44061 +       .string "id_next"
44062 +.LASF130:
44063 +       .string "xmm_space"
44064 +.LASF472:
44065 +       .string "i387_union"
44066 +.LASF1149:
44067 +       .string "s_fs_info"
44068 +.LASF1531:
44069 +       .string "constant_test_bit"
44070 +.LASF1508:
44071 +       .string "nopage"
44072 +.LASF945:
44073 +       .string "num_gpl_future_syms"
44074 +.LASF745:
44075 +       .string "cpu_base"
44076 +.LASF1198:
44077 +       .string "d_blk_hardlimit"
44078 +.LASF622:
44079 +       .string "PIDTYPE_SID"
44080 +.LASF558:
44081 +       .string "nr_scan_active"
44082 +.LASF749:
44083 +       .string "get_time"
44084 +.LASF794:
44085 +       .string "f_flags"
44086 +.LASF134:
44087 +       .string "changed"
44088 +.LASF70:
44089 +       .string "__dsh"
44090 +.LASF1083:
44091 +       .string "class_attrs"
44092 +.LASF1351:
44093 +       .string "hd_struct"
44094 +.LASF1628:
44095 +       .string "sys_getegid"
44096 +.LASF1306:
44097 +       .string "readpages"
44098 +.LASF831:
44099 +       .string "shared"
44100 +.LASF1485:
44101 +       .string "get_dentry"
44102 +.LASF525:
44103 +       .string "d_lru"
44104 +.LASF646:
44105 +       .string "i_mtime"
44106 +.LASF298:
44107 +       .string "timespec"
44108 +.LASF377:
44109 +       .string "device"
44110 +.LASF478:
44111 +       .string "_uid"
44112 +.LASF597:
44113 +       .string "priority"
44114 +.LASF1182:
44115 +       .string "dqb_curspace"
44116 +.LASF1264:
44117 +       .string "check_quota_file"
44118 +.LASF408:
44119 +       .string "cpu_up"
44120 +.LASF929:
44121 +       .string "version"
44122 +.LASF171:
44123 +       .string "usage"
44124 +.LASF1143:
44125 +       .string "s_mtd"
44126 +.LASF911:
44127 +       .string "blksize"
44128 +.LASF1499:
44129 +       .string "_mapcount"
44130 +.LASF815:
44131 +       .string "aio_ring_info"
44132 +.LASF285:
44133 +       .string "lock"
44134 +.LASF1516:
44135 +       .string "tvec_t"
44136 +.LASF355:
44137 +       .string "mpc_bustype"
44138 +.LASF1334:
44139 +       .string "bd_inode"
44140 +.LASF683:
44141 +       .string "mm_count"
44142 +.LASF790:
44143 +       .string "ki_eventfd"
44144 +.LASF231:
44145 +       .string "fsgid"
44146 +.LASF1320:
44147 +       .string "tree_lock"
44148 +.LASF1103:
44149 +       .string "index_bits"
44150 +.LASF1078:
44151 +       .string "driver_attribute"
44152 +.LASF263:
44153 +       .string "alloc_lock"
44154 +.LASF588:
44155 +       .string "zones"
44156 +.LASF268:
44157 +       .string "bio_list"
44158 +.LASF1434:
44159 +       .string "fl_copy_lock"
44160 +.LASF1190:
44161 +       .string "dqi_bgrace"
44162 +.LASF1146:
44163 +       .string "s_frozen"
44164 +.LASF1221:
44165 +       .string "fs_quota_stat"
44166 +.LASF1588:
44167 +       .string "work_list"
44168 +.LASF1430:
44169 +       .string "fl_owner_t"
44170 +.LASF1661:
44171 +       .string "__kstrtab_avenrun"
44172 +.LASF1702:
44173 +       .string "boot_tvec_bases"
44174 +.LASF817:
44175 +       .string "ring_pages"
44176 +.LASF1603:
44177 +       .string "count_active_tasks"
44178 +.LASF1213:
44179 +       .string "d_rtbwarns"
44180 +.LASF634:
44181 +       .string "i_sb_list"
44182 +.LASF330:
44183 +       .string "mm_context_t"
44184 +.LASF1576:
44185 +       .string "__mptr"
44186 +.LASF235:
44187 +       .string "cap_permitted"
44188 +.LASF1416:
44189 +       .string "fl_u"
44190 +.LASF18:
44191 +       .string "__s8"
44192 +.LASF75:
44193 +       .string "vm86_struct"
44194 +.LASF753:
44195 +       .string "lock_key"
44196 +.LASF1308:
44197 +       .string "commit_write"
44198 +.LASF1579:
44199 +       .string "boot_done"
44200 +.LASF964:
44201 +       .string "waiter"
44202 +.LASF996:
44203 +       .string "test"
44204 +.LASF1244:
44205 +       .string "quota_format_type"
44206 +.LASF524:
44207 +       .string "d_name"
44208 +.LASF555:
44209 +       .string "lru_lock"
44210 +.LASF1370:
44211 +       .string "truncate"
44212 +.LASF211:
44213 +       .string "vfork_done"
44214 +.LASF297:
44215 +       .string "seqcount_t"
44216 +.LASF792:
44217 +       .string "f_op"
44218 +.LASF1060:
44219 +       .string "drivers"
44220 +.LASF1265:
44221 +       .string "read_file_info"
44222 +.LASF512:
44223 +       .string "root"
44224 +.LASF1433:
44225 +       .string "fl_remove"
44226 +.LASF747:
44227 +       .string "active"
44228 +.LASF642:
44229 +       .string "i_version"
44230 +.LASF700:
44231 +       .string "start_code"
44232 +.LASF612:
44233 +       .string "nxttail"
44234 +.LASF664:
44235 +       .string "i_dnotify_mask"
44236 +.LASF989:
44237 +       .string "local_t"
44238 +.LASF452:
44239 +       .string "proc_next"
44240 +.LASF219:
44241 +       .string "start_time"
44242 +.LASF595:
44243 +       .string "notifier_block"
44244 +.LASF836:
44245 +       .string "vm_file"
44246 +.LASF1461:
44247 +       .string "super_operations"
44248 +.LASF243:
44249 +       .string "sysvsem"
44250 +.LASF212:
44251 +       .string "set_child_tid"
44252 +.LASF1170:
44253 +       .string "ia_uid"
44254 +.LASF20:
44255 +       .string "__u8"
44256 +.LASF641:
44257 +       .string "i_rdev"
44258 +.LASF1466:
44259 +       .string "put_inode"
44260 +.LASF1518:
44261 +       .string "tvec_root_t"
44262 +.LASF1229:
44263 +       .string "qs_itimelimit"
44264 +.LASF721:
44265 +       .string "ioctx_list"
44266 +.LASF858:
44267 +       .string "it_virt_incr"
44268 +.LASF557:
44269 +       .string "inactive_list"
44270 +.LASF137:
44271 +       .string "alimit"
44272 +.LASF1514:
44273 +       .string "event_type"
44274 +.LASF1038:
44275 +       .string "set_wake"
44276 +.LASF1207:
44277 +       .string "d_bwarns"
44278 +.LASF1612:
44279 +       .string "expire"
44280 +.LASF1268:
44281 +       .string "read_dqblk"
44282 +.LASF1247:
44283 +       .string "qf_owner"
44284 +.LASF1110:
44285 +       .string "d_compare"
44286 +.LASF73:
44287 +       .string "revectored_struct"
44288 +.LASF1193:
44289 +       .string "dqi_valid"
44290 +.LASF348:
44291 +       .string "mpc_apicver"
44292 +.LASF1625:
44293 +       .string "sys_getuid"
44294 +.LASF1578:
44295 +       .string "__ret_warn_on"
44296 +.LASF918:
44297 +       .string "st_value"
44298 +.LASF1104:
44299 +       .string "qstr"
44300 +.LASF350:
44301 +       .string "mpc_cpufeature"
44302 +.LASF203:
44303 +       .string "tgid"
44304 +.LASF1714:
44305 +       .string "per_cpu__vm_event_states"
44306 +.LASF800:
44307 +       .string "f_ra"
44308 +.LASF1338:
44309 +       .string "bd_inodes"
44310 +.LASF570:
44311 +       .string "zone_start_pfn"
44312 +.LASF467:
44313 +       .string "sa_handler"
44314 +.LASF257:
44315 +       .string "notifier_mask"
44316 +.LASF1115:
44317 +       .string "super_block"
44318 +.LASF411:
44319 +       .string "smp_send_reschedule"
44320 +.LASF1396:
44321 +       .string "dir_notify"
44322 +.LASF1347:
44323 +       .string "bd_disk"
44324 +.LASF113:
44325 +       .string "sharedram"
44326 +.LASF1013:
44327 +       .string "fixup"
44328 +.LASF1676:
44329 +       .string "__per_cpu_offset"
44330 +.LASF1269:
44331 +       .string "commit_dqblk"
44332 +.LASF78:
44333 +       .string "cpu_type"
44334 +.LASF1150:
44335 +       .string "s_vfs_rename_mutex"
44336 +.LASF1242:
44337 +       .string "dqi_format"
44338 +.LASF115:
44339 +       .string "totalswap"
44340 +.LASF562:
44341 +       .string "reclaim_in_progress"
44342 +.LASF437:
44343 +       .string "enable_apic_mode"
44344 +.LASF508:
44345 +       .string "uidhash_list"
44346 +.LASF1342:
44347 +       .string "bd_contains"
44348 +.LASF1336:
44349 +       .string "bd_mutex"
44350 +.LASF538:
44351 +       .string "free_area"
44352 +.LASF1637:
44353 +       .string "__ksymtab_boot_tvec_bases"
44354 +.LASF1643:
44355 +       .string "__kstrtab_round_jiffies"
44356 +.LASF1241:
44357 +       .string "mem_dqinfo"
44358 +.LASF430:
44359 +       .string "apicid_to_node"
44360 +.LASF502:
44361 +       .string "processes"
44362 +.LASF1411:
44363 +       .string "fl_end"
44364 +.LASF938:
44365 +       .string "unused_syms"
44366 +.LASF809:
44367 +       .string "user_id"
44368 +.LASF867:
44369 +       .string "cmaj_flt"
44370 +.LASF965:
44371 +       .string "exit"
44372 +.LASF1133:
44373 +       .string "s_syncing"
44374 +.LASF1435:
44375 +       .string "fl_release_private"
44376 +.LASF178:
44377 +       .string "run_list"
44378 +.LASF316:
44379 +       .string "func"
44380 +.LASF1713:
44381 +       .string "protection_map"
44382 +.LASF1325:
44383 +       .string "truncate_count"
44384 +.LASF126:
44385 +       .string "status"
44386 +.LASF446:
44387 +       .string "send_IPI_mask"
44388 +.LASF1671:
44389 +       .string "__kstrtab_msleep"
44390 +.LASF340:
44391 +       .string "mpc_oemptr"
44392 +.LASF1515:
44393 +       .string "tvec_s"
44394 +.LASF875:
44395 +       .string "small_block"
44396 +.LASF594:
44397 +       .string "owner"
44398 +.LASF354:
44399 +       .string "mpc_busid"
44400 +.LASF812:
44401 +       .string "active_reqs"
44402 +.LASF1503:
44403 +       .string "first_page"
44404 +.LASF1405:
44405 +       .string "fl_pid"
44406 +.LASF1068:
44407 +       .string "drivers_autoprobe_attr"
44408 +.LASF1690:
44409 +       .string "acpi_noirq"
44410 +.LASF1246:
44411 +       .string "qf_ops"
44412 +.LASF994:
44413 +       .string "attr"
44414 +.LASF1037:
44415 +       .string "set_type"
44416 +.LASF1490:
44417 +       .string "written"
44418 +.LASF506:
44419 +       .string "mq_bytes"
44420 +.LASF1220:
44421 +       .string "fs_qfilestat_t"
44422 +.LASF77:
44423 +       .string "screen_bitmap"
44424 +.LASF1532:
44425 +       .string "addr"
44426 +.LASF1559:
44427 +       .string "timer_set_base"
44428 +.LASF953:
44429 +       .string "core_size"
44430 +.LASF1482:
44431 +       .string "encode_fh"
44432 +.LASF1598:
44433 +       .string "process_timeout"
44434 +.LASF1382:
44435 +       .string "write"
44436 +.LASF1407:
44437 +       .string "fl_file"
44438 +.LASF908:
44439 +       .string "atime"
44440 +.LASF182:
44441 +       .string "timestamp"
44442 +.LASF1712:
44443 +       .string "dcache_lock"
44444 +.LASF370:
44445 +       .string "power_state"
44446 +.LASF1002:
44447 +       .string "MODULE_STATE_LIVE"
44448 +.LASF740:
44449 +       .string "hrtimer_restart"
44450 +.LASF1067:
44451 +       .string "drv_attrs"
44452 +.LASF991:
44453 +       .string "kernel_symbol"
44454 +.LASF1080:
44455 +       .string "mod_name"
44456 +.LASF1180:
44457 +       .string "dqb_bhardlimit"
44458 +.LASF1279:
44459 +       .string "write_dquot"
44460 +.LASF313:
44461 +       .string "wait_queue_t"
44462 +.LASF923:
44463 +       .string "Elf32_Sym"
44464 +.LASF1300:
44465 +       .string "address_space_operations"
44466 +.LASF987:
44467 +       .string "filter"
44468 +.LASF1371:
44469 +       .string "permission"
44470 +.LASF239:
44471 +       .string "oomkilladj"
44472 +.LASF111:
44473 +       .string "totalram"
44474 +.LASF194:
44475 +       .string "ptrace_list"
44476 +.LASF188:
44477 +       .string "policy"
44478 +.LASF1591:
44479 +       .string "run_timer_softirq"
44480 +.LASF1076:
44481 +       .string "drivers_autoprobe"
44482 +.LASF898:
44483 +       .string "softirq"
44484 +.LASF722:
44485 +       .string "plist_head"
44486 +.LASF1651:
44487 +       .string "__kstrtab___mod_timer"
44488 +.LASF461:
44489 +       .string "sigset_t"
44490 +.LASF1305:
44491 +       .string "set_page_dirty"
44492 +.LASF250:
44493 +       .string "real_blocked"
44494 +.LASF7:
44495 +       .string "__kernel_ssize_t"
44496 +.LASF497:
44497 +       .string "si_code"
44498 +.LASF200:
44499 +       .string "pdeath_signal"
44500 +.LASF1330:
44501 +       .string "private_list"
44502 +.LASF1367:
44503 +       .string "readlink"
44504 +.LASF1710:
44505 +       .string "prof_on"
44506 +.LASF79:
44507 +       .string "int_revectored"
44508 +.LASF534:
44509 +       .string "d_iname"
44510 +.LASF869:
44511 +       .string "oublock"
44512 +.LASF1688:
44513 +       .string "platform_enable_wakeup"
44514 +.LASF733:
44515 +       .string "function"
44516 +.LASF1587:
44517 +       .string "__run_timers"
44518 +.LASF1357:
44519 +       .string "inode_operations"
44520 +.LASF1236:
44521 +       .string "dqi_free_blk"
44522 +.LASF621:
44523 +       .string "PIDTYPE_PGID"
44524 +.LASF1393:
44525 +       .string "sendfile"
44526 +.LASF166:
44527 +       .string "previous_esp"
44528 +.LASF464:
44529 +       .string "__restorefn_t"
44530 +.LASF772:
44531 +       .string "ki_ctx"
44532 +.LASF352:
44533 +       .string "mpc_reserved"
44534 +.LASF494:
44535 +       .string "siginfo"
44536 +.LASF1462:
44537 +       .string "destroy_inode"
44538 +.LASF587:
44539 +       .string "zlcache_ptr"
44540 +.LASF1426:
44541 +       .string "mmap_miss"
44542 +.LASF523:
44543 +       .string "d_parent"
44544 +.LASF262:
44545 +       .string "self_exec_id"
44546 +.LASF302:
44547 +       .string "rb_parent_color"
44548 +.LASF10:
44549 +       .string "__kernel_timer_t"
44550 +.LASF1670:
44551 +       .string "timers_nb"
44552 +.LASF1084:
44553 +       .string "class_dev_attrs"
44554 +.LASF96:
44555 +       .string "___cs"
44556 +.LASF968:
44557 +       .string "strtab"
44558 +.LASF820:
44559 +       .string "tail"
44560 +.LASF709:
44561 +       .string "env_end"
44562 +.LASF388:
44563 +       .string "devt_attr"
44564 +.LASF593:
44565 +       .string "mutex"
44566 +.LASF459:
44567 +       .string "sysv_sem"
44568 +.LASF320:
44569 +       .string "wait_queue_head_t"
44570 +.LASF1234:
44571 +       .string "v2_mem_dqinfo"
44572 +.LASF1282:
44573 +       .string "mark_dirty"
44574 +.LASF500:
44575 +       .string "user_struct"
44576 +.LASF1650:
44577 +       .string "__ksymtab_init_timer_deferrable"
44578 +.LASF159:
44579 +       .string "io_bitmap_max"
44580 +.LASF91:
44581 +       .string "___ds"
44582 +.LASF925:
44583 +       .string "module"
44584 +.LASF993:
44585 +       .string "module_attribute"
44586 +.LASF777:
44587 +       .string "ki_user_data"
44588 +.LASF727:
44589 +       .string "rlim_max"
44590 +.LASF307:
44591 +       .string "next"
44592 +.LASF942:
44593 +       .string "num_unused_gpl_syms"
44594 +.LASF893:
44595 +       .string "futex_pi_state"
44596 +.LASF1488:
44597 +       .string "mtd_info"
44598 +.LASF1704:
44599 +       .string "nr_threads"
44600 +.LASF1624:
44601 +       .string "_________p1"
44602 +.LASF1021:
44603 +       .string "chip_data"
44604 +.LASF1326:
44605 +       .string "nrpages"
44606 +.LASF1274:
44607 +       .string "alloc_space"
44608 +.LASF1273:
44609 +       .string "drop"
44610 +.LASF155:
44611 +       .string "saved_fs"
44612 +.LASF1593:
44613 +       .string "mem_total"
44614 +.LASF410:
44615 +       .string "smp_send_stop"
44616 +.LASF900:
44617 +       .string "iowait"
44618 +.LASF540:
44619 +       .string "nr_free"
44620 +.LASF818:
44621 +       .string "ring_lock"
44622 +.LASF1501:
44623 +       .string "lockless_freelist"
44624 +.LASF186:
44625 +       .string "sched_time"
44626 +.LASF1318:
44627 +       .string "host"
44628 +.LASF131:
44629 +       .string "padding"
44630 +.LASF990:
44631 +       .string "mod_arch_specific"
44632 +.LASF1456:
44633 +       .string "get_sb"
44634 +.LASF36:
44635 +       .string "_Bool"
44636 +.LASF1619:
44637 +       .string "update_process_times"
44638 +.LASF1450:
44639 +       .string "magic"
44640 +.LASF93:
44641 +       .string "___fs"
44642 +.LASF1504:
44643 +       .string "freelist"
44644 +.LASF645:
44645 +       .string "i_atime"
44646 +.LASF548:
44647 +       .string "zone"
44648 +.LASF539:
44649 +       .string "free_list"
44650 +.LASF156:
44651 +       .string "saved_gs"
44652 +.LASF668:
44653 +       .string "dirtied_when"
44654 +.LASF1090:
44655 +       .string "class_device"
44656 +.LASF896:
44657 +       .string "nice"
44658 +.LASF415:
44659 +       .string "probe"
44660 +.LASF915:
44661 +       .string "Elf32_Word"
44662 +.LASF1469:
44663 +       .string "put_super"
44664 +.LASF973:
44665 +       .string "attrs"
44666 +.LASF215:
44667 +       .string "utime"
44668 +.LASF1188:
44669 +       .string "dqb_valid"
44670 +.LASF571:
44671 +       .string "spanned_pages"
44672 +.LASF751:
44673 +       .string "softirq_time"
44674 +.LASF1633:
44675 +       .string "add_timer_on"
44676 +.LASF1634:
44677 +       .string "__kstrtab_jiffies_64"
44678 +.LASF140:
44679 +       .string "fxsave"
44680 +.LASF482:
44681 +       .string "_sigval"
44682 +.LASF519:
44683 +       .string "d_flags"
44684 +.LASF736:
44685 +       .string "tvec_t_base_s"
44686 +.LASF208:
44687 +       .string "group_leader"
44688 +.LASF265:
44689 +       .string "pi_waiters"
44690 +.LASF995:
44691 +       .string "setup"
44692 +.LASF428:
44693 +       .string "setup_apic_routing"
44694 +.LASF1659:
44695 +       .string "__kstrtab_del_timer_sync"
44696 +.LASF117:
44697 +       .string "procs"
44698 +.LASF1486:
44699 +       .string "find_exported_dentry"
44700 +.LASF892:
44701 +       .string "unplug_io_data"
44702 +.LASF574:
44703 +       .string "node_zones"
44704 +.LASF1276:
44705 +       .string "free_space"
44706 +.LASF958:
44707 +       .string "unsafe"
44708 +.LASF1717:
44709 +       .string "rec_event"
44710 +.LASF1547:
44711 +       .string "raw_local_irq_enable"
44712 +.LASF1315:
44713 +       .string "launder_page"
44714 +.LASF66:
44715 +       .string "__csh"
44716 +.LASF1553:
44717 +       .string "calc_load"
44718 +.LASF1563:
44719 +       .string "setup_timer"
44720 +.LASF1647:
44721 +       .string "__kstrtab_init_timer"
44722 +.LASF1053:
44723 +       .string "n_ref"
44724 +.LASF1239:
44725 +       .string "v1_i"
44726 +.LASF1026:
44727 +       .string "affinity"
44728 +.LASF568:
44729 +       .string "wait_table_bits"
44730 +.LASF1696:
44731 +       .string "cpu_callout_map"
44732 +.LASF520:
44733 +       .string "d_lock"
44734 +.LASF559:
44735 +       .string "nr_scan_inactive"
44736 +.LASF981:
44737 +       .string "store"
44738 +.LASF375:
44739 +       .string "pm_parent"
44740 +.LASF1096:
44741 +       .string "softirq_action"
44742 +.LASF655:
44743 +       .string "i_op"
44744 +.LASF616:
44745 +       .string "donelist"
44746 +.LASF946:
44747 +       .string "gpl_future_crcs"
44748 +.LASF752:
44749 +       .string "hrtimer_cpu_base"
44750 +.LASF267:
44751 +       .string "journal_info"
44752 +.LASF220:
44753 +       .string "min_flt"
44754 +.LASF937:
44755 +       .string "gpl_crcs"
44756 +.LASF934:
44757 +       .string "crcs"
44758 +.LASF1291:
44759 +       .string "set_dqblk"
44760 +.LASF364:
44761 +       .string "mask"
44762 +.LASF353:
44763 +       .string "mpc_config_bus"
44764 +.LASF1686:
44765 +       .string "jiffies_64"
44766 +.LASF737:
44767 +       .string "running_timer"
44768 +.LASF365:
44769 +       .string "physid_mask_t"
44770 +.LASF32:
44771 +       .string "mode_t"
44772 +.LASF346:
44773 +       .string "mpc_type"
44774 +.LASF125:
44775 +       .string "st_space"
44776 +.LASF710:
44777 +       .string "saved_auxv"
44778 +.LASF1267:
44779 +       .string "free_file_info"
44780 +.LASF1415:
44781 +       .string "fl_lmops"
44782 +.LASF1281:
44783 +       .string "release_dquot"
44784 +.LASF185:
44785 +       .string "last_ran_j"
44786 +.LASF213:
44787 +       .string "clear_child_tid"
44788 +.LASF1145:
44789 +       .string "s_dquot"
44790 +.LASF1122:
44791 +       .string "s_type"
44792 +.LASF1709:
44793 +       .string "per_cpu__irq_regs"
44794 +.LASF481:
44795 +       .string "_pad"
44796 +.LASF1255:
44797 +       .string "dq_count"
44798 +.LASF877:
44799 +       .string "blocks"
44800 +.LASF59:
44801 +       .string "restart_block"
44802 +.LASF409:
44803 +       .string "smp_cpus_done"
44804 +.LASF779:
44805 +       .string "ki_pos"
44806 +.LASF1191:
44807 +       .string "dqi_igrace"
44808 +.LASF1049:
44809 +       .string "k_list"
44810 +.LASF988:
44811 +       .string "uevent"
44812 +.LASF551:
44813 +       .string "pages_high"
44814 +.LASF1118:
44815 +       .string "s_blocksize"
44816 +.LASF1557:
44817 +       .string "timer_pending"
44818 +.LASF975:
44819 +       .string "k_name"
44820 +.LASF746:
44821 +       .string "index"
44822 +.LASF1506:
44823 +       .string "vm_operations_struct"
44824 +.LASF754:
44825 +       .string "clock_base"
44826 +.LASF1481:
44827 +       .string "decode_fh"
44828 +.LASF1092:
44829 +       .string "class_id"
44830 +.LASF913:
44831 +       .string "Elf32_Addr"
44832 +.LASF702:
44833 +       .string "start_data"
44834 +.LASF939:
44835 +       .string "num_unused_syms"
44836 +.LASF1134:
44837 +       .string "s_need_sync_fs"
44838 +.LASF202:
44839 +       .string "did_exec"
44840 +.LASF852:
44841 +       .string "notify_count"
44842 +.LASF161:
44843 +       .string "task"
44844 +.LASF289:
44845 +       .string "rwlock_t"
44846 +.LASF249:
44847 +       .string "blocked"
44848 +.LASF1719:
44849 +       .string "kernel/timer.c"
44850 +.LASF729:
44851 +       .string "tv64"
44852 +.LASF1544:
44853 +       .string "detach_timer"
44854 +.LASF1642:
44855 +       .string "__ksymtab___round_jiffies_relative"
44856 +.LASF424:
44857 +       .string "no_balance_irq"
44858 +.LASF1604:
44859 +       .string "do_timer"
44860 +.LASF657:
44861 +       .string "i_sb"
44862 +.LASF1620:
44863 +       .string "user_tick"
44864 +.LASF1600:
44865 +       .string "sys_alarm"
44866 +.LASF826:
44867 +       .string "vm_end"
44868 +.LASF1491:
44869 +       .string "error"
44870 +.LASF246:
44871 +       .string "nsproxy"
44872 +.LASF68:
44873 +       .string "__ssh"
44874 +.LASF1349:
44875 +       .string "bd_inode_backing_dev_info"
44876 +.LASF34:
44877 +       .string "timer_t"
44878 +.LASF661:
44879 +       .string "i_devices"
44880 +.LASF261:
44881 +       .string "parent_exec_id"
44882 +.LASF880:
44883 +       .string "SLEEP_INTERACTIVE"
44884 +.LASF631:
44885 +       .string "inode"
44886 +.LASF894:
44887 +       .string "pipe_inode_info"
44888 +.LASF1297:
44889 +       .string "dqio_mutex"
44890 +.LASF1057:
44891 +       .string "bus_attribute"
44892 +.LASF1358:
44893 +       .string "create"
44894 +.LASF1556:
44895 +       .string "rep_nop"
44896 +.LASF1346:
44897 +       .string "bd_invalidated"
44898 +.LASF1070:
44899 +       .string "match"
44900 +.LASF362:
44901 +       .string "trans_reserved"
44902 +.LASF760:
44903 +       .string "timer"
44904 +.LASF773:
44905 +       .string "ki_cancel"
44906 +.LASF1535:
44907 +       .string "flag"
44908 +.LASF1206:
44909 +       .string "d_iwarns"
44910 +.LASF454:
44911 +       .string "semid"
44912 +.LASF1094:
44913 +       .string "dma_coherent_mem"
44914 +.LASF1573:
44915 +       .string "init_timer_deferrable"
44916 +.LASF420:
44917 +       .string "ESR_DISABLE"
44918 +.LASF1073:
44919 +       .string "suspend_late"
44920 +.LASF486:
44921 +       .string "_stime"
44922 +.LASF321:
44923 +       .string "rw_semaphore"
44924 +.LASF843:
44925 +       .string "session"
44926 +.LASF1379:
44927 +       .string "file_operations"
44928 +.LASF1459:
44929 +       .string "s_lock_key"
44930 +.LASF1492:
44931 +       .string "read_descriptor_t"
44932 +.LASF624:
44933 +       .string "pid_chain"
44934 +.LASF1701:
44935 +       .string "per_cpu__rcu_bh_data"
44936 +.LASF884:
44937 +       .string "files_struct"
44938 +.LASF247:
44939 +       .string "signal"
44940 +.LASF1400:
44941 +       .string "file_lock"
44942 +.LASF1648:
44943 +       .string "__ksymtab_init_timer"
44944 +.LASF282:
44945 +       .string "lock_class_key"
44946 +.LASF470:
44947 +       .string "sa_mask"
44948 +.LASF1194:
44949 +       .string "fs_disk_quota"
44950 +.LASF590:
44951 +       .string "page"
44952 +.LASF713:
44953 +       .string "faultstamp"
44954 +.LASF1365:
44955 +       .string "mknod"
44956 +.LASF785:
44957 +       .string "ki_inline_vec"
44958 +.LASF383:
44959 +       .string "bus_id"
44960 +.LASF1359:
44961 +       .string "lookup"
44962 +.LASF1310:
44963 +       .string "invalidatepage"
44964 +.LASF1478:
44965 +       .string "show_options"
44966 +.LASF344:
44967 +       .string "reserved"
44968 +.LASF176:
44969 +       .string "static_prio"
44970 +.LASF1106:
44971 +       .string "d_child"
44972 +.LASF119:
44973 +       .string "freehigh"
44974 +.LASF38:
44975 +       .string "gid_t"
44976 +.LASF2:
44977 +       .string "short unsigned int"
44978 +.LASF450:
44979 +       .string "refcount"
44980 +.LASF698:
44981 +       .string "def_flags"
44982 +.LASF542:
44983 +       .string "per_cpu_pages"
44984 +.LASF950:
44985 +       .string "module_init"
44986 +.LASF1355:
44987 +       .string "i_cdev"
44988 +.LASF1537:
44989 +       .string "kmalloc"
44990 +.LASF1130:
44991 +       .string "s_umount"
44992 +.LASF851:
44993 +       .string "group_exit_task"
44994 +.LASF1350:
44995 +       .string "bd_private"
44996 +.LASF477:
44997 +       .string "_pid"
44998 +.LASF1329:
44999 +       .string "private_lock"
45000 +.LASF1352:
45001 +       .string "gendisk"
45002 +.LASF648:
45003 +       .string "i_blkbits"
45004 +.LASF903:
45005 +       .string "cpustat"
45006 +.LASF1256:
45007 +       .string "dq_wait_unused"
45008 +.LASF1561:
45009 +       .string "get_current"
45010 +.LASF1428:
45011 +       .string "fu_list"
45012 +.LASF357:
45013 +       .string "trans_len"
45014 +.LASF251:
45015 +       .string "saved_sigmask"
45016 +.LASF1375:
45017 +       .string "getxattr"
45018 +.LASF504:
45019 +       .string "inotify_watches"
45020 +.LASF856:
45021 +       .string "it_real_incr"
45022 +.LASF803:
45023 +       .string "f_ep_links"
45024 +.LASF871:
45025 +       .string "coublock"
45026 +.LASF1017:
45027 +       .string "handle_irq"
45028 +.LASF599:
45029 +       .string "rwsem"
45030 +.LASF1129:
45031 +       .string "s_root"
45032 +.LASF339:
45033 +       .string "mpc_productid"
45034 +.LASF1475:
45035 +       .string "remount_fs"
45036 +.LASF332:
45037 +       .string "cputime64_t"
45038 +.LASF627:
45039 +       .string "seccomp_t"
45040 +.LASF1219:
45041 +       .string "qfs_nextents"
45042 +.LASF1656:
45043 +       .string "__ksymtab_del_timer"
45044 +.LASF742:
45045 +       .string "HRTIMER_RESTART"
45046 +.LASF735:
45047 +       .string "base"
45048 +.LASF1137:
45049 +       .string "s_inodes"
45050 +.LASF560:
45051 +       .string "pages_scanned"
45052 +.LASF1007:
45053 +       .string "address"
45054 +.LASF1496:
45055 +       .string "seq_file"
45056 +.LASF382:
45057 +       .string "kobj"
45058 +.LASF165:
45059 +       .string "sysenter_return"
45060 +.LASF327:
45061 +       .string "wait"
45062 +.LASF456:
45063 +       .string "sem_undo_list"
45064 +.LASF1208:
45065 +       .string "d_padding2"
45066 +.LASF1214:
45067 +       .string "d_padding3"
45068 +.LASF1215:
45069 +       .string "d_padding4"
45070 +.LASF1536:
45071 +       .string "test_tsk_thread_flag"
45072 +.LASF433:
45073 +       .string "apicid_to_cpu_present"
45074 +.LASF695:
45075 +       .string "exec_vm"
45076 +.LASF1585:
45077 +       .string "init_timer_stats"
45078 +.LASF533:
45079 +       .string "d_mounted"
45080 +.LASF715:
45081 +       .string "last_interval"
45082 +.LASF1312:
45083 +       .string "direct_IO"
45084 +.LASF955:
45085 +       .string "core_text_size"
45086 +.LASF217:
45087 +       .string "nvcsw"
45088 +.LASF1095:
45089 +       .string "irq_handler_t"
45090 +.LASF1183:
45091 +       .string "dqb_ihardlimit"
45092 +.LASF807:
45093 +       .string "users"
45094 +.LASF823:
45095 +       .string "vm_area_struct"
45096 +.LASF1522:
45097 +       .string "number"
45098 +.LASF573:
45099 +       .string "pglist_data"
45100 +.LASF286:
45101 +       .string "raw_rwlock_t"
45102 +.LASF839:
45103 +       .string "sighand_struct"
45104 +.LASF1164:
45105 +       .string "gfp_mask"
45106 +.LASF1008:
45107 +       .string "module_sect_attrs"
45108 +.LASF58:
45109 +       .string "pgprot_t"
45110 +.LASF269:
45111 +       .string "bio_tail"
45112 +.LASF980:
45113 +       .string "show"
45114 +.LASF29:
45115 +       .string "long long unsigned int"
45116 +.LASF1136:
45117 +       .string "s_xattr"
45118 +.LASF780:
45119 +       .string "ki_bio_count"
45120 +.LASF1290:
45121 +       .string "get_dqblk"
45122 +.LASF1674:
45123 +       .string "__ksymtab_msleep_interruptible"
45124 +.LASF1440:
45125 +       .string "fl_break"
45126 +.LASF1289:
45127 +       .string "set_info"
45128 +.LASF1520:
45129 +       .string "event_spec"
45130 +.LASF567:
45131 +       .string "wait_table_hash_nr_entries"
45132 +.LASF510:
45133 +       .string "fs_struct"
45134 +.LASF21:
45135 +       .string "unsigned char"
45136 +.LASF907:
45137 +       .string "rdev"
45138 +.LASF890:
45139 +       .string "congested_data"
45140 +.LASF373:
45141 +       .string "prev_state"
45142 +.LASF921:
45143 +       .string "st_other"
45144 +.LASF347:
45145 +       .string "mpc_apicid"
45146 +.LASF489:
45147 +       .string "_kill"
45148 +.LASF1685:
45149 +       .string "time_status"
45150 +.LASF1292:
45151 +       .string "get_xstate"
45152 +.LASF476:
45153 +       .string "sigval_t"
45154 +.LASF1124:
45155 +       .string "dq_op"
45156 +.LASF1526:
45157 +       .string "tbase_get_deferrable"
45158 +.LASF1429:
45159 +       .string "fu_rcuhead"
45160 +.LASF819:
45161 +       .string "nr_pages"
45162 +.LASF1621:
45163 +       .string "sys_getpid"
45164 +.LASF1493:
45165 +       .string "read_actor_t"
45166 +.LASF293:
45167 +       .string "kernel_cap_t"
45168 +.LASF1452:
45169 +       .string "fa_next"
45170 +.LASF761:
45171 +       .string "io_event"
45172 +.LASF798:
45173 +       .string "f_uid"
45174 +.LASF959:
45175 +       .string "taints"
45176 +.LASF283:
45177 +       .string "slock"
45178 +.LASF532:
45179 +       .string "d_cookie"
45180 +.LASF1107:
45181 +       .string "d_rcu"
45182 +.LASF1328:
45183 +       .string "a_ops"
45184 +.LASF1082:
45185 +       .string "class_dirs"
45186 +.LASF371:
45187 +       .string "can_wakeup"
45188 +.LASF878:
45189 +       .string "SLEEP_NORMAL"
45190 +.LASF1511:
45191 +       .string "page_mkwrite"
45192 +.LASF1148:
45193 +       .string "s_id"
45194 +.LASF11:
45195 +       .string "__kernel_clockid_t"
45196 +.LASF401:
45197 +       .string "class"
45198 +.LASF1139:
45199 +       .string "s_io"
45200 +.LASF1209:
45201 +       .string "d_rtb_hardlimit"
45202 +.LASF1366:
45203 +       .string "rename"
45204 +.LASF226:
45205 +       .string "euid"
45206 +.LASF743:
45207 +       .string "hrtimer"
45208 +.LASF1397:
45209 +       .string "flock"
45210 +.LASF529:
45211 +       .string "d_op"
45212 +.LASF1421:
45213 +       .string "cache_hit"
45214 +.LASF1533:
45215 +       .string "variable_test_bit"
45216 +.LASF563:
45217 +       .string "vm_stat"
45218 +.LASF632:
45219 +       .string "i_hash"
45220 +.LASF223:
45221 +       .string "it_virt_expires"
45222 +.LASF1487:
45223 +       .string "xattr_handler"
45224 +.LASF234:
45225 +       .string "cap_inheritable"
45226 +.LASF726:
45227 +       .string "rlim_cur"
45228 +.LASF358:
45229 +       .string "trans_type"
45230 +.LASF920:
45231 +       .string "st_info"
45232 +.LASF391:
45233 +       .string "platform_data"
45234 +.LASF1465:
45235 +       .string "write_inode"
45236 +.LASF141:
45237 +       .string "soft"
45238 +.LASF463:
45239 +       .string "__sighandler_t"
45240 +.LASF5:
45241 +       .string "__kernel_pid_t"
45242 +.LASF1158:
45243 +       .string "open_intent"
45244 +.LASF1599:
45245 +       .string "__data"
45246 +.LASF82:
45247 +       .string "info"
45248 +.LASF146:
45249 +       .string "sysenter_cs"
45250 +.LASF1014:
45251 +       .string "irqreturn_t"
45252 +.LASF1323:
45253 +       .string "i_mmap_nonlinear"
45254 +.LASF1663:
45255 +       .string "__kstrtab_schedule_timeout"
45256 +.LASF1552:
45257 +       .string "read_seqretry"
45258 +.LASF775:
45259 +       .string "ki_dtor"
45260 +.LASF253:
45261 +       .string "sas_ss_sp"
45262 +.LASF384:
45263 +       .string "type"
45264 +.LASF1460:
45265 +       .string "s_umount_key"
45266 +.LASF195:
45267 +       .string "active_mm"
45268 +.LASF335:
45269 +       .string "mpc_length"
45270 +.LASF1217:
45271 +       .string "qfs_ino"
45272 +.LASF1199:
45273 +       .string "d_blk_softlimit"
45274 +.LASF308:
45275 +       .string "prev"
45276 +.LASF47:
45277 +       .string "resource_size_t"
45278 +.LASF248:
45279 +       .string "sighand"
45280 +.LASF866:
45281 +       .string "cmin_flt"
45282 +.LASF1071:
45283 +       .string "remove"
45284 +.LASF1046:
45285 +       .string "child"
45286 +.LASF1114:
45287 +       .string "d_dname"
45288 +.LASF1000:
45289 +       .string "module_ref"
45290 +.LASF1254:
45291 +       .string "dq_lock"
45292 +.LASF413:
45293 +       .string "genapic"
45294 +.LASF630:
45295 +       .string "list_op_pending"
45296 +.LASF1322:
45297 +       .string "i_mmap"
45298 +.LASF1263:
45299 +       .string "quota_format_ops"
45300 +.LASF725:
45301 +       .string "rlimit"
45302 +.LASF97:
45303 +       .string "___eflags"
45304 +.LASF1238:
45305 +       .string "mem_dqblk"
45306 +.LASF629:
45307 +       .string "futex_offset"
45308 +.LASF1441:
45309 +       .string "fl_mylease"
45310 +.LASF279:
45311 +       .string "pi_state_cache"
45312 +.LASF329:
45313 +       .string "vdso"
45314 +.LASF537:
45315 +       .string "done"
45316 +.LASF1031:
45317 +       .string "enable"
45318 +.LASF906:
45319 +       .string "nlink"
45320 +.LASF618:
45321 +       .string "blimit"
45322 +.LASF1446:
45323 +       .string "nfs4_lock_state"
45324 +.LASF291:
45325 +       .string "atomic_t"
45326 +.LASF1161:
45327 +       .string "path"
45328 +.LASF825:
45329 +       .string "vm_start"
45330 +.LASF833:
45331 +       .string "anon_vma"
45332 +.LASF666:
45333 +       .string "inotify_mutex"
45334 +.LASF1602:
45335 +       .string "update_times"
45336 +.LASF1324:
45337 +       .string "i_mmap_lock"
45338 +.LASF1546:
45339 +       .string "__raw_spin_unlock"
45340 +.LASF949:
45341 +       .string "init"
45342 +.LASF572:
45343 +       .string "present_pages"
45344 +.LASF1680:
45345 +       .string "current_stack_pointer"
45346 +.LASF997:
45347 +       .string "free"
45348 +.LASF850:
45349 +       .string "group_exit_code"
45350 +.LASF1048:
45351 +       .string "k_lock"
45352 +.LASF628:
45353 +       .string "robust_list_head"
45354 +.LASF1065:
45355 +       .string "bus_attrs"
45356 +.LASF541:
45357 +       .string "zone_padding"
45358 +.LASF1369:
45359 +       .string "put_link"
45360 +.LASF688:
45361 +       .string "_file_rss"
45362 +.LASF1314:
45363 +       .string "migratepage"
45364 +.LASF1123:
45365 +       .string "s_op"
45366 +.LASF956:
45367 +       .string "unwind_info"
45368 +.LASF1019:
45369 +       .string "msi_desc"
45370 +.LASF1410:
45371 +       .string "fl_start"
45372 +.LASF1171:
45373 +       .string "ia_gid"
45374 +.LASF1303:
45375 +       .string "sync_page"
45376 +.LASF349:
45377 +       .string "mpc_cpuflag"
45378 +.LASF183:
45379 +       .string "last_ran"
45380 +.LASF1721:
45381 +       .string "run_local_timers"
45382 +.LASF460:
45383 +       .string "undo_list"
45384 +.LASF1006:
45385 +       .string "mattr"
45386 +.LASF399:
45387 +       .string "devres_head"
45388 +.LASF227:
45389 +       .string "suid"
45390 +.LASF1502:
45391 +       .string "slab"
45392 +.LASF1001:
45393 +       .string "module_state"
45394 +.LASF1128:
45395 +       .string "s_magic"
45396 +.LASF1534:
45397 +       .string "test_ti_thread_flag"
45398 +.LASF810:
45399 +       .string "ctx_lock"
45400 +.LASF244:
45401 +       .string "thread"
45402 +.LASF1627:
45403 +       .string "sys_getgid"
45404 +.LASF931:
45405 +       .string "holders_dir"
45406 +.LASF1086:
45407 +       .string "class_release"
45408 +.LASF883:
45409 +       .string "linux_binfmt"
45410 +.LASF1589:
45411 +       .string "__dummy"
45412 +.LASF441:
45413 +       .string "mps_oem_check"
45414 +.LASF1665:
45415 +       .string "__kstrtab_schedule_timeout_interruptible"
45416 +.LASF1574:
45417 +       .string "cascade"
45418 +.LASF658:
45419 +       .string "i_flock"
45420 +.LASF924:
45421 +       .string "attribute"
45422 +.LASF835:
45423 +       .string "vm_pgoff"
45424 +.LASF770:
45425 +       .string "ki_key"
45426 +.LASF676:
45427 +       .string "get_unmapped_area"
45428 +.LASF443:
45429 +       .string "get_apic_id"
45430 +.LASF1009:
45431 +       .string "nsections"
45432 +.LASF1494:
45433 +       .string "poll_table_struct"
45434 +.LASF1575:
45435 +       .string "tv_list"
45436 +.LASF625:
45437 +       .string "pid_link"
45438 +.LASF686:
45439 +       .string "page_table_lock"
45440 +.LASF170:
45441 +       .string "stack"
45442 +.LASF928:
45443 +       .string "modinfo_attrs"
45444 +.LASF1296:
45445 +       .string "quota_info"
45446 +.LASF290:
45447 +       .string "counter"
45448 +.LASF1294:
45449 +       .string "get_xquota"
45450 +.LASF837:
45451 +       .string "vm_private_data"
45452 +.LASF1119:
45453 +       .string "s_blocksize_bits"
45454 +.LASF255:
45455 +       .string "notifier"
45456 +.LASF306:
45457 +       .string "list_head"
45458 +.LASF1025:
45459 +       .string "irqs_unhandled"
45460 +.LASF312:
45461 +       .string "pprev"
45462 +.LASF663:
45463 +       .string "i_generation"
45464 +.LASF442:
45465 +       .string "acpi_madt_oem_check"
45466 +.LASF417:
45467 +       .string "target_cpus"
45468 +.LASF797:
45469 +       .string "f_owner"
45470 +.LASF1176:
45471 +       .string "ia_file"
45472 +.LASF238:
45473 +       .string "fpu_counter"
45474 +.LASF1412:
45475 +       .string "fl_fasync"
45476 +.LASF1054:
45477 +       .string "n_removed"
45478 +.LASF910:
45479 +       .string "ctime"
45480 +.LASF1200:
45481 +       .string "d_ino_hardlimit"
45482 +.LASF1093:
45483 +       .string "device_type"
45484 +.LASF1652:
45485 +       .string "__ksymtab___mod_timer"
45486 +.LASF135:
45487 +       .string "lookahead"
45488 +.LASF1581:
45489 +       .string "tvec_base_done"
45490 +.LASF37:
45491 +       .string "uid_t"
45492 +.LASF801:
45493 +       .string "f_version"
45494 +.LASF129:
45495 +       .string "mxcsr_mask"
45496 +.LASF1278:
45497 +       .string "transfer"
45498 +.LASF1419:
45499 +       .string "signum"
45500 +.LASF517:
45501 +       .string "dentry"
45502 +.LASF985:
45503 +       .string "default_attrs"
45504 +.LASF947:
45505 +       .string "num_exentries"
45506 +.LASF1157:
45507 +       .string "intent"
45508 +.LASF789:
45509 +       .string "ki_list"
45510 +.LASF160:
45511 +       .string "thread_info"
45512 +.LASF1432:
45513 +       .string "fl_insert"
45514 +.LASF1654:
45515 +       .string "__ksymtab_mod_timer"
45516 +.LASF811:
45517 +       .string "reqs_active"
45518 +.LASF583:
45519 +       .string "kswapd_wait"
45520 +.LASF707:
45521 +       .string "arg_end"
45522 +.LASF1387:
45523 +       .string "unlocked_ioctl"
45524 +.LASF1074:
45525 +       .string "resume_early"
45526 +.LASF860:
45527 +       .string "tty_old_pgrp"
45528 +.LASF1669:
45529 +       .string "base_lock_keys"
45530 +.LASF1420:
45531 +       .string "file_ra_state"
45532 +.LASF505:
45533 +       .string "inotify_devs"
45534 +.LASF638:
45535 +       .string "i_nlink"
45536 +.LASF273:
45537 +       .string "ptrace_message"
45538 +.LASF933:
45539 +       .string "num_syms"
45540 +.LASF578:
45541 +       .string "bdata"
45542 +.LASF1542:
45543 +       .string "timer_stats_account_timer"
45544 +.LASF177:
45545 +       .string "normal_prio"
45546 +.LASF1402:
45547 +       .string "fl_link"
45548 +.LASF787:
45549 +       .string "ki_nr_segs"
45550 +.LASF1617:
45551 +       .string "signal_pending"
45552 +.LASF429:
45553 +       .string "multi_timer_check"
45554 +.LASF840:
45555 +       .string "action"
45556 +.LASF977:
45557 +       .string "ktype"
45558 +.LASF1513:
45559 +       .string "event_data"
45560 +.LASF723:
45561 +       .string "prio_list"
45562 +.LASF1061:
45563 +       .string "devices"
45564 +.LASF1231:
45565 +       .string "qs_bwarnlimit"
45566 +.LASF609:
45567 +       .string "passed_quiesc"
45568 +.LASF1286:
45569 +       .string "quota_off"
45570 +.LASF1040:
45571 +       .string "irqaction"
45572 +.LASF1353:
45573 +       .string "i_pipe"
45574 +.LASF865:
45575 +       .string "cnivcsw"
45576 +.LASF730:
45577 +       .string "ktime_t"
45578 +.LASF49:
45579 +       .string "arg1"
45580 +.LASF225:
45581 +       .string "cpu_timers"
45582 +.LASF699:
45583 +       .string "nr_ptes"
45584 +.LASF1059:
45585 +       .string "subsys"
45586 +.LASF45:
45587 +       .string "blkcnt_t"
45588 +.LASF1079:
45589 +       .string "device_driver"
45590 +.LASF1697:
45591 +       .string "mem_map"
45592 +.LASF1012:
45593 +       .string "insn"
45594 +.LASF94:
45595 +       .string "___orig_eax"
45596 +.LASF8:
45597 +       .string "__kernel_time_t"
45598 +.LASF44:
45599 +       .string "sector_t"
45600 +.LASF435:
45601 +       .string "setup_portio_remap"
45602 +.LASF395:
45603 +       .string "dma_pools"
45604 +.LASF1417:
45605 +       .string "dnotify_struct"
45606 +.LASF366:
45607 +       .string "pm_message"
45608 +.LASF1251:
45609 +       .string "dq_inuse"
45610 +.LASF1695:
45611 +       .string "per_cpu__cpu_number"
45612 +.LASF704:
45613 +       .string "start_brk"
45614 +.LASF868:
45615 +       .string "inblock"
45616 +.LASF378:
45617 +       .string "klist_children"
45618 +.LASF418:
45619 +       .string "int_delivery_mode"
45620 +.LASF1253:
45621 +       .string "dq_dirty"
45622 +.LASF592:
45623 +       .string "bootmem_data"
45624 +.LASF1192:
45625 +       .string "dqi_flags"
45626 +.LASF1468:
45627 +       .string "delete_inode"
45628 +.LASF1232:
45629 +       .string "qs_iwarnlimit"
45630 +.LASF614:
45631 +       .string "curlist"
45632 +.LASF1235:
45633 +       .string "dqi_blocks"
45634 +.LASF1388:
45635 +       .string "compat_ioctl"
45636 +.LASF1716:
45637 +       .string "swap_token_mm"
45638 +.LASF734:
45639 +       .string "data"
45640 +.LASF336:
45641 +       .string "mpc_spec"
45642 +.LASF342:
45643 +       .string "mpc_oemcount"
45644 +.LASF1632:
45645 +       .string "del_timer"
45646 +.LASF805:
45647 +       .string "f_mapping"
45648 +.LASF673:
45649 +       .string "mmap"
45650 +.LASF1030:
45651 +       .string "shutdown"
45652 +.LASF660:
45653 +       .string "i_data"
45654 +.LASF876:
45655 +       .string "nblocks"
45656 +.LASF1521:
45657 +       .string "dcookie"
45658 +.LASF637:
45659 +       .string "i_count"
45660 +.LASF173:
45661 +       .string "lock_depth"
45662 +.LASF400:
45663 +       .string "node"
45664 +.LASF479:
45665 +       .string "_tid"
45666 +.LASF1592:
45667 +       .string "do_sysinfo"
45668 +.LASF1470:
45669 +       .string "write_super"
45670 +.LASF1705:
45671 +       .string "cad_pid"
45672 +.LASF1362:
45673 +       .string "symlink"
45674 +.LASF879:
45675 +       .string "SLEEP_NONINTERACTIVE"
45676 +.LASF527:
45677 +       .string "d_alias"
45678 +.LASF448:
45679 +       .string "send_IPI_all"
45680 +.LASF620:
45681 +       .string "PIDTYPE_PID"
45682 +.LASF1583:
45683 +       .string "self"
45684 +.LASF647:
45685 +       .string "i_ctime"
45686 +.LASF1408:
45687 +       .string "fl_flags"
45688 +.LASF1087:
45689 +       .string "dev_release"
45690 +.LASF690:
45691 +       .string "hiwater_rss"
45692 +.LASF1313:
45693 +       .string "get_xip_page"
45694 +.LASF1436:
45695 +       .string "lock_manager_operations"
45696 +.LASF651:
45697 +       .string "i_mode"
45698 +.LASF501:
45699 +       .string "__count"
45700 +.LASF376:
45701 +       .string "entry"
45702 +.LASF71:
45703 +       .string "__fsh"
45704 +.LASF1154:
45705 +       .string "nameidata"
45706 +.LASF674:
45707 +       .string "mm_rb"
45708 +.LASF6:
45709 +       .string "__kernel_size_t"
45710 +.LASF281:
45711 +       .string "splice_pipe"
45712 +.LASF193:
45713 +       .string "ptrace_children"
45714 +.LASF488:
45715 +       .string "_band"
45716 +.LASF1169:
45717 +       .string "ia_mode"
45718 +.LASF23:
45719 +       .string "short int"
45720 +.LASF30:
45721 +       .string "__kernel_dev_t"
45722 +.LASF1483:
45723 +       .string "get_name"
45724 +.LASF1549:
45725 +       .string "current_thread_info"
45726 +.LASF423:
45727 +       .string "check_apicid_present"
45728 +.LASF434:
45729 +       .string "mpc_apic_id"
45730 +.LASF601:
45731 +       .string "kmem_cache"
45732 +.LASF495:
45733 +       .string "si_signo"
45734 +.LASF1606:
45735 +       .string "prelock_base"
45736 +.LASF1152:
45737 +       .string "s_subtype"
45738 +.LASF149:
45739 +       .string "error_code"
45740 +.LASF106:
45741 +       .string "file"
45742 +.LASF1174:
45743 +       .string "ia_mtime"
45744 +.LASF1081:
45745 +       .string "interfaces"
45746 +.LASF640:
45747 +       .string "i_gid"
45748 +.LASF1403:
45749 +       .string "fl_block"
45750 +.LASF192:
45751 +       .string "tasks"
45752 +.LASF1085:
45753 +       .string "dev_uevent"
45754 +.LASF292:
45755 +       .string "atomic_long_t"
45756 +.LASF397:
45757 +       .string "archdata"
45758 +.LASF979:
45759 +       .string "sysfs_ops"
45760 +.LASF863:
45761 +       .string "cstime"
45762 +.LASF451:
45763 +       .string "sem_undo"
45764 +.LASF848:
45765 +       .string "curr_target"
45766 +.LASF179:
45767 +       .string "array"
45768 +.LASF1517:
45769 +       .string "tvec_root_s"
45770 +.LASF480:
45771 +       .string "_overrun"
45772 +.LASF272:
45773 +       .string "io_context"
45774 +.LASF816:
45775 +       .string "mmap_size"
45776 +.LASF60:
45777 +       .string "vm86_regs"
45778 +.LASF163:
45779 +       .string "preempt_count"
45780 +.LASF960:
45781 +       .string "bug_list"
45782 +.LASF254:
45783 +       .string "sas_ss_size"
45784 +.LASF1212:
45785 +       .string "d_rtbtimer"
45786 +.LASF210:
45787 +       .string "thread_group"
45788 +.LASF65:
45789 +       .string "orig_eax"
45790 +.LASF416:
45791 +       .string "apic_id_registered"
45792 +.LASF1662:
45793 +       .string "__ksymtab_avenrun"
45794 +.LASF1283:
45795 +       .string "write_info"
45796 +.LASF1141:
45797 +       .string "s_files"
45798 +.LASF719:
45799 +       .string "core_done"
45800 +.LASF1121:
45801 +       .string "s_maxbytes"
45802 +.LASF1386:
45803 +       .string "ioctl"
45804 +.LASF46:
45805 +       .string "gfp_t"
45806 +.LASF1224:
45807 +       .string "qs_pad"
45808 +.LASF577:
45809 +       .string "node_mem_map"
45810 +.LASF1245:
45811 +       .string "qf_fmt_id"
45812 +.LASF1390:
45813 +       .string "fsync"
45814 +.LASF1629:
45815 +       .string "sys_gettid"
45816 +.LASF672:
45817 +       .string "mm_struct"
45818 +.LASF242:
45819 +       .string "total_link_count"
45820 +.LASF976:
45821 +       .string "kset"
45822 +.LASF152:
45823 +       .string "v86flags"
45824 +.LASF187:
45825 +       .string "sleep_type"
45826 +.LASF83:
45827 +       .string "___orig_eip"
45828 +.LASF1668:
45829 +       .string "__ksymtab_schedule_timeout_uninterruptible"
45830 +.LASF1011:
45831 +       .string "exception_table_entry"
45832 +.LASF1140:
45833 +       .string "s_anon"
45834 +.LASF914:
45835 +       .string "Elf32_Half"
45836 +.LASF967:
45837 +       .string "num_symtab"
45838 +.LASF3:
45839 +       .string "long int"
45840 +.LASF943:
45841 +       .string "unused_gpl_crcs"
45842 +.LASF714:
45843 +       .string "token_priority"
45844 +.LASF503:
45845 +       .string "sigpending"
45846 +.LASF1525:
45847 +       .string "INIT_LIST_HEAD"
45848 +.LASF422:
45849 +       .string "check_apicid_used"
45850 +.LASF936:
45851 +       .string "num_gpl_syms"
45852 +.LASF1045:
45853 +       .string "start"
45854 +.LASF706:
45855 +       .string "arg_start"
45856 +.LASF1029:
45857 +       .string "startup"
45858 +.LASF1345:
45859 +       .string "bd_part_count"
45860 +.LASF873:
45861 +       .string "tty_struct"
45862 +.LASF1442:
45863 +       .string "fl_change"
45864 +.LASF392:
45865 +       .string "power"
45866 +.LASF983:
45867 +       .string "uevent_ops"
45868 +.LASF1066:
45869 +       .string "dev_attrs"
45870 +.LASF602:
45871 +       .string "cache_sizes"
45872 +.LASF1205:
45873 +       .string "d_btimer"
45874 +.LASF1317:
45875 +       .string "address_space"
45876 +.LASF969:
45877 +       .string "sect_attrs"
45878 +.LASF1304:
45879 +       .string "writepages"
45880 +.LASF1233:
45881 +       .string "v1_mem_dqinfo"
45882 +.LASF101:
45883 +       .string "___vm86_ds"
45884 +.LASF337:
45885 +       .string "mpc_checksum"
45886 +.LASF786:
45887 +       .string "ki_iovec"
45888 +.LASF1372:
45889 +       .string "setattr"
45890 +.LASF804:
45891 +       .string "f_ep_lock"
45892 +.LASF1528:
45893 +       .string "__list_del"
45894 +.LASF169:
45895 +       .string "state"
45896 +.LASF795:
45897 +       .string "f_mode"
45898 +.LASF1667:
45899 +       .string "__kstrtab_schedule_timeout_uninterruptible"
45900 +.LASF100:
45901 +       .string "___vm86_es"
45902 +.LASF118:
45903 +       .string "totalhigh"
45904 +.LASF233:
45905 +       .string "cap_effective"
45906 +.LASF1356:
45907 +       .string "cdev"
45908 +.LASF778:
45909 +       .string "ki_wait"
45910 +.LASF360:
45911 +       .string "trans_global"
45912 +.LASF1380:
45913 +       .string "llseek"
45914 +.LASF81:
45915 +       .string "pt_regs"
45916 +.LASF245:
45917 +       .string "files"
45918 +.LASF270:
45919 +       .string "reclaim_state"
45920 +.LASF1266:
45921 +       .string "write_file_info"
45922 +.LASF1063:
45923 +       .string "klist_drivers"
45924 +.LASF544:
45925 +       .string "batch"
45926 +.LASF1069:
45927 +       .string "drivers_probe_attr"
45928 +.LASF1144:
45929 +       .string "s_instances"
45930 +.LASF579:
45931 +       .string "node_start_pfn"
45932 +.LASF499:
45933 +       .string "siginfo_t"
45934 +.LASF1655:
45935 +       .string "__kstrtab_del_timer"
45936 +.LASF133:
45937 +       .string "ftop"
45938 +.LASF1335:
45939 +       .string "bd_openers"
45940 +.LASF102:
45941 +       .string "___vm86_fs"
45942 +.LASF693:
45943 +       .string "locked_vm"
45944 +.LASF1316:
45945 +       .string "writeback_control"
45946 +.LASF649:
45947 +       .string "i_blocks"
45948 +.LASF1550:
45949 +       .string "list_empty"
45950 +.LASF1354:
45951 +       .string "i_bdev"
45952 +.LASF1197:
45953 +       .string "d_id"
45954 +.LASF1309:
45955 +       .string "bmap"
45956 +.LASF1476:
45957 +       .string "clear_inode"
45958 +.LASF1539:
45959 +       .string "kmalloc_node"
45960 +.LASF1302:
45961 +       .string "readpage"
45962 +.LASF485:
45963 +       .string "_utime"
45964 +.LASF54:
45965 +       .string "time"
45966 +.LASF103:
45967 +       .string "___vm86_gs"
45968 +.LASF69:
45969 +       .string "__esh"
45970 +.LASF144:
45971 +       .string "tls_array"
45972 +.LASF1272:
45973 +       .string "initialize"
45974 +.LASF1162:
45975 +       .string "radix_tree_root"
45976 +.LASF260:
45977 +       .string "seccomp"
45978 +.LASF776:
45979 +       .string "ki_obj"
45980 +.LASF9:
45981 +       .string "__kernel_clock_t"
45982 +.LASF1630:
45983 +       .string "mod_timer"
45984 +.LASF1601:
45985 +       .string "seconds"
45986 +.LASF665:
45987 +       .string "i_dnotify"
45988 +.LASF492:
45989 +       .string "_sigfault"
45990 +.LASF1373:
45991 +       .string "getattr"
45992 +.LASF1519:
45993 +       .string "tvec_base_t"
45994 +.LASF184:
45995 +       .string "last_interrupted"
45996 +.LASF338:
45997 +       .string "mpc_oem"
45998 +.LASF1472:
45999 +       .string "write_super_lockfs"
46000 +.LASF412:
46001 +       .string "smp_call_function_mask"
46002 +.LASF1098:
46003 +       .string "left"
46004 +.LASF701:
46005 +       .string "end_code"
46006 +.LASF1109:
46007 +       .string "d_revalidate"
46008 +.LASF1681:
46009 +       .string "per_cpu__current_task"
46010 +.LASF1377:
46011 +       .string "removexattr"
46012 +.LASF1135:
46013 +       .string "s_active"
46014 +.LASF764:
46015 +       .string "iov_base"
46016 +.LASF712:
46017 +       .string "context"
46018 +.LASF1658:
46019 +       .string "__ksymtab_try_to_del_timer_sync"
46020 +.LASF575:
46021 +       .string "node_zonelists"
46022 +.LASF507:
46023 +       .string "locked_shm"
46024 +.LASF901:
46025 +       .string "steal"
46026 +.LASF1277:
46027 +       .string "free_inode"
46028 +.LASF1041:
46029 +       .string "handler"
46030 +.LASF1043:
46031 +       .string "proc_dir_entry"
46032 +.LASF89:
46033 +       .string "___ebp"
46034 +.LASF1443:
46035 +       .string "nfs_lock_info"
46036 +.LASF92:
46037 +       .string "___es"
46038 +.LASF300:
46039 +       .string "tv_nsec"
46040 +.LASF483:
46041 +       .string "_sys_private"
46042 +.LASF531:
46043 +       .string "d_fsdata"
46044 +.LASF380:
46045 +       .string "knode_driver"
46046 +.LASF1195:
46047 +       .string "d_version"
46048 +.LASF951:
46049 +       .string "module_core"
46050 +.LASF436:
46051 +       .string "check_phys_apicid_present"
46052 +.LASF680:
46053 +       .string "cached_hole_size"
46054 +.LASF917:
46055 +       .string "st_name"
46056 +.LASF732:
46057 +       .string "expires"
46058 +.LASF1374:
46059 +       .string "setxattr"
46060 +.LASF1649:
46061 +       .string "__kstrtab_init_timer_deferrable"
46062 +.LASF277:
46063 +       .string "robust_list"
46064 +.LASF1042:
46065 +       .string "dev_id"
46066 +.LASF206:
46067 +       .string "children"
46068 +.LASF1275:
46069 +       .string "alloc_inode"
46070 +.LASF266:
46071 +       .string "pi_blocked_on"
46072 +.LASF1327:
46073 +       .string "writeback_index"
46074 +.LASF128:
46075 +       .string "mxcsr"
46076 +.LASF832:
46077 +       .string "anon_vma_node"
46078 +.LASF1527:
46079 +       .string "list_add_tail"
46080 +.LASF498:
46081 +       .string "_sifields"
46082 +.LASF569:
46083 +       .string "zone_pgdat"
46084 +.LASF922:
46085 +       .string "st_shndx"
46086 +.LASF783:
46087 +       .string "ki_buf"
46088 +.LASF218:
46089 +       .string "nivcsw"
46090 +.LASF175:
46091 +       .string "prio"
46092 +.LASF1166:
46093 +       .string "radix_tree_node"
46094 +.LASF275:
46095 +       .string "io_wait"
46096 +.LASF372:
46097 +       .string "should_wakeup"
46098 +.LASF633:
46099 +       .string "i_list"
46100 +.LASF439:
46101 +       .string "mpc_oem_bus_info"
46102 +.LASF1226:
46103 +       .string "qs_gquota"
46104 +.LASF606:
46105 +       .string "rcu_head"
46106 +.LASF1571:
46107 +       .string "__ptr"
46108 +.LASF334:
46109 +       .string "mpc_signature"
46110 +.LASF744:
46111 +       .string "hrtimer_clock_base"
46112 +.LASF759:
46113 +       .string "work"
46114 +.LASF756:
46115 +       .string "work_func_t"
46116 +.LASF1376:
46117 +       .string "listxattr"
46118 +.LASF1050:
46119 +       .string "klist_node"
46120 +.LASF136:
46121 +       .string "no_update"
46122 +.LASF462:
46123 +       .string "__signalfn_t"
46124 +.LASF1112:
46125 +       .string "d_release"
46126 +.LASF1399:
46127 +       .string "splice_read"
46128 +.LASF1427:
46129 +       .string "prev_offset"
46130 +.LASF767:
46131 +       .string "ki_run_list"
46132 +.LASF608:
46133 +       .string "quiescbatch"
46134 +.LASF256:
46135 +       .string "notifier_data"
46136 +.LASF1638:
46137 +       .string "per_cpu__tvec_bases"
46138 +.LASF1361:
46139 +       .string "unlink"
46140 +.LASF403:
46141 +       .string "groups"
46142 +.LASF1105:
46143 +       .string "hash"
46144 +.LASF1560:
46145 +       .string "new_base"
46146 +.LASF114:
46147 +       .string "bufferram"
46148 +.LASF35:
46149 +       .string "clockid_t"
46150 +.LASF331:
46151 +       .string "cputime_t"
46152 +.LASF1715:
46153 +       .string "swapper_space"
46154 +.LASF1132:
46155 +       .string "s_count"
46156 +.LASF932:
46157 +       .string "syms"
46158 +.LASF667:
46159 +       .string "i_state"
46160 +.LASF341:
46161 +       .string "mpc_oemsize"
46162 +.LASF566:
46163 +       .string "wait_table"
46164 +.LASF1010:
46165 +       .string "module_param_attrs"
46166 +.LASF343:
46167 +       .string "mpc_lapic"
46168 +.LASF303:
46169 +       .string "rb_right"
46170 +.LASF774:
46171 +       .string "ki_retry"
46172 +.LASF1165:
46173 +       .string "rnode"
46174 +.LASF19:
46175 +       .string "signed char"
46176 +.LASF112:
46177 +       .string "freeram"
46178 +.LASF656:
46179 +       .string "i_fop"
46180 +.LASF887:
46181 +       .string "ra_pages"
46182 +.LASF944:
46183 +       .string "gpl_future_syms"
46184 +.LASF1693:
46185 +       .string "acpi_pci_disabled"
46186 +.LASF1672:
46187 +       .string "__ksymtab_msleep"
46188 +.LASF1614:
46189 +       .string "msleep"
46190 +.LASF935:
46191 +       .string "gpl_syms"
46192 +.LASF1540:
46193 +       .string "__constant_c_and_count_memset"
46194 +.LASF1039:
46195 +       .string "typename"
46196 +.LASF209:
46197 +       .string "pids"
46198 +.LASF1580:
46199 +       .string "__func__"
46200 +.LASF322:
46201 +       .string "count"
46202 +.LASF1529:
46203 +       .string "list_replace_init"
46204 +.LASF1186:
46205 +       .string "dqb_btime"
46206 +.LASF1543:
46207 +       .string "set_running_timer"
46208 +.LASF589:
46209 +       .string "zonelist_cache"
46210 +.LASF957:
46211 +       .string "arch"
46212 +.LASF1660:
46213 +       .string "__ksymtab_del_timer_sync"
46214 +.LASF1664:
46215 +       .string "__ksymtab_schedule_timeout"
46216 +.LASF1457:
46217 +       .string "kill_sb"
46218 +.LASF1414:
46219 +       .string "fl_ops"
46220 +.LASF1564:
46221 +       .string "original"
46222 +.LASF490:
46223 +       .string "_timer"
46224 +.LASF484:
46225 +       .string "_status"
46226 +.LASF150:
46227 +       .string "i387"
46228 +.LASF904:
46229 +       .string "irqs"
46230 +.LASF1284:
46231 +       .string "quotactl_ops"
46232 +.LASF294:
46233 +       .string "sequence"
46234 +.LASF1181:
46235 +       .string "dqb_bsoftlimit"
46236 +.LASF1613:
46237 +       .string "schedule_timeout_uninterruptible"
46238 +.LASF526:
46239 +       .string "d_subdirs"
46240 +.LASF671:
46241 +       .string "i_private"
46242 +.LASF806:
46243 +       .string "kioctx"
46244 +.LASF854:
46245 +       .string "posix_timers"
46246 +.LASF796:
46247 +       .string "f_pos"
46248 +.LASF311:
46249 +       .string "hlist_node"
46250 +.LASF1147:
46251 +       .string "s_wait_unfrozen"
46252 +.LASF491:
46253 +       .string "_sigchld"
46254 +.LASF530:
46255 +       .string "d_sb"
46256 +.LASF22:
46257 +       .string "__s16"
46258 +.LASF240:
46259 +       .string "comm"
46260 +.LASF859:
46261 +       .string "pgrp"
46262 +.LASF1035:
46263 +       .string "set_affinity"
46264 +.LASF1568:
46265 +       .string "round_jiffies_relative"
46266 +.LASF682:
46267 +       .string "mm_users"
46268 +.LASF473:
46269 +       .string "sigval"
46270 +.LASF1005:
46271 +       .string "module_sect_attr"
46272 +.LASF1340:
46273 +       .string "bd_holders"
46274 +.LASF1582:
46275 +       .string "timer_cpu_notify"
46276 +.LASF1545:
46277 +       .string "clear_pending"
46278 +.LASF728:
46279 +       .string "ktime"
46280 +.LASF1673:
46281 +       .string "__kstrtab_msleep_interruptible"
46282 +.LASF1023:
46283 +       .string "wake_depth"
46284 +.LASF1653:
46285 +       .string "__kstrtab_mod_timer"
46286 +.LASF1570:
46287 +       .string "init_timer"
46288 +.LASF842:
46289 +       .string "signalfd_list"
46290 +.LASF1259:
46291 +       .string "dq_off"
46292 +.LASF80:
46293 +       .string "int21_revectored"
46294 +.LASF1497:
46295 +       .string "inuse"
46296 +.LASF1495:
46297 +       .string "kstatfs"
46298 +.LASF1173:
46299 +       .string "ia_atime"
46300 +.LASF1694:
46301 +       .string "skip_ioapic_setup"
46302 +.LASF799:
46303 +       .string "f_gid"
46304 +.LASF600:
46305 +       .string "head"
46306 +.LASF813:
46307 +       .string "max_reqs"
46308 +.LASF1184:
46309 +       .string "dqb_isoftlimit"
46310 +.LASF1720:
46311 +       .string "/usr/src/linux-2.6.22.19-chopstix"
46312 +.LASF1018:
46313 +       .string "chip"
46314 +.LASF1260:
46315 +       .string "dq_flags"
46316 +.LASF697:
46317 +       .string "reserved_vm"
46318 +.LASF33:
46319 +       .string "pid_t"
46320 +.LASF109:
46321 +       .string "uptime"
46322 +.LASF72:
46323 +       .string "__gsh"
46324 +.LASF43:
46325 +       .string "clock_t"
46326 +.LASF369:
46327 +       .string "dev_pm_info"
46328 +.LASF1569:
46329 +       .string "internal_add_timer"
46330 +.LASF229:
46331 +       .string "egid"
46332 +.LASF822:
46333 +       .string "mm_counter_t"
46334 +.LASF13:
46335 +       .string "__kernel_uid32_t"
46336 +.LASF1230:
46337 +       .string "qs_rtbtimelimit"
46338 +.LASF259:
46339 +       .string "audit_context"
46340 +.LASF1489:
46341 +       .string "filldir_t"
46342 +.LASF204:
46343 +       .string "real_parent"
46344 +.LASF1639:
46345 +       .string "__kstrtab___round_jiffies"
46346 +.LASF897:
46347 +       .string "system"
46348 +.LASF1453:
46349 +       .string "fa_file"
46350 +.LASF1378:
46351 +       .string "truncate_range"
46352 +.LASF1159:
46353 +       .string "create_mode"
46354 +.LASF1243:
46355 +       .string "dqi_dirty_list"
46356 +.LASF25:
46357 +       .string "__s32"
46358 +.LASF385:
46359 +       .string "is_registered"
46360 +.LASF319:
46361 +       .string "__wait_queue_head"
46362 +.LASF1099:
46363 +       .string "right"
46364 +.LASF1700:
46365 +       .string "per_cpu__rcu_data"
46366 +.LASF1321:
46367 +       .string "i_mmap_writable"
46368 +.LASF1607:
46369 +       .string "try_to_del_timer_sync"
46370 +.LASF561:
46371 +       .string "all_unreclaimable"
46372 +.LASF1447:
46373 +       .string "nfs_fl"
46374 +.LASF1471:
46375 +       .string "sync_fs"
46376 +.LASF51:
46377 +       .string "arg3"
46378 +.LASF1210:
46379 +       .string "d_rtb_softlimit"
46380 +.LASF425:
46381 +       .string "no_ioapic_check"
46382 +.LASF781:
46383 +       .string "ki_opcode"
46384 +.LASF1142:
46385 +       .string "s_bdev"
46386 +.LASF1437:
46387 +       .string "fl_compare_owner"
46388 +.LASF1022:
46389 +       .string "depth"
46390 +.LASF1451:
46391 +       .string "fa_fd"
46392 +.LASF963:
46393 +       .string "modules_which_use_me"
46394 +.LASF1512:
46395 +       .string "vm_event_state"
46396 +.LASF1584:
46397 +       .string "hcpu"
46398 +.LASF1689:
46399 +       .string "__FIXADDR_TOP"
46400 +.LASF1711:
46401 +       .string "ioport_resource"
46402 +.LASF926:
46403 +       .string "mkobj"
46404 +.LASF916:
46405 +       .string "elf32_sym"
46406 +.LASF1287:
46407 +       .string "quota_sync"
46408 +.LASF643:
46409 +       .string "i_size"
46410 +.LASF613:
46411 +       .string "qlen"
46412 +.LASF1524:
46413 +       .string "list_replace"
46414 +.LASF1682:
46415 +       .string "xtime"
46416 +.LASF288:
46417 +       .string "spinlock_t"
46418 +.LASF724:
46419 +       .string "node_list"
46420 +.LASF607:
46421 +       .string "rcu_data"
46422 +.LASF199:
46423 +       .string "exit_signal"
46424 +.LASF1510:
46425 +       .string "populate"
46426 +.LASF1626:
46427 +       .string "sys_geteuid"
46428 +.LASF1202:
46429 +       .string "d_bcount"
46430 +.LASF457:
46431 +       .string "refcnt"
46432 +.LASF757:
46433 +       .string "work_struct"
46434 +.LASF974:
46435 +       .string "kobject"
46436 +.LASF1551:
46437 +       .string "read_seqbegin"
46438 +.LASF1189:
46439 +       .string "if_dqinfo"
46440 +.LASF121:
46441 +       .string "bits"
46442 +.LASF984:
46443 +       .string "kobj_type"
46444 +.LASF405:
46445 +       .string "smp_ops"
46446 +.LASF1024:
46447 +       .string "irq_count"
46448 +.LASF53:
46449 +       .string "flags"
46450 +.LASF224:
46451 +       .string "it_sched_expires"
46452 +.LASF196:
46453 +       .string "binfmt"
46454 +.LASF1261:
46455 +       .string "dq_type"
46456 +.LASF237:
46457 +       .string "user"
46458 +.LASF861:
46459 +       .string "leader"
46460 +.LASF1678:
46461 +       .string "cpu_possible_map"
46462 +.LASF381:
46463 +       .string "knode_bus"
46464 +.LASF228:
46465 +       .string "fsuid"
46466 +.LASF653:
46467 +       .string "i_mutex"
46468 +.LASF1187:
46469 +       .string "dqb_itime"
46470 +.LASF1211:
46471 +       .string "d_rtbcount"
46472 +.LASF513:
46473 +       .string "altroot"
46474 +.LASF1179:
46475 +       .string "if_dqblk"
46476 +.LASF314:
46477 +       .string "__wait_queue"
46478 +.LASF605:
46479 +       .string "cs_dmacachep"
46480 +.LASF145:
46481 +       .string "esp0"
46482 +.LASF12:
46483 +       .string "char"
46484 +.LASF1252:
46485 +       .string "dq_free"
46486 +.LASF970:
46487 +       .string "percpu"
46488 +.LASF304:
46489 +       .string "rb_left"
46490 +.LASF52:
46491 +       .string "uaddr"
46492 +.LASF1262:
46493 +       .string "dq_dqb"
46494 +.LASF827:
46495 +       .string "vm_next"
46496 +.LASF1028:
46497 +       .string "irq_chip"
46498 +.LASF1216:
46499 +       .string "fs_qfilestat"
46500 +.LASF741:
46501 +       .string "HRTIMER_NORESTART"
46502 +.LASF1618:
46503 +       .string "msleep_interruptible"
46504 +.LASF390:
46505 +       .string "driver_data"
46506 +.LASF1222:
46507 +       .string "qs_version"
46508 +.LASF1363:
46509 +       .string "mkdir"
46510 +.LASF230:
46511 +       .string "sgid"
46512 +.LASF675:
46513 +       .string "mmap_cache"
46514 +.LASF952:
46515 +       .string "init_size"
46516 +.LASF1116:
46517 +       .string "s_list"
46518 +.LASF280:
46519 +       .string "fs_excl"
46520 +.LASF1204:
46521 +       .string "d_itimer"
46522 +.LASF4:
46523 +       .string "__kernel_mode_t"
46524 +.LASF168:
46525 +       .string "task_struct"
46526 +.LASF116:
46527 +       .string "freeswap"
46528 +.LASF793:
46529 +       .string "f_count"
46530 +.LASF61:
46531 +       .string "__null_ds"
46532 +.LASF1153:
46533 +       .string "dcookie_struct"
46534 +.LASF769:
46535 +       .string "ki_users"
46536 +.LASF1392:
46537 +       .string "fasync"
46538 +.LASF521:
46539 +       .string "d_inode"
46540 +.LASF27:
46541 +       .string "__s64"
46542 +.LASF1368:
46543 +       .string "follow_link"
46544 +.LASF586:
46545 +       .string "zonelist"
46546 +.LASF603:
46547 +       .string "cs_size"
46548 +.LASF181:
46549 +       .string "sleep_avg"
46550 +.LASF1677:
46551 +       .string "per_cpu__this_cpu_off"
46552 +.LASF834:
46553 +       .string "vm_ops"
46554 +.LASF127:
46555 +       .string "i387_fxsave_struct"
46556 +.LASF1383:
46557 +       .string "aio_read"
46558 +.LASF62:
46559 +       .string "__null_es"
46560 +.LASF189:
46561 +       .string "cpus_allowed"
46562 +.LASF167:
46563 +       .string "supervisor_stack"
46564 +.LASF872:
46565 +       .string "rlim"
46566 +.LASF1113:
46567 +       .string "d_iput"
46568 +.LASF948:
46569 +       .string "extable"
46570 +.LASF771:
46571 +       .string "ki_filp"
46572 +.LASF849:
46573 +       .string "shared_pending"
46574 +.LASF1594:
46575 +       .string "sav_total"
46576 +.LASF528:
46577 +       .string "d_time"
46578 +.LASF1111:
46579 +       .string "d_delete"
46580 +.LASF105:
46581 +       .string "bug_addr"
46582 +.LASF63:
46583 +       .string "__null_fs"
46584 +.LASF1240:
46585 +       .string "v2_i"
46586 +.LASF1572:
46587 +       .string "timer_set_deferrable"
46588 +.LASF1445:
46589 +       .string "nfs4_lock_info"
46590 +.LASF999:
46591 +       .string "drivers_dir"
46592 +.LASF615:
46593 +       .string "curtail"
46594 +.LASF1044:
46595 +       .string "resource"
46596 +.LASF1160:
46597 +       .string "open"
46598 +.LASF1102:
46599 +       .string "prio_tree_root"
46600 +.LASF1301:
46601 +       .string "writepage"
46602 +.LASF716:
46603 +       .string "dumpable"
46604 +.LASF514:
46605 +       .string "rootmnt"
46606 +.LASF110:
46607 +       .string "loads"
46608 +.LASF550:
46609 +       .string "pages_low"
46610 +.LASF64:
46611 +       .string "__null_gs"
46612 +.LASF961:
46613 +       .string "bug_table"
46614 +.LASF902:
46615 +       .string "kernel_stat"
46616 +.LASF1127:
46617 +       .string "s_flags"
46618 +.LASF1339:
46619 +       .string "bd_holder"
46620 +.LASF1389:
46621 +       .string "flush"
46622 +.LASF1610:
46623 +       .string "schedule_timeout"
46624 +.LASF626:
46625 +       .string "mode"
46626 +.LASF1058:
46627 +       .string "bus_type"
46628 +.LASF652:
46629 +       .string "i_lock"
46630 +.LASF553:
46631 +       .string "pageset"
46632 +.LASF972:
46633 +       .string "attribute_group"
46634 +.LASF1706:
46635 +       .string "per_cpu__kstat"
46636 +.LASF669:
46637 +       .string "i_flags"
46638 +.LASF1064:
46639 +       .string "bus_notifier"
46640 +.LASF398:
46641 +       .string "devres_lock"
46642 +.LASF1691:
46643 +       .string "acpi_disabled"
46644 +.LASF123:
46645 +       .string "desc_struct"
46646 +.LASF1201:
46647 +       .string "d_ino_softlimit"
46648 +.LASF635:
46649 +       .string "i_dentry"
46650 +.LASF1401:
46651 +       .string "fl_next"
46652 +.LASF324:
46653 +       .string "wait_list"
46654 +.LASF458:
46655 +       .string "proc_list"
46656 +.LASF1052:
46657 +       .string "n_node"
46658 +.LASF1391:
46659 +       .string "aio_fsync"
46660 +.LASF1484:
46661 +       .string "get_parent"
46662 +.LASF611:
46663 +       .string "nxtlist"
46664 +.LASF154:
46665 +       .string "saved_esp0"
46666 +.LASF705:
46667 +       .string "start_stack"
46668 +.LASF1597:
46669 +       .string "sys_sysinfo"
46670 +.LASF1108:
46671 +       .string "dentry_operations"
46672 +.LASF623:
46673 +       .string "PIDTYPE_MAX"
46674 +.LASF221:
46675 +       .string "maj_flt"
46676 +.LASF891:
46677 +       .string "unplug_io_fn"
46678 +.LASF287:
46679 +       .string "raw_lock"
46680 +.LASF465:
46681 +       .string "__sigrestore_t"
46682 +.LASF1307:
46683 +       .string "prepare_write"
46684 +.LASF1611:
46685 +       .string "timeout"
46686 +.LASF708:
46687 +       .string "env_start"
46688 +.LASF1299:
46689 +       .string "dqptr_sem"
46690 +.LASF1270:
46691 +       .string "release_dqblk"
46692 +.LASF124:
46693 +       .string "i387_fsave_struct"
46694 +.LASF1622:
46695 +       .string "espec"
46696 +.LASF1479:
46697 +       .string "show_stats"
46698 +.LASF1698:
46699 +       .string "contig_page_data"
46700 +.LASF318:
46701 +       .string "wait_queue_func_t"
46702 +.LASF845:
46703 +       .string "signal_struct"
46704 +.LASF1707:
46705 +       .string "per_cpu__gdt_page"
46706 +.LASF636:
46707 +       .string "i_ino"
46708 +.LASF241:
46709 +       .string "link_count"
46710 +.LASF782:
46711 +       .string "ki_nbytes"
46712 +.LASF1449:
46713 +       .string "fasync_struct"
46714 +.LASF374:
46715 +       .string "saved_state"
46716 +.LASF808:
46717 +       .string "dead"
46718 +.LASF522:
46719 +       .string "d_hash"
46720 +.LASF1293:
46721 +       .string "set_xstate"
46722 +.LASF1100:
46723 +       .string "prio_tree_node"
46724 +.LASF696:
46725 +       .string "stack_vm"
46726 +.LASF591:
46727 +       .string "_count"
46728 +.LASF1089:
46729 +       .string "class_device_attribute"
46730 +.LASF24:
46731 +       .string "__u16"
46732 +.LASF55:
46733 +       .string "futex"
46734 +.LASF180:
46735 +       .string "ioprio"
46736 +.LASF755:
46737 +       .string "task_io_accounting"
46738 +.LASF236:
46739 +       .string "keep_capabilities"
46740 +.LASF426:
46741 +       .string "init_apic_ldr"
46742 +.LASF639:
46743 +       .string "i_uid"
46744 +.LASF147:
46745 +       .string "debugreg"
46746 +.LASF829:
46747 +       .string "vm_flags"
46748 +.LASF333:
46749 +       .string "mp_config_table"
46750 +.LASF912:
46751 +       .string "gdt_page"
46752 +.LASF585:
46753 +       .string "kswapd_max_order"
46754 +.LASF48:
46755 +       .string "arg0"
46756 +.LASF50:
46757 +       .string "arg2"
46758 +.LASF57:
46759 +       .string "pgprot"
46760 +.LASF1288:
46761 +       .string "get_info"
46762 +.LASF475:
46763 +       .string "sival_ptr"
46764 +.LASF191:
46765 +       .string "first_time_slice"
46766 +.LASF90:
46767 +       .string "___eax"
46768 +.LASF584:
46769 +       .string "kswapd"
46770 +.LASF847:
46771 +       .string "wait_chldexit"
46772 +.LASF142:
46773 +       .string "mm_segment_t"
46774 +.LASF1196:
46775 +       .string "d_fieldmask"
46776 +.LASF41:
46777 +       .string "ssize_t"
46778 +.LASF971:
46779 +       .string "args"
46780 +.LASF74:
46781 +       .string "__map"
46782 +.LASF26:
46783 +       .string "__u32"
46784 +.LASF84:
46785 +       .string "___ebx"
46786 +.LASF122:
46787 +       .string "cpumask_t"
46788 +.LASF1644:
46789 +       .string "__ksymtab_round_jiffies"
46790 +.LASF763:
46791 +       .string "iovec"
46792 +.LASF870:
46793 +       .string "cinblock"
46794 +.LASF1538:
46795 +       .string "oldbit"
46796 +.LASF581:
46797 +       .string "node_spanned_pages"
46798 +.LASF1566:
46799 +       .string "__round_jiffies_relative"
46800 +.LASF1344:
46801 +       .string "bd_part"
46802 +.LASF1635:
46803 +       .string "__ksymtab_jiffies_64"
46804 +.LASF992:
46805 +       .string "value"
46806 +.LASF445:
46807 +       .string "cpu_mask_to_apicid"
46808 +.LASF1555:
46809 +       .string "active_tasks"
46810 +.LASF440:
46811 +       .string "mpc_oem_pci_bus"
46812 +.LASF885:
46813 +       .string "rt_mutex_waiter"
46814 +.LASF447:
46815 +       .string "send_IPI_allbutself"
46816 +.LASF1156:
46817 +       .string "saved_names"
46818 +.LASF784:
46819 +       .string "ki_left"
46820 +.LASF85:
46821 +       .string "___ecx"
46822 +.LASF1683:
46823 +       .string "wall_to_monotonic"
46824 +.LASF1454:
46825 +       .string "file_system_type"
46826 +.LASF1177:
46827 +       .string "qid_t"
46828 +.LASF198:
46829 +       .string "exit_code"
46830 +.LASF99:
46831 +       .string "___ss"
46832 +.LASF1467:
46833 +       .string "drop_inode"
46834 +.LASF511:
46835 +       .string "umask"
46836 +.LASF421:
46837 +       .string "apic_destination_logical"
46838 +.LASF148:
46839 +       .string "trap_no"
46840 +.LASF172:
46841 +       .string "ptrace"
46842 +.LASF393:
46843 +       .string "dma_mask"
46844 +.LASF758:
46845 +       .string "delayed_work"
46846 +.LASF1554:
46847 +       .string "ticks"
46848 +.LASF1641:
46849 +       .string "__kstrtab___round_jiffies_relative"
46850 +.LASF205:
46851 +       .string "parent"
46852 +.LASF88:
46853 +       .string "___edi"
46854 +.LASF841:
46855 +       .string "siglock"
46856 +.LASF1616:
46857 +       .string "schedule_timeout_interruptible"
46858 +.LASF1051:
46859 +       .string "n_klist"
46860 +.LASF86:
46861 +       .string "___edx"
46862 +.LASF750:
46863 +       .string "get_softirq_time"
46864 +.LASF493:
46865 +       .string "_sigpoll"
46866 +.LASF389:
46867 +       .string "driver"
46868 +.LASF0:
46869 +       .string "unsigned int"
46870 +.LASF309:
46871 +       .string "hlist_head"
46872 +.LASF824:
46873 +       .string "vm_mm"
46874 +.LASF138:
46875 +       .string "entry_eip"
46876 +       .ident  "GCC: (GNU) 4.1.1 (Gentoo 4.1.1-r3)"
46877 +       .section        .note.GNU-stack,"",@progbits
46878 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/memory.c linux-2.6.22-590/mm/memory.c
46879 --- linux-2.6.22-580/mm/memory.c        2009-02-18 09:56:03.000000000 -0500
46880 +++ linux-2.6.22-590/mm/memory.c        2009-02-18 09:57:23.000000000 -0500
46881 @@ -59,6 +59,7 @@
46882  
46883  #include <linux/swapops.h>
46884  #include <linux/elf.h>
46885 +#include <linux/arrays.h>
46886  
46887  #ifndef CONFIG_NEED_MULTIPLE_NODES
46888  /* use the per-pgdat data instead for discontigmem - mbligh */
46889 @@ -2601,6 +2602,15 @@
46890         return ret;
46891  }
46892  
46893 +extern void (*rec_event)(void *,unsigned int);
46894 +struct event_spec {
46895 +       unsigned long pc;
46896 +       unsigned long dcookie; 
46897 +       unsigned count;
46898 +       unsigned char reason;
46899 +};
46900 +
46901 +
46902  /*
46903   * By the time we get here, we already hold the mm semaphore
46904   */
46905 @@ -2630,6 +2640,24 @@
46906         if (!pte)
46907                 return VM_FAULT_OOM;
46908  
46909 +#ifdef CONFIG_CHOPSTIX
46910 +       if (rec_event) {
46911 +               struct event event;
46912 +               struct event_spec espec;
46913 +        struct pt_regs *regs;
46914 +        unsigned int pc;
46915 +        regs = task_pt_regs(current);
46916 +        pc = regs->eip & (unsigned int) ~4095;
46917 +
46918 +               espec.reason = 0; /* alloc */
46919 +               event.event_data=&espec;
46920 +               event.task = current;
46921 +               espec.pc=pc;
46922 +               event.event_type=5; 
46923 +               (*rec_event)(&event, 1);
46924 +       }
46925 +#endif
46926 +
46927         return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
46928  }
46929  
46930 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/memory.c.orig linux-2.6.22-590/mm/memory.c.orig
46931 --- linux-2.6.22-580/mm/memory.c.orig   1969-12-31 19:00:00.000000000 -0500
46932 +++ linux-2.6.22-590/mm/memory.c.orig   2009-02-18 09:56:03.000000000 -0500
46933 @@ -0,0 +1,2841 @@
46934 +/*
46935 + *  linux/mm/memory.c
46936 + *
46937 + *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
46938 + */
46939 +
46940 +/*
46941 + * demand-loading started 01.12.91 - seems it is high on the list of
46942 + * things wanted, and it should be easy to implement. - Linus
46943 + */
46944 +
46945 +/*
46946 + * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
46947 + * pages started 02.12.91, seems to work. - Linus.
46948 + *
46949 + * Tested sharing by executing about 30 /bin/sh: under the old kernel it
46950 + * would have taken more than the 6M I have free, but it worked well as
46951 + * far as I could see.
46952 + *
46953 + * Also corrected some "invalidate()"s - I wasn't doing enough of them.
46954 + */
46955 +
46956 +/*
46957 + * Real VM (paging to/from disk) started 18.12.91. Much more work and
46958 + * thought has to go into this. Oh, well..
46959 + * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
46960 + *             Found it. Everything seems to work now.
46961 + * 20.12.91  -  Ok, making the swap-device changeable like the root.
46962 + */
46963 +
46964 +/*
46965 + * 05.04.94  -  Multi-page memory management added for v1.1.
46966 + *             Idea by Alex Bligh (alex@cconcepts.co.uk)
46967 + *
46968 + * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
46969 + *             (Gerhard.Wichert@pdb.siemens.de)
46970 + *
46971 + * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
46972 + */
46973 +
46974 +#include <linux/kernel_stat.h>
46975 +#include <linux/mm.h>
46976 +#include <linux/hugetlb.h>
46977 +#include <linux/mman.h>
46978 +#include <linux/swap.h>
46979 +#include <linux/highmem.h>
46980 +#include <linux/pagemap.h>
46981 +#include <linux/rmap.h>
46982 +#include <linux/module.h>
46983 +#include <linux/delayacct.h>
46984 +#include <linux/init.h>
46985 +#include <linux/writeback.h>
46986 +
46987 +#include <asm/pgalloc.h>
46988 +#include <asm/uaccess.h>
46989 +#include <asm/tlb.h>
46990 +#include <asm/tlbflush.h>
46991 +#include <asm/pgtable.h>
46992 +
46993 +#include <linux/swapops.h>
46994 +#include <linux/elf.h>
46995 +
46996 +#ifndef CONFIG_NEED_MULTIPLE_NODES
46997 +/* use the per-pgdat data instead for discontigmem - mbligh */
46998 +unsigned long max_mapnr;
46999 +struct page *mem_map;
47000 +
47001 +EXPORT_SYMBOL(max_mapnr);
47002 +EXPORT_SYMBOL(mem_map);
47003 +#endif
47004 +
47005 +unsigned long num_physpages;
47006 +/*
47007 + * A number of key systems in x86 including ioremap() rely on the assumption
47008 + * that high_memory defines the upper bound on direct map memory, then end
47009 + * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
47010 + * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
47011 + * and ZONE_HIGHMEM.
47012 + */
47013 +void * high_memory;
47014 +unsigned long vmalloc_earlyreserve;
47015 +
47016 +EXPORT_SYMBOL(num_physpages);
47017 +EXPORT_SYMBOL(high_memory);
47018 +EXPORT_SYMBOL(vmalloc_earlyreserve);
47019 +
47020 +int randomize_va_space __read_mostly = 1;
47021 +
47022 +static int __init disable_randmaps(char *s)
47023 +{
47024 +       randomize_va_space = 0;
47025 +       return 1;
47026 +}
47027 +__setup("norandmaps", disable_randmaps);
47028 +
47029 +
47030 +/*
47031 + * If a p?d_bad entry is found while walking page tables, report
47032 + * the error, before resetting entry to p?d_none.  Usually (but
47033 + * very seldom) called out from the p?d_none_or_clear_bad macros.
47034 + */
47035 +
47036 +void pgd_clear_bad(pgd_t *pgd)
47037 +{
47038 +       pgd_ERROR(*pgd);
47039 +       pgd_clear(pgd);
47040 +}
47041 +
47042 +void pud_clear_bad(pud_t *pud)
47043 +{
47044 +       pud_ERROR(*pud);
47045 +       pud_clear(pud);
47046 +}
47047 +
47048 +void pmd_clear_bad(pmd_t *pmd)
47049 +{
47050 +       pmd_ERROR(*pmd);
47051 +       pmd_clear(pmd);
47052 +}
47053 +
47054 +/*
47055 + * Note: this doesn't free the actual pages themselves. That
47056 + * has been handled earlier when unmapping all the memory regions.
47057 + */
47058 +static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
47059 +{
47060 +       struct page *page = pmd_page(*pmd);
47061 +       pmd_clear(pmd);
47062 +       pte_lock_deinit(page);
47063 +       pte_free_tlb(tlb, page);
47064 +       dec_zone_page_state(page, NR_PAGETABLE);
47065 +       tlb->mm->nr_ptes--;
47066 +}
47067 +
47068 +static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
47069 +                               unsigned long addr, unsigned long end,
47070 +                               unsigned long floor, unsigned long ceiling)
47071 +{
47072 +       pmd_t *pmd;
47073 +       unsigned long next;
47074 +       unsigned long start;
47075 +
47076 +       start = addr;
47077 +       pmd = pmd_offset(pud, addr);
47078 +       do {
47079 +               next = pmd_addr_end(addr, end);
47080 +               if (pmd_none_or_clear_bad(pmd))
47081 +                       continue;
47082 +               free_pte_range(tlb, pmd);
47083 +       } while (pmd++, addr = next, addr != end);
47084 +
47085 +       start &= PUD_MASK;
47086 +       if (start < floor)
47087 +               return;
47088 +       if (ceiling) {
47089 +               ceiling &= PUD_MASK;
47090 +               if (!ceiling)
47091 +                       return;
47092 +       }
47093 +       if (end - 1 > ceiling - 1)
47094 +               return;
47095 +
47096 +       pmd = pmd_offset(pud, start);
47097 +       pud_clear(pud);
47098 +       pmd_free_tlb(tlb, pmd);
47099 +}
47100 +
47101 +static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
47102 +                               unsigned long addr, unsigned long end,
47103 +                               unsigned long floor, unsigned long ceiling)
47104 +{
47105 +       pud_t *pud;
47106 +       unsigned long next;
47107 +       unsigned long start;
47108 +
47109 +       start = addr;
47110 +       pud = pud_offset(pgd, addr);
47111 +       do {
47112 +               next = pud_addr_end(addr, end);
47113 +               if (pud_none_or_clear_bad(pud))
47114 +                       continue;
47115 +               free_pmd_range(tlb, pud, addr, next, floor, ceiling);
47116 +       } while (pud++, addr = next, addr != end);
47117 +
47118 +       start &= PGDIR_MASK;
47119 +       if (start < floor)
47120 +               return;
47121 +       if (ceiling) {
47122 +               ceiling &= PGDIR_MASK;
47123 +               if (!ceiling)
47124 +                       return;
47125 +       }
47126 +       if (end - 1 > ceiling - 1)
47127 +               return;
47128 +
47129 +       pud = pud_offset(pgd, start);
47130 +       pgd_clear(pgd);
47131 +       pud_free_tlb(tlb, pud);
47132 +}
47133 +
47134 +/*
47135 + * This function frees user-level page tables of a process.
47136 + *
47137 + * Must be called with pagetable lock held.
47138 + */
47139 +void free_pgd_range(struct mmu_gather **tlb,
47140 +                       unsigned long addr, unsigned long end,
47141 +                       unsigned long floor, unsigned long ceiling)
47142 +{
47143 +       pgd_t *pgd;
47144 +       unsigned long next;
47145 +       unsigned long start;
47146 +
47147 +       /*
47148 +        * The next few lines have given us lots of grief...
47149 +        *
47150 +        * Why are we testing PMD* at this top level?  Because often
47151 +        * there will be no work to do at all, and we'd prefer not to
47152 +        * go all the way down to the bottom just to discover that.
47153 +        *
47154 +        * Why all these "- 1"s?  Because 0 represents both the bottom
47155 +        * of the address space and the top of it (using -1 for the
47156 +        * top wouldn't help much: the masks would do the wrong thing).
47157 +        * The rule is that addr 0 and floor 0 refer to the bottom of
47158 +        * the address space, but end 0 and ceiling 0 refer to the top
47159 +        * Comparisons need to use "end - 1" and "ceiling - 1" (though
47160 +        * that end 0 case should be mythical).
47161 +        *
47162 +        * Wherever addr is brought up or ceiling brought down, we must
47163 +        * be careful to reject "the opposite 0" before it confuses the
47164 +        * subsequent tests.  But what about where end is brought down
47165 +        * by PMD_SIZE below? no, end can't go down to 0 there.
47166 +        *
47167 +        * Whereas we round start (addr) and ceiling down, by different
47168 +        * masks at different levels, in order to test whether a table
47169 +        * now has no other vmas using it, so can be freed, we don't
47170 +        * bother to round floor or end up - the tests don't need that.
47171 +        */
47172 +
47173 +       addr &= PMD_MASK;
47174 +       if (addr < floor) {
47175 +               addr += PMD_SIZE;
47176 +               if (!addr)
47177 +                       return;
47178 +       }
47179 +       if (ceiling) {
47180 +               ceiling &= PMD_MASK;
47181 +               if (!ceiling)
47182 +                       return;
47183 +       }
47184 +       if (end - 1 > ceiling - 1)
47185 +               end -= PMD_SIZE;
47186 +       if (addr > end - 1)
47187 +               return;
47188 +
47189 +       start = addr;
47190 +       pgd = pgd_offset((*tlb)->mm, addr);
47191 +       do {
47192 +               next = pgd_addr_end(addr, end);
47193 +               if (pgd_none_or_clear_bad(pgd))
47194 +                       continue;
47195 +               free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
47196 +       } while (pgd++, addr = next, addr != end);
47197 +
47198 +       if (!(*tlb)->fullmm)
47199 +               flush_tlb_pgtables((*tlb)->mm, start, end);
47200 +}
47201 +
47202 +void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
47203 +               unsigned long floor, unsigned long ceiling)
47204 +{
47205 +       while (vma) {
47206 +               struct vm_area_struct *next = vma->vm_next;
47207 +               unsigned long addr = vma->vm_start;
47208 +
47209 +               /*
47210 +                * Hide vma from rmap and vmtruncate before freeing pgtables
47211 +                */
47212 +               anon_vma_unlink(vma);
47213 +               unlink_file_vma(vma);
47214 +
47215 +               if (is_vm_hugetlb_page(vma)) {
47216 +                       hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
47217 +                               floor, next? next->vm_start: ceiling);
47218 +               } else {
47219 +                       /*
47220 +                        * Optimization: gather nearby vmas into one call down
47221 +                        */
47222 +                       while (next && next->vm_start <= vma->vm_end + PMD_SIZE
47223 +                              && !is_vm_hugetlb_page(next)) {
47224 +                               vma = next;
47225 +                               next = vma->vm_next;
47226 +                               anon_vma_unlink(vma);
47227 +                               unlink_file_vma(vma);
47228 +                       }
47229 +                       free_pgd_range(tlb, addr, vma->vm_end,
47230 +                               floor, next? next->vm_start: ceiling);
47231 +               }
47232 +               vma = next;
47233 +       }
47234 +}
47235 +
47236 +int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
47237 +{
47238 +       struct page *new = pte_alloc_one(mm, address);
47239 +       if (!new)
47240 +               return -ENOMEM;
47241 +
47242 +       pte_lock_init(new);
47243 +       spin_lock(&mm->page_table_lock);
47244 +       if (pmd_present(*pmd)) {        /* Another has populated it */
47245 +               pte_lock_deinit(new);
47246 +               pte_free(new);
47247 +       } else {
47248 +               mm->nr_ptes++;
47249 +               inc_zone_page_state(new, NR_PAGETABLE);
47250 +               pmd_populate(mm, pmd, new);
47251 +       }
47252 +       spin_unlock(&mm->page_table_lock);
47253 +       return 0;
47254 +}
47255 +
47256 +int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
47257 +{
47258 +       pte_t *new = pte_alloc_one_kernel(&init_mm, address);
47259 +       if (!new)
47260 +               return -ENOMEM;
47261 +
47262 +       spin_lock(&init_mm.page_table_lock);
47263 +       if (pmd_present(*pmd))          /* Another has populated it */
47264 +               pte_free_kernel(new);
47265 +       else
47266 +               pmd_populate_kernel(&init_mm, pmd, new);
47267 +       spin_unlock(&init_mm.page_table_lock);
47268 +       return 0;
47269 +}
47270 +
47271 +static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
47272 +{
47273 +       if (file_rss)
47274 +               add_mm_counter(mm, file_rss, file_rss);
47275 +       if (anon_rss)
47276 +               add_mm_counter(mm, anon_rss, anon_rss);
47277 +}
47278 +
47279 +/*
47280 + * This function is called to print an error when a bad pte
47281 + * is found. For example, we might have a PFN-mapped pte in
47282 + * a region that doesn't allow it.
47283 + *
47284 + * The calling function must still handle the error.
47285 + */
47286 +void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
47287 +{
47288 +       printk(KERN_ERR "Bad pte = %08llx, process = %s, "
47289 +                       "vm_flags = %lx, vaddr = %lx\n",
47290 +               (long long)pte_val(pte),
47291 +               (vma->vm_mm == current->mm ? current->comm : "???"),
47292 +               vma->vm_flags, vaddr);
47293 +       dump_stack();
47294 +}
47295 +
47296 +static inline int is_cow_mapping(unsigned int flags)
47297 +{
47298 +       return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
47299 +}
47300 +
47301 +/*
47302 + * This function gets the "struct page" associated with a pte.
47303 + *
47304 + * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
47305 + * will have each page table entry just pointing to a raw page frame
47306 + * number, and as far as the VM layer is concerned, those do not have
47307 + * pages associated with them - even if the PFN might point to memory
47308 + * that otherwise is perfectly fine and has a "struct page".
47309 + *
47310 + * The way we recognize those mappings is through the rules set up
47311 + * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
47312 + * and the vm_pgoff will point to the first PFN mapped: thus every
47313 + * page that is a raw mapping will always honor the rule
47314 + *
47315 + *     pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
47316 + *
47317 + * and if that isn't true, the page has been COW'ed (in which case it
47318 + * _does_ have a "struct page" associated with it even if it is in a
47319 + * VM_PFNMAP range).
47320 + */
47321 +struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
47322 +{
47323 +       unsigned long pfn = pte_pfn(pte);
47324 +
47325 +       if (unlikely(vma->vm_flags & VM_PFNMAP)) {
47326 +               unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
47327 +               if (pfn == vma->vm_pgoff + off)
47328 +                       return NULL;
47329 +               if (!is_cow_mapping(vma->vm_flags))
47330 +                       return NULL;
47331 +       }
47332 +
47333 +       /*
47334 +        * Add some anal sanity checks for now. Eventually,
47335 +        * we should just do "return pfn_to_page(pfn)", but
47336 +        * in the meantime we check that we get a valid pfn,
47337 +        * and that the resulting page looks ok.
47338 +        */
47339 +       if (unlikely(!pfn_valid(pfn))) {
47340 +               print_bad_pte(vma, pte, addr);
47341 +               return NULL;
47342 +       }
47343 +
47344 +       /*
47345 +        * NOTE! We still have PageReserved() pages in the page 
47346 +        * tables. 
47347 +        *
47348 +        * The PAGE_ZERO() pages and various VDSO mappings can
47349 +        * cause them to exist.
47350 +        */
47351 +       return pfn_to_page(pfn);
47352 +}
47353 +
47354 +/*
47355 + * copy one vm_area from one task to the other. Assumes the page tables
47356 + * already present in the new task to be cleared in the whole range
47357 + * covered by this vma.
47358 + */
47359 +
47360 +static inline void
47361 +copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
47362 +               pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
47363 +               unsigned long addr, int *rss)
47364 +{
47365 +       unsigned long vm_flags = vma->vm_flags;
47366 +       pte_t pte = *src_pte;
47367 +       struct page *page;
47368 +
47369 +       /* pte contains position in swap or file, so copy. */
47370 +       if (unlikely(!pte_present(pte))) {
47371 +               if (!pte_file(pte)) {
47372 +                       swp_entry_t entry = pte_to_swp_entry(pte);
47373 +
47374 +                       swap_duplicate(entry);
47375 +                       /* make sure dst_mm is on swapoff's mmlist. */
47376 +                       if (unlikely(list_empty(&dst_mm->mmlist))) {
47377 +                               spin_lock(&mmlist_lock);
47378 +                               if (list_empty(&dst_mm->mmlist))
47379 +                                       list_add(&dst_mm->mmlist,
47380 +                                                &src_mm->mmlist);
47381 +                               spin_unlock(&mmlist_lock);
47382 +                       }
47383 +                       if (is_write_migration_entry(entry) &&
47384 +                                       is_cow_mapping(vm_flags)) {
47385 +                               /*
47386 +                                * COW mappings require pages in both parent
47387 +                                * and child to be set to read.
47388 +                                */
47389 +                               make_migration_entry_read(&entry);
47390 +                               pte = swp_entry_to_pte(entry);
47391 +                               set_pte_at(src_mm, addr, src_pte, pte);
47392 +                       }
47393 +               }
47394 +               goto out_set_pte;
47395 +       }
47396 +
47397 +       /*
47398 +        * If it's a COW mapping, write protect it both
47399 +        * in the parent and the child
47400 +        */
47401 +       if (is_cow_mapping(vm_flags)) {
47402 +               ptep_set_wrprotect(src_mm, addr, src_pte);
47403 +               pte = pte_wrprotect(pte);
47404 +       }
47405 +
47406 +       /*
47407 +        * If it's a shared mapping, mark it clean in
47408 +        * the child
47409 +        */
47410 +       if (vm_flags & VM_SHARED)
47411 +               pte = pte_mkclean(pte);
47412 +       pte = pte_mkold(pte);
47413 +
47414 +       page = vm_normal_page(vma, addr, pte);
47415 +       if (page) {
47416 +               get_page(page);
47417 +               page_dup_rmap(page, vma, addr);
47418 +               rss[!!PageAnon(page)]++;
47419 +       }
47420 +
47421 +out_set_pte:
47422 +       set_pte_at(dst_mm, addr, dst_pte, pte);
47423 +}
47424 +
47425 +static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
47426 +               pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
47427 +               unsigned long addr, unsigned long end)
47428 +{
47429 +       pte_t *src_pte, *dst_pte;
47430 +       spinlock_t *src_ptl, *dst_ptl;
47431 +       int progress = 0;
47432 +       int rss[2];
47433 +
47434 +       if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1)))
47435 +               return -ENOMEM;
47436 +
47437 +again:
47438 +       rss[1] = rss[0] = 0;
47439 +       dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
47440 +       if (!dst_pte)
47441 +               return -ENOMEM;
47442 +       src_pte = pte_offset_map_nested(src_pmd, addr);
47443 +       src_ptl = pte_lockptr(src_mm, src_pmd);
47444 +       spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
47445 +       arch_enter_lazy_mmu_mode();
47446 +
47447 +       do {
47448 +               /*
47449 +                * We are holding two locks at this point - either of them
47450 +                * could generate latencies in another task on another CPU.
47451 +                */
47452 +               if (progress >= 32) {
47453 +                       progress = 0;
47454 +                       if (need_resched() ||
47455 +                           need_lockbreak(src_ptl) ||
47456 +                           need_lockbreak(dst_ptl))
47457 +                               break;
47458 +               }
47459 +               if (pte_none(*src_pte)) {
47460 +                       progress++;
47461 +                       continue;
47462 +               }
47463 +               copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
47464 +               progress += 8;
47465 +       } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
47466 +
47467 +       arch_leave_lazy_mmu_mode();
47468 +       spin_unlock(src_ptl);
47469 +       pte_unmap_nested(src_pte - 1);
47470 +       add_mm_rss(dst_mm, rss[0], rss[1]);
47471 +       pte_unmap_unlock(dst_pte - 1, dst_ptl);
47472 +       cond_resched();
47473 +       if (addr != end)
47474 +               goto again;
47475 +       return 0;
47476 +}
47477 +
47478 +static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
47479 +               pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
47480 +               unsigned long addr, unsigned long end)
47481 +{
47482 +       pmd_t *src_pmd, *dst_pmd;
47483 +       unsigned long next;
47484 +
47485 +       dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
47486 +       if (!dst_pmd)
47487 +               return -ENOMEM;
47488 +       src_pmd = pmd_offset(src_pud, addr);
47489 +       do {
47490 +               next = pmd_addr_end(addr, end);
47491 +               if (pmd_none_or_clear_bad(src_pmd))
47492 +                       continue;
47493 +               if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
47494 +                                               vma, addr, next))
47495 +                       return -ENOMEM;
47496 +       } while (dst_pmd++, src_pmd++, addr = next, addr != end);
47497 +       return 0;
47498 +}
47499 +
47500 +static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
47501 +               pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
47502 +               unsigned long addr, unsigned long end)
47503 +{
47504 +       pud_t *src_pud, *dst_pud;
47505 +       unsigned long next;
47506 +
47507 +       dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
47508 +       if (!dst_pud)
47509 +               return -ENOMEM;
47510 +       src_pud = pud_offset(src_pgd, addr);
47511 +       do {
47512 +               next = pud_addr_end(addr, end);
47513 +               if (pud_none_or_clear_bad(src_pud))
47514 +                       continue;
47515 +               if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
47516 +                                               vma, addr, next))
47517 +                       return -ENOMEM;
47518 +       } while (dst_pud++, src_pud++, addr = next, addr != end);
47519 +       return 0;
47520 +}
47521 +
47522 +int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
47523 +               struct vm_area_struct *vma)
47524 +{
47525 +       pgd_t *src_pgd, *dst_pgd;
47526 +       unsigned long next;
47527 +       unsigned long addr = vma->vm_start;
47528 +       unsigned long end = vma->vm_end;
47529 +
47530 +       /*
47531 +        * Don't copy ptes where a page fault will fill them correctly.
47532 +        * Fork becomes much lighter when there are big shared or private
47533 +        * readonly mappings. The tradeoff is that copy_page_range is more
47534 +        * efficient than faulting.
47535 +        */
47536 +       if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
47537 +               if (!vma->anon_vma)
47538 +                       return 0;
47539 +       }
47540 +
47541 +       if (is_vm_hugetlb_page(vma))
47542 +               return copy_hugetlb_page_range(dst_mm, src_mm, vma);
47543 +
47544 +       dst_pgd = pgd_offset(dst_mm, addr);
47545 +       src_pgd = pgd_offset(src_mm, addr);
47546 +       do {
47547 +               next = pgd_addr_end(addr, end);
47548 +               if (pgd_none_or_clear_bad(src_pgd))
47549 +                       continue;
47550 +               if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
47551 +                                               vma, addr, next))
47552 +                       return -ENOMEM;
47553 +       } while (dst_pgd++, src_pgd++, addr = next, addr != end);
47554 +       return 0;
47555 +}
47556 +
47557 +static unsigned long zap_pte_range(struct mmu_gather *tlb,
47558 +                               struct vm_area_struct *vma, pmd_t *pmd,
47559 +                               unsigned long addr, unsigned long end,
47560 +                               long *zap_work, struct zap_details *details)
47561 +{
47562 +       struct mm_struct *mm = tlb->mm;
47563 +       pte_t *pte;
47564 +       spinlock_t *ptl;
47565 +       int file_rss = 0;
47566 +       int anon_rss = 0;
47567 +
47568 +       pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
47569 +       arch_enter_lazy_mmu_mode();
47570 +       do {
47571 +               pte_t ptent = *pte;
47572 +               if (pte_none(ptent)) {
47573 +                       (*zap_work)--;
47574 +                       continue;
47575 +               }
47576 +
47577 +               (*zap_work) -= PAGE_SIZE;
47578 +
47579 +               if (pte_present(ptent)) {
47580 +                       struct page *page;
47581 +
47582 +                       page = vm_normal_page(vma, addr, ptent);
47583 +                       if (unlikely(details) && page) {
47584 +                               /*
47585 +                                * unmap_shared_mapping_pages() wants to
47586 +                                * invalidate cache without truncating:
47587 +                                * unmap shared but keep private pages.
47588 +                                */
47589 +                               if (details->check_mapping &&
47590 +                                   details->check_mapping != page->mapping)
47591 +                                       continue;
47592 +                               /*
47593 +                                * Each page->index must be checked when
47594 +                                * invalidating or truncating nonlinear.
47595 +                                */
47596 +                               if (details->nonlinear_vma &&
47597 +                                   (page->index < details->first_index ||
47598 +                                    page->index > details->last_index))
47599 +                                       continue;
47600 +                       }
47601 +                       ptent = ptep_get_and_clear_full(mm, addr, pte,
47602 +                                                       tlb->fullmm);
47603 +                       tlb_remove_tlb_entry(tlb, pte, addr);
47604 +                       if (unlikely(!page))
47605 +                               continue;
47606 +                       if (unlikely(details) && details->nonlinear_vma
47607 +                           && linear_page_index(details->nonlinear_vma,
47608 +                                               addr) != page->index)
47609 +                               set_pte_at(mm, addr, pte,
47610 +                                          pgoff_to_pte(page->index));
47611 +                       if (PageAnon(page))
47612 +                               anon_rss--;
47613 +                       else {
47614 +                               if (pte_dirty(ptent))
47615 +                                       set_page_dirty(page);
47616 +                               if (pte_young(ptent))
47617 +                                       SetPageReferenced(page);
47618 +                               file_rss--;
47619 +                       }
47620 +                       page_remove_rmap(page, vma);
47621 +                       tlb_remove_page(tlb, page);
47622 +                       continue;
47623 +               }
47624 +               /*
47625 +                * If details->check_mapping, we leave swap entries;
47626 +                * if details->nonlinear_vma, we leave file entries.
47627 +                */
47628 +               if (unlikely(details))
47629 +                       continue;
47630 +               if (!pte_file(ptent))
47631 +                       free_swap_and_cache(pte_to_swp_entry(ptent));
47632 +               pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
47633 +       } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
47634 +
47635 +       add_mm_rss(mm, file_rss, anon_rss);
47636 +       arch_leave_lazy_mmu_mode();
47637 +       pte_unmap_unlock(pte - 1, ptl);
47638 +
47639 +       return addr;
47640 +}
47641 +
47642 +static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
47643 +                               struct vm_area_struct *vma, pud_t *pud,
47644 +                               unsigned long addr, unsigned long end,
47645 +                               long *zap_work, struct zap_details *details)
47646 +{
47647 +       pmd_t *pmd;
47648 +       unsigned long next;
47649 +
47650 +       pmd = pmd_offset(pud, addr);
47651 +       do {
47652 +               next = pmd_addr_end(addr, end);
47653 +               if (pmd_none_or_clear_bad(pmd)) {
47654 +                       (*zap_work)--;
47655 +                       continue;
47656 +               }
47657 +               next = zap_pte_range(tlb, vma, pmd, addr, next,
47658 +                                               zap_work, details);
47659 +       } while (pmd++, addr = next, (addr != end && *zap_work > 0));
47660 +
47661 +       return addr;
47662 +}
47663 +
47664 +static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
47665 +                               struct vm_area_struct *vma, pgd_t *pgd,
47666 +                               unsigned long addr, unsigned long end,
47667 +                               long *zap_work, struct zap_details *details)
47668 +{
47669 +       pud_t *pud;
47670 +       unsigned long next;
47671 +
47672 +       pud = pud_offset(pgd, addr);
47673 +       do {
47674 +               next = pud_addr_end(addr, end);
47675 +               if (pud_none_or_clear_bad(pud)) {
47676 +                       (*zap_work)--;
47677 +                       continue;
47678 +               }
47679 +               next = zap_pmd_range(tlb, vma, pud, addr, next,
47680 +                                               zap_work, details);
47681 +       } while (pud++, addr = next, (addr != end && *zap_work > 0));
47682 +
47683 +       return addr;
47684 +}
47685 +
47686 +static unsigned long unmap_page_range(struct mmu_gather *tlb,
47687 +                               struct vm_area_struct *vma,
47688 +                               unsigned long addr, unsigned long end,
47689 +                               long *zap_work, struct zap_details *details)
47690 +{
47691 +       pgd_t *pgd;
47692 +       unsigned long next;
47693 +
47694 +       if (details && !details->check_mapping && !details->nonlinear_vma)
47695 +               details = NULL;
47696 +
47697 +       BUG_ON(addr >= end);
47698 +       tlb_start_vma(tlb, vma);
47699 +       pgd = pgd_offset(vma->vm_mm, addr);
47700 +       do {
47701 +               next = pgd_addr_end(addr, end);
47702 +               if (pgd_none_or_clear_bad(pgd)) {
47703 +                       (*zap_work)--;
47704 +                       continue;
47705 +               }
47706 +               next = zap_pud_range(tlb, vma, pgd, addr, next,
47707 +                                               zap_work, details);
47708 +       } while (pgd++, addr = next, (addr != end && *zap_work > 0));
47709 +       tlb_end_vma(tlb, vma);
47710 +
47711 +       return addr;
47712 +}
47713 +
47714 +#ifdef CONFIG_PREEMPT
47715 +# define ZAP_BLOCK_SIZE        (8 * PAGE_SIZE)
47716 +#else
47717 +/* No preempt: go for improved straight-line efficiency */
47718 +# define ZAP_BLOCK_SIZE        (1024 * PAGE_SIZE)
47719 +#endif
47720 +
47721 +/**
47722 + * unmap_vmas - unmap a range of memory covered by a list of vma's
47723 + * @tlbp: address of the caller's struct mmu_gather
47724 + * @vma: the starting vma
47725 + * @start_addr: virtual address at which to start unmapping
47726 + * @end_addr: virtual address at which to end unmapping
47727 + * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
47728 + * @details: details of nonlinear truncation or shared cache invalidation
47729 + *
47730 + * Returns the end address of the unmapping (restart addr if interrupted).
47731 + *
47732 + * Unmap all pages in the vma list.
47733 + *
47734 + * We aim to not hold locks for too long (for scheduling latency reasons).
47735 + * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
47736 + * return the ending mmu_gather to the caller.
47737 + *
47738 + * Only addresses between `start' and `end' will be unmapped.
47739 + *
47740 + * The VMA list must be sorted in ascending virtual address order.
47741 + *
47742 + * unmap_vmas() assumes that the caller will flush the whole unmapped address
47743 + * range after unmap_vmas() returns.  So the only responsibility here is to
47744 + * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
47745 + * drops the lock and schedules.
47746 + */
47747 +unsigned long unmap_vmas(struct mmu_gather **tlbp,
47748 +               struct vm_area_struct *vma, unsigned long start_addr,
47749 +               unsigned long end_addr, unsigned long *nr_accounted,
47750 +               struct zap_details *details)
47751 +{
47752 +       long zap_work = ZAP_BLOCK_SIZE;
47753 +       unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
47754 +       int tlb_start_valid = 0;
47755 +       unsigned long start = start_addr;
47756 +       spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
47757 +       int fullmm = (*tlbp)->fullmm;
47758 +
47759 +       for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
47760 +               unsigned long end;
47761 +
47762 +               start = max(vma->vm_start, start_addr);
47763 +               if (start >= vma->vm_end)
47764 +                       continue;
47765 +               end = min(vma->vm_end, end_addr);
47766 +               if (end <= vma->vm_start)
47767 +                       continue;
47768 +
47769 +               if (vma->vm_flags & VM_ACCOUNT)
47770 +                       *nr_accounted += (end - start) >> PAGE_SHIFT;
47771 +
47772 +               while (start != end) {
47773 +                       if (!tlb_start_valid) {
47774 +                               tlb_start = start;
47775 +                               tlb_start_valid = 1;
47776 +                       }
47777 +
47778 +                       if (unlikely(is_vm_hugetlb_page(vma))) {
47779 +                               unmap_hugepage_range(vma, start, end);
47780 +                               zap_work -= (end - start) /
47781 +                                               (HPAGE_SIZE / PAGE_SIZE);
47782 +                               start = end;
47783 +                       } else
47784 +                               start = unmap_page_range(*tlbp, vma,
47785 +                                               start, end, &zap_work, details);
47786 +
47787 +                       if (zap_work > 0) {
47788 +                               BUG_ON(start != end);
47789 +                               break;
47790 +                       }
47791 +
47792 +                       tlb_finish_mmu(*tlbp, tlb_start, start);
47793 +
47794 +                       if (need_resched() ||
47795 +                               (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
47796 +                               if (i_mmap_lock) {
47797 +                                       *tlbp = NULL;
47798 +                                       goto out;
47799 +                               }
47800 +                               cond_resched();
47801 +                       }
47802 +
47803 +                       *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
47804 +                       tlb_start_valid = 0;
47805 +                       zap_work = ZAP_BLOCK_SIZE;
47806 +               }
47807 +       }
47808 +out:
47809 +       return start;   /* which is now the end (or restart) address */
47810 +}
47811 +
47812 +/**
47813 + * zap_page_range - remove user pages in a given range
47814 + * @vma: vm_area_struct holding the applicable pages
47815 + * @address: starting address of pages to zap
47816 + * @size: number of bytes to zap
47817 + * @details: details of nonlinear truncation or shared cache invalidation
47818 + */
47819 +unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
47820 +               unsigned long size, struct zap_details *details)
47821 +{
47822 +       struct mm_struct *mm = vma->vm_mm;
47823 +       struct mmu_gather *tlb;
47824 +       unsigned long end = address + size;
47825 +       unsigned long nr_accounted = 0;
47826 +
47827 +       lru_add_drain();
47828 +       tlb = tlb_gather_mmu(mm, 0);
47829 +       update_hiwater_rss(mm);
47830 +       end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
47831 +       if (tlb)
47832 +               tlb_finish_mmu(tlb, address, end);
47833 +       return end;
47834 +}
47835 +
47836 +/*
47837 + * Do a quick page-table lookup for a single page.
47838 + */
47839 +struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
47840 +                       unsigned int flags)
47841 +{
47842 +       pgd_t *pgd;
47843 +       pud_t *pud;
47844 +       pmd_t *pmd;
47845 +       pte_t *ptep, pte;
47846 +       spinlock_t *ptl;
47847 +       struct page *page;
47848 +       struct mm_struct *mm = vma->vm_mm;
47849 +
47850 +       page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
47851 +       if (!IS_ERR(page)) {
47852 +               BUG_ON(flags & FOLL_GET);
47853 +               goto out;
47854 +       }
47855 +
47856 +       page = NULL;
47857 +       pgd = pgd_offset(mm, address);
47858 +       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
47859 +               goto no_page_table;
47860 +
47861 +       pud = pud_offset(pgd, address);
47862 +       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
47863 +               goto no_page_table;
47864 +       
47865 +       pmd = pmd_offset(pud, address);
47866 +       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
47867 +               goto no_page_table;
47868 +
47869 +       if (pmd_huge(*pmd)) {
47870 +               BUG_ON(flags & FOLL_GET);
47871 +               page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
47872 +               goto out;
47873 +       }
47874 +
47875 +       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
47876 +       if (!ptep)
47877 +               goto out;
47878 +
47879 +       pte = *ptep;
47880 +       if (!pte_present(pte))
47881 +               goto unlock;
47882 +       if ((flags & FOLL_WRITE) && !pte_write(pte))
47883 +               goto unlock;
47884 +       page = vm_normal_page(vma, address, pte);
47885 +       if (unlikely(!page))
47886 +               goto unlock;
47887 +
47888 +       if (flags & FOLL_GET)
47889 +               get_page(page);
47890 +       if (flags & FOLL_TOUCH) {
47891 +               if ((flags & FOLL_WRITE) &&
47892 +                   !pte_dirty(pte) && !PageDirty(page))
47893 +                       set_page_dirty(page);
47894 +               mark_page_accessed(page);
47895 +       }
47896 +unlock:
47897 +       pte_unmap_unlock(ptep, ptl);
47898 +out:
47899 +       return page;
47900 +
47901 +no_page_table:
47902 +       /*
47903 +        * When core dumping an enormous anonymous area that nobody
47904 +        * has touched so far, we don't want to allocate page tables.
47905 +        */
47906 +       if (flags & FOLL_ANON) {
47907 +               page = ZERO_PAGE(address);
47908 +               if (flags & FOLL_GET)
47909 +                       get_page(page);
47910 +               BUG_ON(flags & FOLL_WRITE);
47911 +       }
47912 +       return page;
47913 +}
47914 +
47915 +int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
47916 +               unsigned long start, int len, int write, int force,
47917 +               struct page **pages, struct vm_area_struct **vmas)
47918 +{
47919 +       int i;
47920 +       unsigned int vm_flags;
47921 +
47922 +       if (len <= 0)
47923 +               return 0;
47924 +       /* 
47925 +        * Require read or write permissions.
47926 +        * If 'force' is set, we only require the "MAY" flags.
47927 +        */
47928 +       vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
47929 +       vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
47930 +       i = 0;
47931 +
47932 +       do {
47933 +               struct vm_area_struct *vma;
47934 +               unsigned int foll_flags;
47935 +
47936 +               vma = find_extend_vma(mm, start);
47937 +               if (!vma && in_gate_area(tsk, start)) {
47938 +                       unsigned long pg = start & PAGE_MASK;
47939 +                       struct vm_area_struct *gate_vma = get_gate_vma(tsk);
47940 +                       pgd_t *pgd;
47941 +                       pud_t *pud;
47942 +                       pmd_t *pmd;
47943 +                       pte_t *pte;
47944 +                       if (write) /* user gate pages are read-only */
47945 +                               return i ? : -EFAULT;
47946 +                       if (pg > TASK_SIZE)
47947 +                               pgd = pgd_offset_k(pg);
47948 +                       else
47949 +                               pgd = pgd_offset_gate(mm, pg);
47950 +                       BUG_ON(pgd_none(*pgd));
47951 +                       pud = pud_offset(pgd, pg);
47952 +                       BUG_ON(pud_none(*pud));
47953 +                       pmd = pmd_offset(pud, pg);
47954 +                       if (pmd_none(*pmd))
47955 +                               return i ? : -EFAULT;
47956 +                       pte = pte_offset_map(pmd, pg);
47957 +                       if (pte_none(*pte)) {
47958 +                               pte_unmap(pte);
47959 +                               return i ? : -EFAULT;
47960 +                       }
47961 +                       if (pages) {
47962 +                               struct page *page = vm_normal_page(gate_vma, start, *pte);
47963 +                               pages[i] = page;
47964 +                               if (page)
47965 +                                       get_page(page);
47966 +                       }
47967 +                       pte_unmap(pte);
47968 +                       if (vmas)
47969 +                               vmas[i] = gate_vma;
47970 +                       i++;
47971 +                       start += PAGE_SIZE;
47972 +                       len--;
47973 +                       continue;
47974 +               }
47975 +
47976 +               if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
47977 +                               || !(vm_flags & vma->vm_flags))
47978 +                       return i ? : -EFAULT;
47979 +
47980 +               if (is_vm_hugetlb_page(vma)) {
47981 +                       i = follow_hugetlb_page(mm, vma, pages, vmas,
47982 +                                               &start, &len, i);
47983 +                       continue;
47984 +               }
47985 +
47986 +               foll_flags = FOLL_TOUCH;
47987 +               if (pages)
47988 +                       foll_flags |= FOLL_GET;
47989 +               if (!write && !(vma->vm_flags & VM_LOCKED) &&
47990 +                   (!vma->vm_ops || !vma->vm_ops->nopage))
47991 +                       foll_flags |= FOLL_ANON;
47992 +
47993 +               do {
47994 +                       struct page *page;
47995 +
47996 +                       if (write)
47997 +                               foll_flags |= FOLL_WRITE;
47998 +
47999 +                       cond_resched();
48000 +                       while (!(page = follow_page(vma, start, foll_flags))) {
48001 +                               int ret;
48002 +                               ret = __handle_mm_fault(mm, vma, start,
48003 +                                               foll_flags & FOLL_WRITE);
48004 +                               /*
48005 +                                * The VM_FAULT_WRITE bit tells us that do_wp_page has
48006 +                                * broken COW when necessary, even if maybe_mkwrite
48007 +                                * decided not to set pte_write. We can thus safely do
48008 +                                * subsequent page lookups as if they were reads.
48009 +                                */
48010 +                               if (ret & VM_FAULT_WRITE)
48011 +                                       foll_flags &= ~FOLL_WRITE;
48012 +                               
48013 +                               switch (ret & ~VM_FAULT_WRITE) {
48014 +                               case VM_FAULT_MINOR:
48015 +                                       tsk->min_flt++;
48016 +                                       break;
48017 +                               case VM_FAULT_MAJOR:
48018 +                                       tsk->maj_flt++;
48019 +                                       break;
48020 +                               case VM_FAULT_SIGBUS:
48021 +                                       return i ? i : -EFAULT;
48022 +                               case VM_FAULT_OOM:
48023 +                                       return i ? i : -ENOMEM;
48024 +                               default:
48025 +                                       BUG();
48026 +                               }
48027 +                               cond_resched();
48028 +                       }
48029 +                       if (pages) {
48030 +                               pages[i] = page;
48031 +
48032 +                               flush_anon_page(vma, page, start);
48033 +                               flush_dcache_page(page);
48034 +                       }
48035 +                       if (vmas)
48036 +                               vmas[i] = vma;
48037 +                       i++;
48038 +                       start += PAGE_SIZE;
48039 +                       len--;
48040 +               } while (len && start < vma->vm_end);
48041 +       } while (len);
48042 +       return i;
48043 +}
48044 +EXPORT_SYMBOL(get_user_pages);
48045 +
48046 +static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
48047 +                       unsigned long addr, unsigned long end, pgprot_t prot)
48048 +{
48049 +       pte_t *pte;
48050 +       spinlock_t *ptl;
48051 +       int err = 0;
48052 +
48053 +       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
48054 +       if (!pte)
48055 +               return -EAGAIN;
48056 +       arch_enter_lazy_mmu_mode();
48057 +       do {
48058 +               struct page *page = ZERO_PAGE(addr);
48059 +               pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
48060 +
48061 +               if (unlikely(!pte_none(*pte))) {
48062 +                       err = -EEXIST;
48063 +                       pte++;
48064 +                       break;
48065 +               }
48066 +               page_cache_get(page);
48067 +               page_add_file_rmap(page);
48068 +               inc_mm_counter(mm, file_rss);
48069 +               set_pte_at(mm, addr, pte, zero_pte);
48070 +       } while (pte++, addr += PAGE_SIZE, addr != end);
48071 +       arch_leave_lazy_mmu_mode();
48072 +       pte_unmap_unlock(pte - 1, ptl);
48073 +       return err;
48074 +}
48075 +
48076 +static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
48077 +                       unsigned long addr, unsigned long end, pgprot_t prot)
48078 +{
48079 +       pmd_t *pmd;
48080 +       unsigned long next;
48081 +       int err;
48082 +
48083 +       pmd = pmd_alloc(mm, pud, addr);
48084 +       if (!pmd)
48085 +               return -EAGAIN;
48086 +       do {
48087 +               next = pmd_addr_end(addr, end);
48088 +               err = zeromap_pte_range(mm, pmd, addr, next, prot);
48089 +               if (err)
48090 +                       break;
48091 +       } while (pmd++, addr = next, addr != end);
48092 +       return err;
48093 +}
48094 +
48095 +static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
48096 +                       unsigned long addr, unsigned long end, pgprot_t prot)
48097 +{
48098 +       pud_t *pud;
48099 +       unsigned long next;
48100 +       int err;
48101 +
48102 +       pud = pud_alloc(mm, pgd, addr);
48103 +       if (!pud)
48104 +               return -EAGAIN;
48105 +       do {
48106 +               next = pud_addr_end(addr, end);
48107 +               err = zeromap_pmd_range(mm, pud, addr, next, prot);
48108 +               if (err)
48109 +                       break;
48110 +       } while (pud++, addr = next, addr != end);
48111 +       return err;
48112 +}
48113 +
48114 +int zeromap_page_range(struct vm_area_struct *vma,
48115 +                       unsigned long addr, unsigned long size, pgprot_t prot)
48116 +{
48117 +       pgd_t *pgd;
48118 +       unsigned long next;
48119 +       unsigned long end = addr + size;
48120 +       struct mm_struct *mm = vma->vm_mm;
48121 +       int err;
48122 +
48123 +       BUG_ON(addr >= end);
48124 +       pgd = pgd_offset(mm, addr);
48125 +       flush_cache_range(vma, addr, end);
48126 +       do {
48127 +               next = pgd_addr_end(addr, end);
48128 +               err = zeromap_pud_range(mm, pgd, addr, next, prot);
48129 +               if (err)
48130 +                       break;
48131 +       } while (pgd++, addr = next, addr != end);
48132 +       return err;
48133 +}
48134 +
48135 +pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
48136 +{
48137 +       pgd_t * pgd = pgd_offset(mm, addr);
48138 +       pud_t * pud = pud_alloc(mm, pgd, addr);
48139 +       if (pud) {
48140 +               pmd_t * pmd = pmd_alloc(mm, pud, addr);
48141 +               if (pmd)
48142 +                       return pte_alloc_map_lock(mm, pmd, addr, ptl);
48143 +       }
48144 +       return NULL;
48145 +}
48146 +
48147 +/*
48148 + * This is the old fallback for page remapping.
48149 + *
48150 + * For historical reasons, it only allows reserved pages. Only
48151 + * old drivers should use this, and they needed to mark their
48152 + * pages reserved for the old functions anyway.
48153 + */
48154 +static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
48155 +{
48156 +       int retval;
48157 +       pte_t *pte;
48158 +       spinlock_t *ptl;  
48159 +
48160 +       retval = -EINVAL;
48161 +       if (PageAnon(page))
48162 +               goto out;
48163 +       retval = -ENOMEM;
48164 +       flush_dcache_page(page);
48165 +       pte = get_locked_pte(mm, addr, &ptl);
48166 +       if (!pte)
48167 +               goto out;
48168 +       retval = -EBUSY;
48169 +       if (!pte_none(*pte))
48170 +               goto out_unlock;
48171 +
48172 +       /* Ok, finally just insert the thing.. */
48173 +       get_page(page);
48174 +       inc_mm_counter(mm, file_rss);
48175 +       page_add_file_rmap(page);
48176 +       set_pte_at(mm, addr, pte, mk_pte(page, prot));
48177 +
48178 +       retval = 0;
48179 +out_unlock:
48180 +       pte_unmap_unlock(pte, ptl);
48181 +out:
48182 +       return retval;
48183 +}
48184 +
48185 +/**
48186 + * vm_insert_page - insert single page into user vma
48187 + * @vma: user vma to map to
48188 + * @addr: target user address of this page
48189 + * @page: source kernel page
48190 + *
48191 + * This allows drivers to insert individual pages they've allocated
48192 + * into a user vma.
48193 + *
48194 + * The page has to be a nice clean _individual_ kernel allocation.
48195 + * If you allocate a compound page, you need to have marked it as
48196 + * such (__GFP_COMP), or manually just split the page up yourself
48197 + * (see split_page()).
48198 + *
48199 + * NOTE! Traditionally this was done with "remap_pfn_range()" which
48200 + * took an arbitrary page protection parameter. This doesn't allow
48201 + * that. Your vma protection will have to be set up correctly, which
48202 + * means that if you want a shared writable mapping, you'd better
48203 + * ask for a shared writable mapping!
48204 + *
48205 + * The page does not need to be reserved.
48206 + */
48207 +int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
48208 +{
48209 +       if (addr < vma->vm_start || addr >= vma->vm_end)
48210 +               return -EFAULT;
48211 +       if (!page_count(page))
48212 +               return -EINVAL;
48213 +       vma->vm_flags |= VM_INSERTPAGE;
48214 +       return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
48215 +}
48216 +EXPORT_SYMBOL(vm_insert_page);
48217 +
48218 +/**
48219 + * vm_insert_pfn - insert single pfn into user vma
48220 + * @vma: user vma to map to
48221 + * @addr: target user address of this page
48222 + * @pfn: source kernel pfn
48223 + *
48224 + * Similar to vm_insert_page, this allows drivers to insert individual pages
48225 + * they've allocated into a user vma. Same comments apply.
48226 + *
48227 + * This function should only be called from a vm_ops->fault handler, and
48228 + * in that case the handler should return NULL.
48229 + */
48230 +int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
48231 +               unsigned long pfn)
48232 +{
48233 +       struct mm_struct *mm = vma->vm_mm;
48234 +       int retval;
48235 +       pte_t *pte, entry;
48236 +       spinlock_t *ptl;
48237 +
48238 +       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
48239 +       BUG_ON(is_cow_mapping(vma->vm_flags));
48240 +
48241 +       retval = -ENOMEM;
48242 +       pte = get_locked_pte(mm, addr, &ptl);
48243 +       if (!pte)
48244 +               goto out;
48245 +       retval = -EBUSY;
48246 +       if (!pte_none(*pte))
48247 +               goto out_unlock;
48248 +
48249 +       /* Ok, finally just insert the thing.. */
48250 +       entry = pfn_pte(pfn, vma->vm_page_prot);
48251 +       set_pte_at(mm, addr, pte, entry);
48252 +       update_mmu_cache(vma, addr, entry);
48253 +
48254 +       retval = 0;
48255 +out_unlock:
48256 +       pte_unmap_unlock(pte, ptl);
48257 +
48258 +out:
48259 +       return retval;
48260 +}
48261 +EXPORT_SYMBOL(vm_insert_pfn);
48262 +
48263 +/*
48264 + * maps a range of physical memory into the requested pages. the old
48265 + * mappings are removed. any references to nonexistent pages results
48266 + * in null mappings (currently treated as "copy-on-access")
48267 + */
48268 +static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
48269 +                       unsigned long addr, unsigned long end,
48270 +                       unsigned long pfn, pgprot_t prot)
48271 +{
48272 +       pte_t *pte;
48273 +       spinlock_t *ptl;
48274 +
48275 +       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
48276 +       if (!pte)
48277 +               return -ENOMEM;
48278 +       arch_enter_lazy_mmu_mode();
48279 +       do {
48280 +               BUG_ON(!pte_none(*pte));
48281 +               set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
48282 +               pfn++;
48283 +       } while (pte++, addr += PAGE_SIZE, addr != end);
48284 +       arch_leave_lazy_mmu_mode();
48285 +       pte_unmap_unlock(pte - 1, ptl);
48286 +       return 0;
48287 +}
48288 +
48289 +static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
48290 +                       unsigned long addr, unsigned long end,
48291 +                       unsigned long pfn, pgprot_t prot)
48292 +{
48293 +       pmd_t *pmd;
48294 +       unsigned long next;
48295 +
48296 +       pfn -= addr >> PAGE_SHIFT;
48297 +       pmd = pmd_alloc(mm, pud, addr);
48298 +       if (!pmd)
48299 +               return -ENOMEM;
48300 +       do {
48301 +               next = pmd_addr_end(addr, end);
48302 +               if (remap_pte_range(mm, pmd, addr, next,
48303 +                               pfn + (addr >> PAGE_SHIFT), prot))
48304 +                       return -ENOMEM;
48305 +       } while (pmd++, addr = next, addr != end);
48306 +       return 0;
48307 +}
48308 +
48309 +static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
48310 +                       unsigned long addr, unsigned long end,
48311 +                       unsigned long pfn, pgprot_t prot)
48312 +{
48313 +       pud_t *pud;
48314 +       unsigned long next;
48315 +
48316 +       pfn -= addr >> PAGE_SHIFT;
48317 +       pud = pud_alloc(mm, pgd, addr);
48318 +       if (!pud)
48319 +               return -ENOMEM;
48320 +       do {
48321 +               next = pud_addr_end(addr, end);
48322 +               if (remap_pmd_range(mm, pud, addr, next,
48323 +                               pfn + (addr >> PAGE_SHIFT), prot))
48324 +                       return -ENOMEM;
48325 +       } while (pud++, addr = next, addr != end);
48326 +       return 0;
48327 +}
48328 +
48329 +/**
48330 + * remap_pfn_range - remap kernel memory to userspace
48331 + * @vma: user vma to map to
48332 + * @addr: target user address to start at
48333 + * @pfn: physical address of kernel memory
48334 + * @size: size of map area
48335 + * @prot: page protection flags for this mapping
48336 + *
48337 + *  Note: this is only safe if the mm semaphore is held when called.
48338 + */
48339 +int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
48340 +                   unsigned long pfn, unsigned long size, pgprot_t prot)
48341 +{
48342 +       pgd_t *pgd;
48343 +       unsigned long next;
48344 +       unsigned long end = addr + PAGE_ALIGN(size);
48345 +       struct mm_struct *mm = vma->vm_mm;
48346 +       int err;
48347 +
48348 +       /*
48349 +        * Physically remapped pages are special. Tell the
48350 +        * rest of the world about it:
48351 +        *   VM_IO tells people not to look at these pages
48352 +        *      (accesses can have side effects).
48353 +        *   VM_RESERVED is specified all over the place, because
48354 +        *      in 2.4 it kept swapout's vma scan off this vma; but
48355 +        *      in 2.6 the LRU scan won't even find its pages, so this
48356 +        *      flag means no more than count its pages in reserved_vm,
48357 +        *      and omit it from core dump, even when VM_IO turned off.
48358 +        *   VM_PFNMAP tells the core MM that the base pages are just
48359 +        *      raw PFN mappings, and do not have a "struct page" associated
48360 +        *      with them.
48361 +        *
48362 +        * There's a horrible special case to handle copy-on-write
48363 +        * behaviour that some programs depend on. We mark the "original"
48364 +        * un-COW'ed pages by matching them up with "vma->vm_pgoff".
48365 +        */
48366 +       if (is_cow_mapping(vma->vm_flags)) {
48367 +               if (addr != vma->vm_start || end != vma->vm_end)
48368 +                       return -EINVAL;
48369 +               vma->vm_pgoff = pfn;
48370 +       }
48371 +
48372 +       vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
48373 +
48374 +       BUG_ON(addr >= end);
48375 +       pfn -= addr >> PAGE_SHIFT;
48376 +       pgd = pgd_offset(mm, addr);
48377 +       flush_cache_range(vma, addr, end);
48378 +       do {
48379 +               next = pgd_addr_end(addr, end);
48380 +               err = remap_pud_range(mm, pgd, addr, next,
48381 +                               pfn + (addr >> PAGE_SHIFT), prot);
48382 +               if (err)
48383 +                       break;
48384 +       } while (pgd++, addr = next, addr != end);
48385 +       return err;
48386 +}
48387 +EXPORT_SYMBOL(remap_pfn_range);
48388 +
48389 +static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
48390 +                                    unsigned long addr, unsigned long end,
48391 +                                    pte_fn_t fn, void *data)
48392 +{
48393 +       pte_t *pte;
48394 +       int err;
48395 +       struct page *pmd_page;
48396 +       spinlock_t *uninitialized_var(ptl);
48397 +
48398 +       pte = (mm == &init_mm) ?
48399 +               pte_alloc_kernel(pmd, addr) :
48400 +               pte_alloc_map_lock(mm, pmd, addr, &ptl);
48401 +       if (!pte)
48402 +               return -ENOMEM;
48403 +
48404 +       BUG_ON(pmd_huge(*pmd));
48405 +
48406 +       pmd_page = pmd_page(*pmd);
48407 +
48408 +       do {
48409 +               err = fn(pte, pmd_page, addr, data);
48410 +               if (err)
48411 +                       break;
48412 +       } while (pte++, addr += PAGE_SIZE, addr != end);
48413 +
48414 +       if (mm != &init_mm)
48415 +               pte_unmap_unlock(pte-1, ptl);
48416 +       return err;
48417 +}
48418 +
48419 +static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
48420 +                                    unsigned long addr, unsigned long end,
48421 +                                    pte_fn_t fn, void *data)
48422 +{
48423 +       pmd_t *pmd;
48424 +       unsigned long next;
48425 +       int err;
48426 +
48427 +       pmd = pmd_alloc(mm, pud, addr);
48428 +       if (!pmd)
48429 +               return -ENOMEM;
48430 +       do {
48431 +               next = pmd_addr_end(addr, end);
48432 +               err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
48433 +               if (err)
48434 +                       break;
48435 +       } while (pmd++, addr = next, addr != end);
48436 +       return err;
48437 +}
48438 +
48439 +static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
48440 +                                    unsigned long addr, unsigned long end,
48441 +                                    pte_fn_t fn, void *data)
48442 +{
48443 +       pud_t *pud;
48444 +       unsigned long next;
48445 +       int err;
48446 +
48447 +       pud = pud_alloc(mm, pgd, addr);
48448 +       if (!pud)
48449 +               return -ENOMEM;
48450 +       do {
48451 +               next = pud_addr_end(addr, end);
48452 +               err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
48453 +               if (err)
48454 +                       break;
48455 +       } while (pud++, addr = next, addr != end);
48456 +       return err;
48457 +}
48458 +
48459 +/*
48460 + * Scan a region of virtual memory, filling in page tables as necessary
48461 + * and calling a provided function on each leaf page table.
48462 + */
48463 +int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
48464 +                       unsigned long size, pte_fn_t fn, void *data)
48465 +{
48466 +       pgd_t *pgd;
48467 +       unsigned long next;
48468 +       unsigned long end = addr + size;
48469 +       int err;
48470 +
48471 +       BUG_ON(addr >= end);
48472 +       pgd = pgd_offset(mm, addr);
48473 +       do {
48474 +               next = pgd_addr_end(addr, end);
48475 +               err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
48476 +               if (err)
48477 +                       break;
48478 +       } while (pgd++, addr = next, addr != end);
48479 +       return err;
48480 +}
48481 +EXPORT_SYMBOL_GPL(apply_to_page_range);
48482 +
48483 +/*
48484 + * handle_pte_fault chooses page fault handler according to an entry
48485 + * which was read non-atomically.  Before making any commitment, on
48486 + * those architectures or configurations (e.g. i386 with PAE) which
48487 + * might give a mix of unmatched parts, do_swap_page and do_file_page
48488 + * must check under lock before unmapping the pte and proceeding
48489 + * (but do_wp_page is only called after already making such a check;
48490 + * and do_anonymous_page and do_no_page can safely check later on).
48491 + */
48492 +static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
48493 +                               pte_t *page_table, pte_t orig_pte)
48494 +{
48495 +       int same = 1;
48496 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
48497 +       if (sizeof(pte_t) > sizeof(unsigned long)) {
48498 +               spinlock_t *ptl = pte_lockptr(mm, pmd);
48499 +               spin_lock(ptl);
48500 +               same = pte_same(*page_table, orig_pte);
48501 +               spin_unlock(ptl);
48502 +       }
48503 +#endif
48504 +       pte_unmap(page_table);
48505 +       return same;
48506 +}
48507 +
48508 +/*
48509 + * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
48510 + * servicing faults for write access.  In the normal case, do always want
48511 + * pte_mkwrite.  But get_user_pages can cause write faults for mappings
48512 + * that do not have writing enabled, when used by access_process_vm.
48513 + */
48514 +static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
48515 +{
48516 +       if (likely(vma->vm_flags & VM_WRITE))
48517 +               pte = pte_mkwrite(pte);
48518 +       return pte;
48519 +}
48520 +
48521 +static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
48522 +{
48523 +       /*
48524 +        * If the source page was a PFN mapping, we don't have
48525 +        * a "struct page" for it. We do a best-effort copy by
48526 +        * just copying from the original user address. If that
48527 +        * fails, we just zero-fill it. Live with it.
48528 +        */
48529 +       if (unlikely(!src)) {
48530 +               void *kaddr = kmap_atomic(dst, KM_USER0);
48531 +               void __user *uaddr = (void __user *)(va & PAGE_MASK);
48532 +
48533 +               /*
48534 +                * This really shouldn't fail, because the page is there
48535 +                * in the page tables. But it might just be unreadable,
48536 +                * in which case we just give up and fill the result with
48537 +                * zeroes.
48538 +                */
48539 +               if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
48540 +                       memset(kaddr, 0, PAGE_SIZE);
48541 +               kunmap_atomic(kaddr, KM_USER0);
48542 +               flush_dcache_page(dst);
48543 +               return;
48544 +
48545 +       }
48546 +       copy_user_highpage(dst, src, va, vma);
48547 +}
48548 +
48549 +/*
48550 + * This routine handles present pages, when users try to write
48551 + * to a shared page. It is done by copying the page to a new address
48552 + * and decrementing the shared-page counter for the old page.
48553 + *
48554 + * Note that this routine assumes that the protection checks have been
48555 + * done by the caller (the low-level page fault routine in most cases).
48556 + * Thus we can safely just mark it writable once we've done any necessary
48557 + * COW.
48558 + *
48559 + * We also mark the page dirty at this point even though the page will
48560 + * change only once the write actually happens. This avoids a few races,
48561 + * and potentially makes it more efficient.
48562 + *
48563 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
48564 + * but allow concurrent faults), with pte both mapped and locked.
48565 + * We return with mmap_sem still held, but pte unmapped and unlocked.
48566 + */
48567 +static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
48568 +               unsigned long address, pte_t *page_table, pmd_t *pmd,
48569 +               spinlock_t *ptl, pte_t orig_pte)
48570 +{
48571 +       struct page *old_page, *new_page;
48572 +       pte_t entry;
48573 +       int reuse = 0, ret = VM_FAULT_MINOR;
48574 +       struct page *dirty_page = NULL;
48575 +
48576 +       old_page = vm_normal_page(vma, address, orig_pte);
48577 +       if (!old_page)
48578 +               goto gotten;
48579 +
48580 +       /*
48581 +        * Take out anonymous pages first, anonymous shared vmas are
48582 +        * not dirty accountable.
48583 +        */
48584 +       if (PageAnon(old_page)) {
48585 +               if (!TestSetPageLocked(old_page)) {
48586 +                       reuse = can_share_swap_page(old_page);
48587 +                       unlock_page(old_page);
48588 +               }
48589 +       } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
48590 +                                       (VM_WRITE|VM_SHARED))) {
48591 +               /*
48592 +                * Only catch write-faults on shared writable pages,
48593 +                * read-only shared pages can get COWed by
48594 +                * get_user_pages(.write=1, .force=1).
48595 +                */
48596 +               if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
48597 +                       /*
48598 +                        * Notify the address space that the page is about to
48599 +                        * become writable so that it can prohibit this or wait
48600 +                        * for the page to get into an appropriate state.
48601 +                        *
48602 +                        * We do this without the lock held, so that it can
48603 +                        * sleep if it needs to.
48604 +                        */
48605 +                       page_cache_get(old_page);
48606 +                       pte_unmap_unlock(page_table, ptl);
48607 +
48608 +                       if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
48609 +                               goto unwritable_page;
48610 +
48611 +                       /*
48612 +                        * Since we dropped the lock we need to revalidate
48613 +                        * the PTE as someone else may have changed it.  If
48614 +                        * they did, we just return, as we can count on the
48615 +                        * MMU to tell us if they didn't also make it writable.
48616 +                        */
48617 +                       page_table = pte_offset_map_lock(mm, pmd, address,
48618 +                                                        &ptl);
48619 +                       page_cache_release(old_page);
48620 +                       if (!pte_same(*page_table, orig_pte))
48621 +                               goto unlock;
48622 +               }
48623 +               dirty_page = old_page;
48624 +               get_page(dirty_page);
48625 +               reuse = 1;
48626 +       }
48627 +
48628 +       if (reuse) {
48629 +               flush_cache_page(vma, address, pte_pfn(orig_pte));
48630 +               entry = pte_mkyoung(orig_pte);
48631 +               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
48632 +               if (ptep_set_access_flags(vma, address, page_table, entry,1)) {
48633 +                       update_mmu_cache(vma, address, entry);
48634 +                       lazy_mmu_prot_update(entry);
48635 +               }
48636 +               ret |= VM_FAULT_WRITE;
48637 +               goto unlock;
48638 +       }
48639 +
48640 +       /*
48641 +        * Ok, we need to copy. Oh, well..
48642 +        */
48643 +       page_cache_get(old_page);
48644 +gotten:
48645 +       pte_unmap_unlock(page_table, ptl);
48646 +
48647 +       if (unlikely(anon_vma_prepare(vma)))
48648 +               goto oom;
48649 +       if (old_page == ZERO_PAGE(address)) {
48650 +               new_page = alloc_zeroed_user_highpage(vma, address);
48651 +               if (!new_page)
48652 +                       goto oom;
48653 +       } else {
48654 +               new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
48655 +               if (!new_page)
48656 +                       goto oom;
48657 +               cow_user_page(new_page, old_page, address, vma);
48658 +       }
48659 +
48660 +       /*
48661 +        * Re-check the pte - we dropped the lock
48662 +        */
48663 +       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
48664 +       if (likely(pte_same(*page_table, orig_pte))) {
48665 +               if (old_page) {
48666 +                       page_remove_rmap(old_page, vma);
48667 +                       if (!PageAnon(old_page)) {
48668 +                               dec_mm_counter(mm, file_rss);
48669 +                               inc_mm_counter(mm, anon_rss);
48670 +                       }
48671 +               } else
48672 +                       inc_mm_counter(mm, anon_rss);
48673 +               flush_cache_page(vma, address, pte_pfn(orig_pte));
48674 +               entry = mk_pte(new_page, vma->vm_page_prot);
48675 +               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
48676 +               lazy_mmu_prot_update(entry);
48677 +               /*
48678 +                * Clear the pte entry and flush it first, before updating the
48679 +                * pte with the new entry. This will avoid a race condition
48680 +                * seen in the presence of one thread doing SMC and another
48681 +                * thread doing COW.
48682 +                */
48683 +               ptep_clear_flush(vma, address, page_table);
48684 +               set_pte_at(mm, address, page_table, entry);
48685 +               update_mmu_cache(vma, address, entry);
48686 +               lru_cache_add_active(new_page);
48687 +               page_add_new_anon_rmap(new_page, vma, address);
48688 +
48689 +               /* Free the old page.. */
48690 +               new_page = old_page;
48691 +               ret |= VM_FAULT_WRITE;
48692 +       }
48693 +       if (new_page)
48694 +               page_cache_release(new_page);
48695 +       if (old_page)
48696 +               page_cache_release(old_page);
48697 +unlock:
48698 +       pte_unmap_unlock(page_table, ptl);
48699 +       if (dirty_page) {
48700 +               set_page_dirty_balance(dirty_page);
48701 +               put_page(dirty_page);
48702 +       }
48703 +       return ret;
48704 +oom:
48705 +       if (old_page)
48706 +               page_cache_release(old_page);
48707 +       return VM_FAULT_OOM;
48708 +
48709 +unwritable_page:
48710 +       page_cache_release(old_page);
48711 +       return VM_FAULT_SIGBUS;
48712 +}
48713 +
48714 +/*
48715 + * Helper functions for unmap_mapping_range().
48716 + *
48717 + * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
48718 + *
48719 + * We have to restart searching the prio_tree whenever we drop the lock,
48720 + * since the iterator is only valid while the lock is held, and anyway
48721 + * a later vma might be split and reinserted earlier while lock dropped.
48722 + *
48723 + * The list of nonlinear vmas could be handled more efficiently, using
48724 + * a placeholder, but handle it in the same way until a need is shown.
48725 + * It is important to search the prio_tree before nonlinear list: a vma
48726 + * may become nonlinear and be shifted from prio_tree to nonlinear list
48727 + * while the lock is dropped; but never shifted from list to prio_tree.
48728 + *
48729 + * In order to make forward progress despite restarting the search,
48730 + * vm_truncate_count is used to mark a vma as now dealt with, so we can
48731 + * quickly skip it next time around.  Since the prio_tree search only
48732 + * shows us those vmas affected by unmapping the range in question, we
48733 + * can't efficiently keep all vmas in step with mapping->truncate_count:
48734 + * so instead reset them all whenever it wraps back to 0 (then go to 1).
48735 + * mapping->truncate_count and vma->vm_truncate_count are protected by
48736 + * i_mmap_lock.
48737 + *
48738 + * In order to make forward progress despite repeatedly restarting some
48739 + * large vma, note the restart_addr from unmap_vmas when it breaks out:
48740 + * and restart from that address when we reach that vma again.  It might
48741 + * have been split or merged, shrunk or extended, but never shifted: so
48742 + * restart_addr remains valid so long as it remains in the vma's range.
48743 + * unmap_mapping_range forces truncate_count to leap over page-aligned
48744 + * values so we can save vma's restart_addr in its truncate_count field.
48745 + */
48746 +#define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
48747 +
48748 +static void reset_vma_truncate_counts(struct address_space *mapping)
48749 +{
48750 +       struct vm_area_struct *vma;
48751 +       struct prio_tree_iter iter;
48752 +
48753 +       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
48754 +               vma->vm_truncate_count = 0;
48755 +       list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
48756 +               vma->vm_truncate_count = 0;
48757 +}
48758 +
48759 +static int unmap_mapping_range_vma(struct vm_area_struct *vma,
48760 +               unsigned long start_addr, unsigned long end_addr,
48761 +               struct zap_details *details)
48762 +{
48763 +       unsigned long restart_addr;
48764 +       int need_break;
48765 +
48766 +again:
48767 +       restart_addr = vma->vm_truncate_count;
48768 +       if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
48769 +               start_addr = restart_addr;
48770 +               if (start_addr >= end_addr) {
48771 +                       /* Top of vma has been split off since last time */
48772 +                       vma->vm_truncate_count = details->truncate_count;
48773 +                       return 0;
48774 +               }
48775 +       }
48776 +
48777 +       restart_addr = zap_page_range(vma, start_addr,
48778 +                                       end_addr - start_addr, details);
48779 +       need_break = need_resched() ||
48780 +                       need_lockbreak(details->i_mmap_lock);
48781 +
48782 +       if (restart_addr >= end_addr) {
48783 +               /* We have now completed this vma: mark it so */
48784 +               vma->vm_truncate_count = details->truncate_count;
48785 +               if (!need_break)
48786 +                       return 0;
48787 +       } else {
48788 +               /* Note restart_addr in vma's truncate_count field */
48789 +               vma->vm_truncate_count = restart_addr;
48790 +               if (!need_break)
48791 +                       goto again;
48792 +       }
48793 +
48794 +       spin_unlock(details->i_mmap_lock);
48795 +       cond_resched();
48796 +       spin_lock(details->i_mmap_lock);
48797 +       return -EINTR;
48798 +}
48799 +
48800 +static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
48801 +                                           struct zap_details *details)
48802 +{
48803 +       struct vm_area_struct *vma;
48804 +       struct prio_tree_iter iter;
48805 +       pgoff_t vba, vea, zba, zea;
48806 +
48807 +restart:
48808 +       vma_prio_tree_foreach(vma, &iter, root,
48809 +                       details->first_index, details->last_index) {
48810 +               /* Skip quickly over those we have already dealt with */
48811 +               if (vma->vm_truncate_count == details->truncate_count)
48812 +                       continue;
48813 +
48814 +               vba = vma->vm_pgoff;
48815 +               vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
48816 +               /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
48817 +               zba = details->first_index;
48818 +               if (zba < vba)
48819 +                       zba = vba;
48820 +               zea = details->last_index;
48821 +               if (zea > vea)
48822 +                       zea = vea;
48823 +
48824 +               if (unmap_mapping_range_vma(vma,
48825 +                       ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
48826 +                       ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
48827 +                               details) < 0)
48828 +                       goto restart;
48829 +       }
48830 +}
48831 +
48832 +static inline void unmap_mapping_range_list(struct list_head *head,
48833 +                                           struct zap_details *details)
48834 +{
48835 +       struct vm_area_struct *vma;
48836 +
48837 +       /*
48838 +        * In nonlinear VMAs there is no correspondence between virtual address
48839 +        * offset and file offset.  So we must perform an exhaustive search
48840 +        * across *all* the pages in each nonlinear VMA, not just the pages
48841 +        * whose virtual address lies outside the file truncation point.
48842 +        */
48843 +restart:
48844 +       list_for_each_entry(vma, head, shared.vm_set.list) {
48845 +               /* Skip quickly over those we have already dealt with */
48846 +               if (vma->vm_truncate_count == details->truncate_count)
48847 +                       continue;
48848 +               details->nonlinear_vma = vma;
48849 +               if (unmap_mapping_range_vma(vma, vma->vm_start,
48850 +                                       vma->vm_end, details) < 0)
48851 +                       goto restart;
48852 +       }
48853 +}
48854 +
48855 +/**
48856 + * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
48857 + * @mapping: the address space containing mmaps to be unmapped.
48858 + * @holebegin: byte in first page to unmap, relative to the start of
48859 + * the underlying file.  This will be rounded down to a PAGE_SIZE
48860 + * boundary.  Note that this is different from vmtruncate(), which
48861 + * must keep the partial page.  In contrast, we must get rid of
48862 + * partial pages.
48863 + * @holelen: size of prospective hole in bytes.  This will be rounded
48864 + * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
48865 + * end of the file.
48866 + * @even_cows: 1 when truncating a file, unmap even private COWed pages;
48867 + * but 0 when invalidating pagecache, don't throw away private data.
48868 + */
48869 +void unmap_mapping_range(struct address_space *mapping,
48870 +               loff_t const holebegin, loff_t const holelen, int even_cows)
48871 +{
48872 +       struct zap_details details;
48873 +       pgoff_t hba = holebegin >> PAGE_SHIFT;
48874 +       pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
48875 +
48876 +       /* Check for overflow. */
48877 +       if (sizeof(holelen) > sizeof(hlen)) {
48878 +               long long holeend =
48879 +                       (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
48880 +               if (holeend & ~(long long)ULONG_MAX)
48881 +                       hlen = ULONG_MAX - hba + 1;
48882 +       }
48883 +
48884 +       details.check_mapping = even_cows? NULL: mapping;
48885 +       details.nonlinear_vma = NULL;
48886 +       details.first_index = hba;
48887 +       details.last_index = hba + hlen - 1;
48888 +       if (details.last_index < details.first_index)
48889 +               details.last_index = ULONG_MAX;
48890 +       details.i_mmap_lock = &mapping->i_mmap_lock;
48891 +
48892 +       spin_lock(&mapping->i_mmap_lock);
48893 +
48894 +       /* serialize i_size write against truncate_count write */
48895 +       smp_wmb();
48896 +       /* Protect against page faults, and endless unmapping loops */
48897 +       mapping->truncate_count++;
48898 +       /*
48899 +        * For archs where spin_lock has inclusive semantics like ia64
48900 +        * this smp_mb() will prevent to read pagetable contents
48901 +        * before the truncate_count increment is visible to
48902 +        * other cpus.
48903 +        */
48904 +       smp_mb();
48905 +       if (unlikely(is_restart_addr(mapping->truncate_count))) {
48906 +               if (mapping->truncate_count == 0)
48907 +                       reset_vma_truncate_counts(mapping);
48908 +               mapping->truncate_count++;
48909 +       }
48910 +       details.truncate_count = mapping->truncate_count;
48911 +
48912 +       if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
48913 +               unmap_mapping_range_tree(&mapping->i_mmap, &details);
48914 +       if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
48915 +               unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
48916 +       spin_unlock(&mapping->i_mmap_lock);
48917 +}
48918 +EXPORT_SYMBOL(unmap_mapping_range);
48919 +
48920 +/**
48921 + * vmtruncate - unmap mappings "freed" by truncate() syscall
48922 + * @inode: inode of the file used
48923 + * @offset: file offset to start truncating
48924 + *
48925 + * NOTE! We have to be ready to update the memory sharing
48926 + * between the file and the memory map for a potential last
48927 + * incomplete page.  Ugly, but necessary.
48928 + */
48929 +int vmtruncate(struct inode * inode, loff_t offset)
48930 +{
48931 +       struct address_space *mapping = inode->i_mapping;
48932 +       unsigned long limit;
48933 +
48934 +       if (inode->i_size < offset)
48935 +               goto do_expand;
48936 +       /*
48937 +        * truncation of in-use swapfiles is disallowed - it would cause
48938 +        * subsequent swapout to scribble on the now-freed blocks.
48939 +        */
48940 +       if (IS_SWAPFILE(inode))
48941 +               goto out_busy;
48942 +       i_size_write(inode, offset);
48943 +       unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
48944 +       truncate_inode_pages(mapping, offset);
48945 +       goto out_truncate;
48946 +
48947 +do_expand:
48948 +       limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
48949 +       if (limit != RLIM_INFINITY && offset > limit)
48950 +               goto out_sig;
48951 +       if (offset > inode->i_sb->s_maxbytes)
48952 +               goto out_big;
48953 +       i_size_write(inode, offset);
48954 +
48955 +out_truncate:
48956 +       if (inode->i_op && inode->i_op->truncate)
48957 +               inode->i_op->truncate(inode);
48958 +       return 0;
48959 +out_sig:
48960 +       send_sig(SIGXFSZ, current, 0);
48961 +out_big:
48962 +       return -EFBIG;
48963 +out_busy:
48964 +       return -ETXTBSY;
48965 +}
48966 +EXPORT_SYMBOL(vmtruncate);
48967 +
48968 +int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
48969 +{
48970 +       struct address_space *mapping = inode->i_mapping;
48971 +
48972 +       /*
48973 +        * If the underlying filesystem is not going to provide
48974 +        * a way to truncate a range of blocks (punch a hole) -
48975 +        * we should return failure right now.
48976 +        */
48977 +       if (!inode->i_op || !inode->i_op->truncate_range)
48978 +               return -ENOSYS;
48979 +
48980 +       mutex_lock(&inode->i_mutex);
48981 +       down_write(&inode->i_alloc_sem);
48982 +       unmap_mapping_range(mapping, offset, (end - offset), 1);
48983 +       truncate_inode_pages_range(mapping, offset, end);
48984 +       inode->i_op->truncate_range(inode, offset, end);
48985 +       up_write(&inode->i_alloc_sem);
48986 +       mutex_unlock(&inode->i_mutex);
48987 +
48988 +       return 0;
48989 +}
48990 +
48991 +/**
48992 + * swapin_readahead - swap in pages in hope we need them soon
48993 + * @entry: swap entry of this memory
48994 + * @addr: address to start
48995 + * @vma: user vma this addresses belong to
48996 + *
48997 + * Primitive swap readahead code. We simply read an aligned block of
48998 + * (1 << page_cluster) entries in the swap area. This method is chosen
48999 + * because it doesn't cost us any seek time.  We also make sure to queue
49000 + * the 'original' request together with the readahead ones...
49001 + *
49002 + * This has been extended to use the NUMA policies from the mm triggering
49003 + * the readahead.
49004 + *
49005 + * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
49006 + */
49007 +void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struct *vma)
49008 +{
49009 +#ifdef CONFIG_NUMA
49010 +       struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
49011 +#endif
49012 +       int i, num;
49013 +       struct page *new_page;
49014 +       unsigned long offset;
49015 +
49016 +       /*
49017 +        * Get the number of handles we should do readahead io to.
49018 +        */
49019 +       num = valid_swaphandles(entry, &offset);
49020 +       for (i = 0; i < num; offset++, i++) {
49021 +               /* Ok, do the async read-ahead now */
49022 +               new_page = read_swap_cache_async(swp_entry(swp_type(entry),
49023 +                                                          offset), vma, addr);
49024 +               if (!new_page)
49025 +                       break;
49026 +               page_cache_release(new_page);
49027 +#ifdef CONFIG_NUMA
49028 +               /*
49029 +                * Find the next applicable VMA for the NUMA policy.
49030 +                */
49031 +               addr += PAGE_SIZE;
49032 +               if (addr == 0)
49033 +                       vma = NULL;
49034 +               if (vma) {
49035 +                       if (addr >= vma->vm_end) {
49036 +                               vma = next_vma;
49037 +                               next_vma = vma ? vma->vm_next : NULL;
49038 +                       }
49039 +                       if (vma && addr < vma->vm_start)
49040 +                               vma = NULL;
49041 +               } else {
49042 +                       if (next_vma && addr >= next_vma->vm_start) {
49043 +                               vma = next_vma;
49044 +                               next_vma = vma->vm_next;
49045 +                       }
49046 +               }
49047 +#endif
49048 +       }
49049 +       lru_add_drain();        /* Push any new pages onto the LRU now */
49050 +}
49051 +
49052 +/*
49053 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
49054 + * but allow concurrent faults), and pte mapped but not yet locked.
49055 + * We return with mmap_sem still held, but pte unmapped and unlocked.
49056 + */
49057 +static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
49058 +               unsigned long address, pte_t *page_table, pmd_t *pmd,
49059 +               int write_access, pte_t orig_pte)
49060 +{
49061 +       spinlock_t *ptl;
49062 +       struct page *page;
49063 +       swp_entry_t entry;
49064 +       pte_t pte;
49065 +       int ret = VM_FAULT_MINOR;
49066 +
49067 +       if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
49068 +               goto out;
49069 +
49070 +       entry = pte_to_swp_entry(orig_pte);
49071 +       if (is_migration_entry(entry)) {
49072 +               migration_entry_wait(mm, pmd, address);
49073 +               goto out;
49074 +       }
49075 +       delayacct_set_flag(DELAYACCT_PF_SWAPIN);
49076 +       page = lookup_swap_cache(entry);
49077 +       if (!page) {
49078 +               grab_swap_token(); /* Contend for token _before_ read-in */
49079 +               swapin_readahead(entry, address, vma);
49080 +               page = read_swap_cache_async(entry, vma, address);
49081 +               if (!page) {
49082 +                       /*
49083 +                        * Back out if somebody else faulted in this pte
49084 +                        * while we released the pte lock.
49085 +                        */
49086 +                       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
49087 +                       if (likely(pte_same(*page_table, orig_pte)))
49088 +                               ret = VM_FAULT_OOM;
49089 +                       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
49090 +                       goto unlock;
49091 +               }
49092 +
49093 +               /* Had to read the page from swap area: Major fault */
49094 +               ret = VM_FAULT_MAJOR;
49095 +               count_vm_event(PGMAJFAULT);
49096 +       }
49097 +
49098 +       if (!vx_rss_avail(mm, 1)) {
49099 +               ret = VM_FAULT_OOM;
49100 +               goto out;
49101 +       }
49102 +
49103 +       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
49104 +       mark_page_accessed(page);
49105 +       lock_page(page);
49106 +
49107 +       /*
49108 +        * Back out if somebody else already faulted in this pte.
49109 +        */
49110 +       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
49111 +       if (unlikely(!pte_same(*page_table, orig_pte)))
49112 +               goto out_nomap;
49113 +
49114 +       if (unlikely(!PageUptodate(page))) {
49115 +               ret = VM_FAULT_SIGBUS;
49116 +               goto out_nomap;
49117 +       }
49118 +
49119 +       /* The page isn't present yet, go ahead with the fault. */
49120 +
49121 +       inc_mm_counter(mm, anon_rss);
49122 +       pte = mk_pte(page, vma->vm_page_prot);
49123 +       if (write_access && can_share_swap_page(page)) {
49124 +               pte = maybe_mkwrite(pte_mkdirty(pte), vma);
49125 +               write_access = 0;
49126 +       }
49127 +
49128 +       flush_icache_page(vma, page);
49129 +       set_pte_at(mm, address, page_table, pte);
49130 +       page_add_anon_rmap(page, vma, address);
49131 +
49132 +       swap_free(entry);
49133 +       if (vm_swap_full())
49134 +               remove_exclusive_swap_page(page);
49135 +       unlock_page(page);
49136 +
49137 +       if (write_access) {
49138 +               if (do_wp_page(mm, vma, address,
49139 +                               page_table, pmd, ptl, pte) == VM_FAULT_OOM)
49140 +                       ret = VM_FAULT_OOM;
49141 +               goto out;
49142 +       }
49143 +
49144 +       /* No need to invalidate - it was non-present before */
49145 +       update_mmu_cache(vma, address, pte);
49146 +       lazy_mmu_prot_update(pte);
49147 +unlock:
49148 +       pte_unmap_unlock(page_table, ptl);
49149 +out:
49150 +       return ret;
49151 +out_nomap:
49152 +       pte_unmap_unlock(page_table, ptl);
49153 +       unlock_page(page);
49154 +       page_cache_release(page);
49155 +       return ret;
49156 +}
49157 +
49158 +/*
49159 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
49160 + * but allow concurrent faults), and pte mapped but not yet locked.
49161 + * We return with mmap_sem still held, but pte unmapped and unlocked.
49162 + */
49163 +static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
49164 +               unsigned long address, pte_t *page_table, pmd_t *pmd,
49165 +               int write_access)
49166 +{
49167 +       struct page *page;
49168 +       spinlock_t *ptl;
49169 +       pte_t entry;
49170 +
49171 +       if (write_access) {
49172 +               /* Allocate our own private page. */
49173 +               pte_unmap(page_table);
49174 +
49175 +               if (!vx_rss_avail(mm, 1))
49176 +                       goto oom;
49177 +               if (unlikely(anon_vma_prepare(vma)))
49178 +                       goto oom;
49179 +               page = alloc_zeroed_user_highpage(vma, address);
49180 +               if (!page)
49181 +                       goto oom;
49182 +
49183 +               entry = mk_pte(page, vma->vm_page_prot);
49184 +               entry = maybe_mkwrite(pte_mkdirty(entry), vma);
49185 +
49186 +               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
49187 +               if (!pte_none(*page_table))
49188 +                       goto release;
49189 +               inc_mm_counter(mm, anon_rss);
49190 +               lru_cache_add_active(page);
49191 +               page_add_new_anon_rmap(page, vma, address);
49192 +       } else {
49193 +               /* Map the ZERO_PAGE - vm_page_prot is readonly */
49194 +               page = ZERO_PAGE(address);
49195 +               page_cache_get(page);
49196 +               entry = mk_pte(page, vma->vm_page_prot);
49197 +
49198 +               ptl = pte_lockptr(mm, pmd);
49199 +               spin_lock(ptl);
49200 +               if (!pte_none(*page_table))
49201 +                       goto release;
49202 +               inc_mm_counter(mm, file_rss);
49203 +               page_add_file_rmap(page);
49204 +       }
49205 +
49206 +       set_pte_at(mm, address, page_table, entry);
49207 +
49208 +       /* No need to invalidate - it was non-present before */
49209 +       update_mmu_cache(vma, address, entry);
49210 +       lazy_mmu_prot_update(entry);
49211 +unlock:
49212 +       pte_unmap_unlock(page_table, ptl);
49213 +       return VM_FAULT_MINOR;
49214 +release:
49215 +       page_cache_release(page);
49216 +       goto unlock;
49217 +oom:
49218 +       return VM_FAULT_OOM;
49219 +}
49220 +
49221 +/*
49222 + * do_no_page() tries to create a new page mapping. It aggressively
49223 + * tries to share with existing pages, but makes a separate copy if
49224 + * the "write_access" parameter is true in order to avoid the next
49225 + * page fault.
49226 + *
49227 + * As this is called only for pages that do not currently exist, we
49228 + * do not need to flush old virtual caches or the TLB.
49229 + *
49230 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
49231 + * but allow concurrent faults), and pte mapped but not yet locked.
49232 + * We return with mmap_sem still held, but pte unmapped and unlocked.
49233 + */
49234 +static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
49235 +               unsigned long address, pte_t *page_table, pmd_t *pmd,
49236 +               int write_access)
49237 +{
49238 +       spinlock_t *ptl;
49239 +       struct page *new_page;
49240 +       struct address_space *mapping = NULL;
49241 +       pte_t entry;
49242 +       unsigned int sequence = 0;
49243 +       int ret = VM_FAULT_MINOR;
49244 +       int anon = 0;
49245 +       struct page *dirty_page = NULL;
49246 +
49247 +       pte_unmap(page_table);
49248 +       BUG_ON(vma->vm_flags & VM_PFNMAP);
49249 +
49250 +       if (!vx_rss_avail(mm, 1))
49251 +               return VM_FAULT_OOM;
49252 +
49253 +       if (vma->vm_file) {
49254 +               mapping = vma->vm_file->f_mapping;
49255 +               sequence = mapping->truncate_count;
49256 +               smp_rmb(); /* serializes i_size against truncate_count */
49257 +       }
49258 +retry:
49259 +       new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
49260 +       /*
49261 +        * No smp_rmb is needed here as long as there's a full
49262 +        * spin_lock/unlock sequence inside the ->nopage callback
49263 +        * (for the pagecache lookup) that acts as an implicit
49264 +        * smp_mb() and prevents the i_size read to happen
49265 +        * after the next truncate_count read.
49266 +        */
49267 +
49268 +       /* no page was available -- either SIGBUS, OOM or REFAULT */
49269 +       if (unlikely(new_page == NOPAGE_SIGBUS))
49270 +               return VM_FAULT_SIGBUS;
49271 +       else if (unlikely(new_page == NOPAGE_OOM))
49272 +               return VM_FAULT_OOM;
49273 +       else if (unlikely(new_page == NOPAGE_REFAULT))
49274 +               return VM_FAULT_MINOR;
49275 +
49276 +       /*
49277 +        * Should we do an early C-O-W break?
49278 +        */
49279 +       if (write_access) {
49280 +               if (!(vma->vm_flags & VM_SHARED)) {
49281 +                       struct page *page;
49282 +
49283 +                       if (unlikely(anon_vma_prepare(vma)))
49284 +                               goto oom;
49285 +                       page = alloc_page_vma(GFP_HIGHUSER, vma, address);
49286 +                       if (!page)
49287 +                               goto oom;
49288 +                       copy_user_highpage(page, new_page, address, vma);
49289 +                       page_cache_release(new_page);
49290 +                       new_page = page;
49291 +                       anon = 1;
49292 +
49293 +               } else {
49294 +                       /* if the page will be shareable, see if the backing
49295 +                        * address space wants to know that the page is about
49296 +                        * to become writable */
49297 +                       if (vma->vm_ops->page_mkwrite &&
49298 +                           vma->vm_ops->page_mkwrite(vma, new_page) < 0
49299 +                           ) {
49300 +                               page_cache_release(new_page);
49301 +                               return VM_FAULT_SIGBUS;
49302 +                       }
49303 +               }
49304 +       }
49305 +
49306 +       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
49307 +       /*
49308 +        * For a file-backed vma, someone could have truncated or otherwise
49309 +        * invalidated this page.  If unmap_mapping_range got called,
49310 +        * retry getting the page.
49311 +        */
49312 +       if (mapping && unlikely(sequence != mapping->truncate_count)) {
49313 +               pte_unmap_unlock(page_table, ptl);
49314 +               page_cache_release(new_page);
49315 +               cond_resched();
49316 +               sequence = mapping->truncate_count;
49317 +               smp_rmb();
49318 +               goto retry;
49319 +       }
49320 +
49321 +       /*
49322 +        * This silly early PAGE_DIRTY setting removes a race
49323 +        * due to the bad i386 page protection. But it's valid
49324 +        * for other architectures too.
49325 +        *
49326 +        * Note that if write_access is true, we either now have
49327 +        * an exclusive copy of the page, or this is a shared mapping,
49328 +        * so we can make it writable and dirty to avoid having to
49329 +        * handle that later.
49330 +        */
49331 +       /* Only go through if we didn't race with anybody else... */
49332 +       if (pte_none(*page_table)) {
49333 +               flush_icache_page(vma, new_page);
49334 +               entry = mk_pte(new_page, vma->vm_page_prot);
49335 +               if (write_access)
49336 +                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
49337 +               set_pte_at(mm, address, page_table, entry);
49338 +               if (anon) {
49339 +                       inc_mm_counter(mm, anon_rss);
49340 +                       lru_cache_add_active(new_page);
49341 +                       page_add_new_anon_rmap(new_page, vma, address);
49342 +               } else {
49343 +                       inc_mm_counter(mm, file_rss);
49344 +                       page_add_file_rmap(new_page);
49345 +                       if (write_access) {
49346 +                               dirty_page = new_page;
49347 +                               get_page(dirty_page);
49348 +                       }
49349 +               }
49350 +       } else {
49351 +               /* One of our sibling threads was faster, back out. */
49352 +               page_cache_release(new_page);
49353 +               goto unlock;
49354 +       }
49355 +
49356 +       /* no need to invalidate: a not-present page shouldn't be cached */
49357 +       update_mmu_cache(vma, address, entry);
49358 +       lazy_mmu_prot_update(entry);
49359 +unlock:
49360 +       pte_unmap_unlock(page_table, ptl);
49361 +       if (dirty_page) {
49362 +               set_page_dirty_balance(dirty_page);
49363 +               put_page(dirty_page);
49364 +       }
49365 +       return ret;
49366 +oom:
49367 +       page_cache_release(new_page);
49368 +       return VM_FAULT_OOM;
49369 +}
49370 +
49371 +/*
49372 + * do_no_pfn() tries to create a new page mapping for a page without
49373 + * a struct page backing it
49374 + *
49375 + * As this is called only for pages that do not currently exist, we
49376 + * do not need to flush old virtual caches or the TLB.
49377 + *
49378 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
49379 + * but allow concurrent faults), and pte mapped but not yet locked.
49380 + * We return with mmap_sem still held, but pte unmapped and unlocked.
49381 + *
49382 + * It is expected that the ->nopfn handler always returns the same pfn
49383 + * for a given virtual mapping.
49384 + *
49385 + * Mark this `noinline' to prevent it from bloating the main pagefault code.
49386 + */
49387 +static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
49388 +                    unsigned long address, pte_t *page_table, pmd_t *pmd,
49389 +                    int write_access)
49390 +{
49391 +       spinlock_t *ptl;
49392 +       pte_t entry;
49393 +       unsigned long pfn;
49394 +       int ret = VM_FAULT_MINOR;
49395 +
49396 +       pte_unmap(page_table);
49397 +       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
49398 +       BUG_ON(is_cow_mapping(vma->vm_flags));
49399 +
49400 +       pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
49401 +       if (unlikely(pfn == NOPFN_OOM))
49402 +               return VM_FAULT_OOM;
49403 +       else if (unlikely(pfn == NOPFN_SIGBUS))
49404 +               return VM_FAULT_SIGBUS;
49405 +       else if (unlikely(pfn == NOPFN_REFAULT))
49406 +               return VM_FAULT_MINOR;
49407 +
49408 +       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
49409 +
49410 +       /* Only go through if we didn't race with anybody else... */
49411 +       if (pte_none(*page_table)) {
49412 +               entry = pfn_pte(pfn, vma->vm_page_prot);
49413 +               if (write_access)
49414 +                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
49415 +               set_pte_at(mm, address, page_table, entry);
49416 +       }
49417 +       pte_unmap_unlock(page_table, ptl);
49418 +       return ret;
49419 +}
49420 +
49421 +/*
49422 + * Fault of a previously existing named mapping. Repopulate the pte
49423 + * from the encoded file_pte if possible. This enables swappable
49424 + * nonlinear vmas.
49425 + *
49426 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
49427 + * but allow concurrent faults), and pte mapped but not yet locked.
49428 + * We return with mmap_sem still held, but pte unmapped and unlocked.
49429 + */
49430 +static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
49431 +               unsigned long address, pte_t *page_table, pmd_t *pmd,
49432 +               int write_access, pte_t orig_pte)
49433 +{
49434 +       pgoff_t pgoff;
49435 +       int err;
49436 +
49437 +       if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
49438 +               return VM_FAULT_MINOR;
49439 +
49440 +       if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
49441 +               /*
49442 +                * Page table corrupted: show pte and kill process.
49443 +                */
49444 +               print_bad_pte(vma, orig_pte, address);
49445 +               return VM_FAULT_OOM;
49446 +       }
49447 +       /* We can then assume vma->vm_ops && vma->vm_ops->populate */
49448 +
49449 +       pgoff = pte_to_pgoff(orig_pte);
49450 +       err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
49451 +                                       vma->vm_page_prot, pgoff, 0);
49452 +       if (err == -ENOMEM)
49453 +               return VM_FAULT_OOM;
49454 +       if (err)
49455 +               return VM_FAULT_SIGBUS;
49456 +       return VM_FAULT_MAJOR;
49457 +}
49458 +
49459 +/*
49460 + * These routines also need to handle stuff like marking pages dirty
49461 + * and/or accessed for architectures that don't do it in hardware (most
49462 + * RISC architectures).  The early dirtying is also good on the i386.
49463 + *
49464 + * There is also a hook called "update_mmu_cache()" that architectures
49465 + * with external mmu caches can use to update those (ie the Sparc or
49466 + * PowerPC hashed page tables that act as extended TLBs).
49467 + *
49468 + * We enter with non-exclusive mmap_sem (to exclude vma changes,
49469 + * but allow concurrent faults), and pte mapped but not yet locked.
49470 + * We return with mmap_sem still held, but pte unmapped and unlocked.
49471 + */
49472 +static inline int handle_pte_fault(struct mm_struct *mm,
49473 +               struct vm_area_struct *vma, unsigned long address,
49474 +               pte_t *pte, pmd_t *pmd, int write_access)
49475 +{
49476 +       pte_t entry;
49477 +       spinlock_t *ptl;
49478 +       int ret, type = VXPT_UNKNOWN;
49479 +
49480 +       entry = *pte;
49481 +       if (!pte_present(entry)) {
49482 +               if (pte_none(entry)) {
49483 +                       if (vma->vm_ops) {
49484 +                               if (vma->vm_ops->nopage)
49485 +                                       return do_no_page(mm, vma, address,
49486 +                                                         pte, pmd,
49487 +                                                         write_access);
49488 +                               if (unlikely(vma->vm_ops->nopfn))
49489 +                                       return do_no_pfn(mm, vma, address, pte,
49490 +                                                        pmd, write_access);
49491 +                       }
49492 +                       return do_anonymous_page(mm, vma, address,
49493 +                                                pte, pmd, write_access);
49494 +               }
49495 +               if (pte_file(entry))
49496 +                       return do_file_page(mm, vma, address,
49497 +                                       pte, pmd, write_access, entry);
49498 +               return do_swap_page(mm, vma, address,
49499 +                                       pte, pmd, write_access, entry);
49500 +       }
49501 +
49502 +       ptl = pte_lockptr(mm, pmd);
49503 +       spin_lock(ptl);
49504 +       if (unlikely(!pte_same(*pte, entry)))
49505 +               goto unlock;
49506 +       if (write_access) {
49507 +               if (!pte_write(entry)) {
49508 +                       ret = do_wp_page(mm, vma, address,
49509 +                                       pte, pmd, ptl, entry);
49510 +                       type = VXPT_WRITE;
49511 +                       goto out;
49512 +               }
49513 +               entry = pte_mkdirty(entry);
49514 +       }
49515 +       entry = pte_mkyoung(entry);
49516 +       if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
49517 +               update_mmu_cache(vma, address, entry);
49518 +               lazy_mmu_prot_update(entry);
49519 +       } else {
49520 +               /*
49521 +                * This is needed only for protection faults but the arch code
49522 +                * is not yet telling us if this is a protection fault or not.
49523 +                * This still avoids useless tlb flushes for .text page faults
49524 +                * with threads.
49525 +                */
49526 +               if (write_access)
49527 +                       flush_tlb_page(vma, address);
49528 +       }
49529 +unlock:
49530 +       pte_unmap_unlock(pte, ptl);
49531 +       ret = VM_FAULT_MINOR;
49532 +out:
49533 +       vx_page_fault(mm, vma, type, ret);
49534 +       return ret;
49535 +}
49536 +
49537 +/*
49538 + * By the time we get here, we already hold the mm semaphore
49539 + */
49540 +int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
49541 +               unsigned long address, int write_access)
49542 +{
49543 +       pgd_t *pgd;
49544 +       pud_t *pud;
49545 +       pmd_t *pmd;
49546 +       pte_t *pte;
49547 +
49548 +       __set_current_state(TASK_RUNNING);
49549 +
49550 +       count_vm_event(PGFAULT);
49551 +
49552 +       if (unlikely(is_vm_hugetlb_page(vma)))
49553 +               return hugetlb_fault(mm, vma, address, write_access);
49554 +
49555 +       pgd = pgd_offset(mm, address);
49556 +       pud = pud_alloc(mm, pgd, address);
49557 +       if (!pud)
49558 +               return VM_FAULT_OOM;
49559 +       pmd = pmd_alloc(mm, pud, address);
49560 +       if (!pmd)
49561 +               return VM_FAULT_OOM;
49562 +       pte = pte_alloc_map(mm, pmd, address);
49563 +       if (!pte)
49564 +               return VM_FAULT_OOM;
49565 +
49566 +       return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
49567 +}
49568 +
49569 +EXPORT_SYMBOL_GPL(__handle_mm_fault);
49570 +
49571 +#ifndef __PAGETABLE_PUD_FOLDED
49572 +/*
49573 + * Allocate page upper directory.
49574 + * We've already handled the fast-path in-line.
49575 + */
49576 +int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
49577 +{
49578 +       pud_t *new = pud_alloc_one(mm, address);
49579 +       if (!new)
49580 +               return -ENOMEM;
49581 +
49582 +       spin_lock(&mm->page_table_lock);
49583 +       if (pgd_present(*pgd))          /* Another has populated it */
49584 +               pud_free(new);
49585 +       else
49586 +               pgd_populate(mm, pgd, new);
49587 +       spin_unlock(&mm->page_table_lock);
49588 +       return 0;
49589 +}
49590 +#endif /* __PAGETABLE_PUD_FOLDED */
49591 +
49592 +#ifndef __PAGETABLE_PMD_FOLDED
49593 +/*
49594 + * Allocate page middle directory.
49595 + * We've already handled the fast-path in-line.
49596 + */
49597 +int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
49598 +{
49599 +       pmd_t *new = pmd_alloc_one(mm, address);
49600 +       if (!new)
49601 +               return -ENOMEM;
49602 +
49603 +       spin_lock(&mm->page_table_lock);
49604 +#ifndef __ARCH_HAS_4LEVEL_HACK
49605 +       if (pud_present(*pud))          /* Another has populated it */
49606 +               pmd_free(new);
49607 +       else
49608 +               pud_populate(mm, pud, new);
49609 +#else
49610 +       if (pgd_present(*pud))          /* Another has populated it */
49611 +               pmd_free(new);
49612 +       else
49613 +               pgd_populate(mm, pud, new);
49614 +#endif /* __ARCH_HAS_4LEVEL_HACK */
49615 +       spin_unlock(&mm->page_table_lock);
49616 +       return 0;
49617 +}
49618 +#endif /* __PAGETABLE_PMD_FOLDED */
49619 +
49620 +int make_pages_present(unsigned long addr, unsigned long end)
49621 +{
49622 +       int ret, len, write;
49623 +       struct vm_area_struct * vma;
49624 +
49625 +       vma = find_vma(current->mm, addr);
49626 +       if (!vma)
49627 +               return -1;
49628 +       write = (vma->vm_flags & VM_WRITE) != 0;
49629 +       BUG_ON(addr >= end);
49630 +       BUG_ON(end > vma->vm_end);
49631 +       len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
49632 +       ret = get_user_pages(current, current->mm, addr,
49633 +                       len, write, 0, NULL, NULL);
49634 +       if (ret < 0)
49635 +               return ret;
49636 +       return ret == len ? 0 : -1;
49637 +}
49638 +
49639 +/* 
49640 + * Map a vmalloc()-space virtual address to the physical page.
49641 + */
49642 +struct page * vmalloc_to_page(void * vmalloc_addr)
49643 +{
49644 +       unsigned long addr = (unsigned long) vmalloc_addr;
49645 +       struct page *page = NULL;
49646 +       pgd_t *pgd = pgd_offset_k(addr);
49647 +       pud_t *pud;
49648 +       pmd_t *pmd;
49649 +       pte_t *ptep, pte;
49650 +  
49651 +       if (!pgd_none(*pgd)) {
49652 +               pud = pud_offset(pgd, addr);
49653 +               if (!pud_none(*pud)) {
49654 +                       pmd = pmd_offset(pud, addr);
49655 +                       if (!pmd_none(*pmd)) {
49656 +                               ptep = pte_offset_map(pmd, addr);
49657 +                               pte = *ptep;
49658 +                               if (pte_present(pte))
49659 +                                       page = pte_page(pte);
49660 +                               pte_unmap(ptep);
49661 +                       }
49662 +               }
49663 +       }
49664 +       return page;
49665 +}
49666 +
49667 +EXPORT_SYMBOL(vmalloc_to_page);
49668 +
49669 +/*
49670 + * Map a vmalloc()-space virtual address to the physical page frame number.
49671 + */
49672 +unsigned long vmalloc_to_pfn(void * vmalloc_addr)
49673 +{
49674 +       return page_to_pfn(vmalloc_to_page(vmalloc_addr));
49675 +}
49676 +
49677 +EXPORT_SYMBOL(vmalloc_to_pfn);
49678 +
49679 +#if !defined(__HAVE_ARCH_GATE_AREA)
49680 +
49681 +#if defined(AT_SYSINFO_EHDR)
49682 +static struct vm_area_struct gate_vma;
49683 +
49684 +static int __init gate_vma_init(void)
49685 +{
49686 +       gate_vma.vm_mm = NULL;
49687 +       gate_vma.vm_start = FIXADDR_USER_START;
49688 +       gate_vma.vm_end = FIXADDR_USER_END;
49689 +       gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
49690 +       gate_vma.vm_page_prot = __P101;
49691 +       /*
49692 +        * Make sure the vDSO gets into every core dump.
49693 +        * Dumping its contents makes post-mortem fully interpretable later
49694 +        * without matching up the same kernel and hardware config to see
49695 +        * what PC values meant.
49696 +        */
49697 +       gate_vma.vm_flags |= VM_ALWAYSDUMP;
49698 +       return 0;
49699 +}
49700 +__initcall(gate_vma_init);
49701 +#endif
49702 +
49703 +struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
49704 +{
49705 +#ifdef AT_SYSINFO_EHDR
49706 +       return &gate_vma;
49707 +#else
49708 +       return NULL;
49709 +#endif
49710 +}
49711 +
49712 +int in_gate_area_no_task(unsigned long addr)
49713 +{
49714 +#ifdef AT_SYSINFO_EHDR
49715 +       if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
49716 +               return 1;
49717 +#endif
49718 +       return 0;
49719 +}
49720 +
49721 +#endif /* __HAVE_ARCH_GATE_AREA */
49722 +
49723 +/*
49724 + * Access another process' address space.
49725 + * Source/target buffer must be kernel space,
49726 + * Do not walk the page table directly, use get_user_pages
49727 + */
49728 +int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
49729 +{
49730 +       struct mm_struct *mm;
49731 +       struct vm_area_struct *vma;
49732 +       struct page *page;
49733 +       void *old_buf = buf;
49734 +
49735 +       mm = get_task_mm(tsk);
49736 +       if (!mm)
49737 +               return 0;
49738 +
49739 +       down_read(&mm->mmap_sem);
49740 +       /* ignore errors, just check how much was successfully transferred */
49741 +       while (len) {
49742 +               int bytes, ret, offset;
49743 +               void *maddr;
49744 +
49745 +               ret = get_user_pages(tsk, mm, addr, 1,
49746 +                               write, 1, &page, &vma);
49747 +               if (ret <= 0)
49748 +                       break;
49749 +
49750 +               bytes = len;
49751 +               offset = addr & (PAGE_SIZE-1);
49752 +               if (bytes > PAGE_SIZE-offset)
49753 +                       bytes = PAGE_SIZE-offset;
49754 +
49755 +               maddr = kmap(page);
49756 +               if (write) {
49757 +                       copy_to_user_page(vma, page, addr,
49758 +                                         maddr + offset, buf, bytes);
49759 +                       set_page_dirty_lock(page);
49760 +               } else {
49761 +                       copy_from_user_page(vma, page, addr,
49762 +                                           buf, maddr + offset, bytes);
49763 +               }
49764 +               kunmap(page);
49765 +               page_cache_release(page);
49766 +               len -= bytes;
49767 +               buf += bytes;
49768 +               addr += bytes;
49769 +       }
49770 +       up_read(&mm->mmap_sem);
49771 +       mmput(mm);
49772 +
49773 +       return buf - old_buf;
49774 +}
49775 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/slab.c linux-2.6.22-590/mm/slab.c
49776 --- linux-2.6.22-580/mm/slab.c  2009-02-18 09:56:03.000000000 -0500
49777 +++ linux-2.6.22-590/mm/slab.c  2009-02-18 10:00:42.000000000 -0500
49778 @@ -110,11 +110,13 @@
49779  #include       <linux/fault-inject.h>
49780  #include       <linux/rtmutex.h>
49781  #include       <linux/reciprocal_div.h>
49782 +#include <linux/arrays.h>
49783  
49784  #include       <asm/cacheflush.h>
49785  #include       <asm/tlbflush.h>
49786  #include       <asm/page.h>
49787  
49788 +
49789  /*
49790   * DEBUG       - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
49791   *               0 for faster, smaller code (especially in the critical paths).
49792 @@ -249,6 +251,14 @@
49793         void *addr;
49794  };
49795  
49796 +extern void (*rec_event)(void *,unsigned int);
49797 +struct event_spec {
49798 +       unsigned long pc;
49799 +       unsigned long dcookie; 
49800 +       unsigned count;
49801 +       unsigned char reason;
49802 +};
49803 +
49804  /*
49805   * struct array_cache
49806   *
49807 @@ -3443,6 +3453,19 @@
49808         local_irq_restore(save_flags);
49809         objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
49810         prefetchw(objp);
49811 +#ifdef CONFIG_CHOPSTIX
49812 +       if (rec_event && objp) {
49813 +               struct event event;
49814 +               struct event_spec espec;
49815 +
49816 +               espec.reason = 0; /* alloc */
49817 +               event.event_data=&espec;
49818 +               event.task = current;
49819 +               espec.pc=caller;
49820 +               event.event_type=5; 
49821 +               (*rec_event)(&event, cachep->buffer_size);
49822 +       }
49823 +#endif
49824  
49825         return objp;
49826  }
49827 @@ -3549,12 +3572,26 @@
49828   * Release an obj back to its cache. If the obj has a constructed state, it must
49829   * be in this state _before_ it is released.  Called with disabled ints.
49830   */
49831 -static inline void __cache_free(struct kmem_cache *cachep, void *objp)
49832 +static inline void __cache_free(struct kmem_cache *cachep, void *objp, void *caller)
49833  {
49834         struct array_cache *ac = cpu_cache_get(cachep);
49835  
49836         check_irq_off();
49837 -       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
49838 +       objp = cache_free_debugcheck(cachep, objp, caller);
49839 +    #ifdef CONFIG_CHOPSTIX
49840 +       if (rec_event && objp) {
49841 +               struct event event;
49842 +               struct event_spec espec;
49843 +     
49844 +               espec.reason = 1; /* free */
49845 +               event.event_data=&espec;
49846 +               event.task = current;
49847 +               espec.pc=caller;
49848 +               event.event_type=4; 
49849 +               (*rec_event)(&event, cachep->buffer_size);
49850 +       }
49851 +    #endif
49852 +
49853         vx_slab_free(cachep);
49854  
49855         if (cache_free_alien(cachep, objp))
49856 @@ -3651,16 +3688,19 @@
49857                         __builtin_return_address(0));
49858  }
49859  EXPORT_SYMBOL(kmem_cache_alloc_node);
49860 -
49861  static __always_inline void *
49862  __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
49863  {
49864         struct kmem_cache *cachep;
49865 +       void *ret;
49866 +
49867  
49868         cachep = kmem_find_general_cachep(size, flags);
49869         if (unlikely(cachep == NULL))
49870                 return NULL;
49871 -       return kmem_cache_alloc_node(cachep, flags, node);
49872 +       ret = kmem_cache_alloc_node(cachep, flags, node);
49873 +       
49874 +       return ret;
49875  }
49876  
49877  #ifdef CONFIG_DEBUG_SLAB
49878 @@ -3696,6 +3736,7 @@
49879                                           void *caller)
49880  {
49881         struct kmem_cache *cachep;
49882 +       void *ret;
49883  
49884         /* If you want to save a few bytes .text space: replace
49885          * __ with kmem_.
49886 @@ -3705,9 +3746,10 @@
49887         cachep = __find_general_cachep(size, flags);
49888         if (unlikely(cachep == NULL))
49889                 return NULL;
49890 -       return __cache_alloc(cachep, flags, caller);
49891 -}
49892 +       ret = __cache_alloc(cachep, flags, caller);
49893  
49894 +       return ret;
49895 +}
49896  
49897  #ifdef CONFIG_DEBUG_SLAB
49898  void *__kmalloc(size_t size, gfp_t flags)
49899 @@ -3723,10 +3765,17 @@
49900  EXPORT_SYMBOL(__kmalloc_track_caller);
49901  
49902  #else
49903 +#ifdef CONFIG_CHOPSTIX
49904 +void *__kmalloc(size_t size, gfp_t flags)
49905 +{
49906 +       return __do_kmalloc(size, flags, __builtin_return_address(0));
49907 +}
49908 +#else
49909  void *__kmalloc(size_t size, gfp_t flags)
49910  {
49911         return __do_kmalloc(size, flags, NULL);
49912  }
49913 +#endif
49914  EXPORT_SYMBOL(__kmalloc);
49915  #endif
49916  
49917 @@ -3792,7 +3841,7 @@
49918  
49919         local_irq_save(flags);
49920         debug_check_no_locks_freed(objp, obj_size(cachep));
49921 -       __cache_free(cachep, objp);
49922 +       __cache_free(cachep, objp,__builtin_return_address(0));
49923         local_irq_restore(flags);
49924  }
49925  EXPORT_SYMBOL(kmem_cache_free);
49926 @@ -3817,7 +3866,7 @@
49927         kfree_debugcheck(objp);
49928         c = virt_to_cache(objp);
49929         debug_check_no_locks_freed(objp, obj_size(c));
49930 -       __cache_free(c, (void *)objp);
49931 +       __cache_free(c, (void *)objp,__builtin_return_address(0));
49932         local_irq_restore(flags);
49933  }
49934  EXPORT_SYMBOL(kfree);
49935 diff -Nurb --exclude='*.swp' --exclude=tags --exclude='*.patch' --exclude='*.diff' linux-2.6.22-580/mm/slab.c.orig linux-2.6.22-590/mm/slab.c.orig
49936 --- linux-2.6.22-580/mm/slab.c.orig     1969-12-31 19:00:00.000000000 -0500
49937 +++ linux-2.6.22-590/mm/slab.c.orig     2009-02-18 09:56:03.000000000 -0500
49938 @@ -0,0 +1,4523 @@
49939 +/*
49940 + * linux/mm/slab.c
49941 + * Written by Mark Hemment, 1996/97.
49942 + * (markhe@nextd.demon.co.uk)
49943 + *
49944 + * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
49945 + *
49946 + * Major cleanup, different bufctl logic, per-cpu arrays
49947 + *     (c) 2000 Manfred Spraul
49948 + *
49949 + * Cleanup, make the head arrays unconditional, preparation for NUMA
49950 + *     (c) 2002 Manfred Spraul
49951 + *
49952 + * An implementation of the Slab Allocator as described in outline in;
49953 + *     UNIX Internals: The New Frontiers by Uresh Vahalia
49954 + *     Pub: Prentice Hall      ISBN 0-13-101908-2
49955 + * or with a little more detail in;
49956 + *     The Slab Allocator: An Object-Caching Kernel Memory Allocator
49957 + *     Jeff Bonwick (Sun Microsystems).
49958 + *     Presented at: USENIX Summer 1994 Technical Conference
49959 + *
49960 + * The memory is organized in caches, one cache for each object type.
49961 + * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
49962 + * Each cache consists out of many slabs (they are small (usually one
49963 + * page long) and always contiguous), and each slab contains multiple
49964 + * initialized objects.
49965 + *
49966 + * This means, that your constructor is used only for newly allocated
49967 + * slabs and you must pass objects with the same intializations to
49968 + * kmem_cache_free.
49969 + *
49970 + * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
49971 + * normal). If you need a special memory type, then must create a new
49972 + * cache for that memory type.
49973 + *
49974 + * In order to reduce fragmentation, the slabs are sorted in 3 groups:
49975 + *   full slabs with 0 free objects
49976 + *   partial slabs
49977 + *   empty slabs with no allocated objects
49978 + *
49979 + * If partial slabs exist, then new allocations come from these slabs,
49980 + * otherwise from empty slabs or new slabs are allocated.
49981 + *
49982 + * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
49983 + * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
49984 + *
49985 + * Each cache has a short per-cpu head array, most allocs
49986 + * and frees go into that array, and if that array overflows, then 1/2
49987 + * of the entries in the array are given back into the global cache.
49988 + * The head array is strictly LIFO and should improve the cache hit rates.
49989 + * On SMP, it additionally reduces the spinlock operations.
49990 + *
49991 + * The c_cpuarray may not be read with enabled local interrupts -
49992 + * it's changed with a smp_call_function().
49993 + *
49994 + * SMP synchronization:
49995 + *  constructors and destructors are called without any locking.
49996 + *  Several members in struct kmem_cache and struct slab never change, they
49997 + *     are accessed without any locking.
49998 + *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
49999 + *     and local interrupts are disabled so slab code is preempt-safe.
50000 + *  The non-constant members are protected with a per-cache irq spinlock.
50001 + *
50002 + * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
50003 + * in 2000 - many ideas in the current implementation are derived from
50004 + * his patch.
50005 + *
50006 + * Further notes from the original documentation:
50007 + *
50008 + * 11 April '97.  Started multi-threading - markhe
50009 + *     The global cache-chain is protected by the mutex 'cache_chain_mutex'.
50010 + *     The sem is only needed when accessing/extending the cache-chain, which
50011 + *     can never happen inside an interrupt (kmem_cache_create(),
50012 + *     kmem_cache_shrink() and kmem_cache_reap()).
50013 + *
50014 + *     At present, each engine can be growing a cache.  This should be blocked.
50015 + *
50016 + * 15 March 2005. NUMA slab allocator.
50017 + *     Shai Fultheim <shai@scalex86.org>.
50018 + *     Shobhit Dayal <shobhit@calsoftinc.com>
50019 + *     Alok N Kataria <alokk@calsoftinc.com>
50020 + *     Christoph Lameter <christoph@lameter.com>
50021 + *
50022 + *     Modified the slab allocator to be node aware on NUMA systems.
50023 + *     Each node has its own list of partial, free and full slabs.
50024 + *     All object allocations for a node occur from node specific slab lists.
50025 + */
50026 +
50027 +#include       <linux/slab.h>
50028 +#include       <linux/mm.h>
50029 +#include       <linux/poison.h>
50030 +#include       <linux/swap.h>
50031 +#include       <linux/cache.h>
50032 +#include       <linux/interrupt.h>
50033 +#include       <linux/init.h>
50034 +#include       <linux/compiler.h>
50035 +#include       <linux/cpuset.h>
50036 +#include       <linux/seq_file.h>
50037 +#include       <linux/notifier.h>
50038 +#include       <linux/kallsyms.h>
50039 +#include       <linux/cpu.h>
50040 +#include       <linux/sysctl.h>
50041 +#include       <linux/module.h>
50042 +#include       <linux/rcupdate.h>
50043 +#include       <linux/string.h>
50044 +#include       <linux/uaccess.h>
50045 +#include       <linux/nodemask.h>
50046 +#include       <linux/mempolicy.h>
50047 +#include       <linux/mutex.h>
50048 +#include       <linux/fault-inject.h>
50049 +#include       <linux/rtmutex.h>
50050 +#include       <linux/reciprocal_div.h>
50051 +
50052 +#include       <asm/cacheflush.h>
50053 +#include       <asm/tlbflush.h>
50054 +#include       <asm/page.h>
50055 +
50056 +/*
50057 + * DEBUG       - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
50058 + *               0 for faster, smaller code (especially in the critical paths).
50059 + *
50060 + * STATS       - 1 to collect stats for /proc/slabinfo.
50061 + *               0 for faster, smaller code (especially in the critical paths).
50062 + *
50063 + * FORCED_DEBUG        - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
50064 + */
50065 +
50066 +#ifdef CONFIG_DEBUG_SLAB
50067 +#define        DEBUG           1
50068 +#define        STATS           1
50069 +#define        FORCED_DEBUG    1
50070 +#else
50071 +#define        DEBUG           0
50072 +#define        STATS           0
50073 +#define        FORCED_DEBUG    0
50074 +#endif
50075 +
50076 +/* Shouldn't this be in a header file somewhere? */
50077 +#define        BYTES_PER_WORD          sizeof(void *)
50078 +#define        REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))
50079 +
50080 +#ifndef cache_line_size
50081 +#define cache_line_size()      L1_CACHE_BYTES
50082 +#endif
50083 +
50084 +#ifndef ARCH_KMALLOC_MINALIGN
50085 +/*
50086 + * Enforce a minimum alignment for the kmalloc caches.
50087 + * Usually, the kmalloc caches are cache_line_size() aligned, except when
50088 + * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
50089 + * Some archs want to perform DMA into kmalloc caches and need a guaranteed
50090 + * alignment larger than the alignment of a 64-bit integer.
50091 + * ARCH_KMALLOC_MINALIGN allows that.
50092 + * Note that increasing this value may disable some debug features.
50093 + */
50094 +#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
50095 +#endif
50096 +
50097 +#ifndef ARCH_SLAB_MINALIGN
50098 +/*
50099 + * Enforce a minimum alignment for all caches.
50100 + * Intended for archs that get misalignment faults even for BYTES_PER_WORD
50101 + * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
50102 + * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
50103 + * some debug features.
50104 + */
50105 +#define ARCH_SLAB_MINALIGN 0
50106 +#endif
50107 +
50108 +#ifndef ARCH_KMALLOC_FLAGS
50109 +#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
50110 +#endif
50111 +
50112 +/* Legal flag mask for kmem_cache_create(). */
50113 +#if DEBUG
50114 +# define CREATE_MASK   (SLAB_RED_ZONE | \
50115 +                        SLAB_POISON | SLAB_HWCACHE_ALIGN | \
50116 +                        SLAB_CACHE_DMA | \
50117 +                        SLAB_STORE_USER | \
50118 +                        SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
50119 +                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
50120 +#else
50121 +# define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
50122 +                        SLAB_CACHE_DMA | \
50123 +                        SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
50124 +                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
50125 +#endif
50126 +
50127 +/*
50128 + * kmem_bufctl_t:
50129 + *
50130 + * Bufctl's are used for linking objs within a slab
50131 + * linked offsets.
50132 + *
50133 + * This implementation relies on "struct page" for locating the cache &
50134 + * slab an object belongs to.
50135 + * This allows the bufctl structure to be small (one int), but limits
50136 + * the number of objects a slab (not a cache) can contain when off-slab
50137 + * bufctls are used. The limit is the size of the largest general cache
50138 + * that does not use off-slab slabs.
50139 + * For 32bit archs with 4 kB pages, is this 56.
50140 + * This is not serious, as it is only for large objects, when it is unwise
50141 + * to have too many per slab.
50142 + * Note: This limit can be raised by introducing a general cache whose size
50143 + * is less than 512 (PAGE_SIZE<<3), but greater than 256.
50144 + */
50145 +
50146 +typedef unsigned int kmem_bufctl_t;
50147 +#define BUFCTL_END     (((kmem_bufctl_t)(~0U))-0)
50148 +#define BUFCTL_FREE    (((kmem_bufctl_t)(~0U))-1)
50149 +#define        BUFCTL_ACTIVE   (((kmem_bufctl_t)(~0U))-2)
50150 +#define        SLAB_LIMIT      (((kmem_bufctl_t)(~0U))-3)
50151 +
50152 +/*
50153 + * struct slab
50154 + *
50155 + * Manages the objs in a slab. Placed either at the beginning of mem allocated
50156 + * for a slab, or allocated from an general cache.
50157 + * Slabs are chained into three list: fully used, partial, fully free slabs.
50158 + */
50159 +struct slab {
50160 +       struct list_head list;
50161 +       unsigned long colouroff;
50162 +       void *s_mem;            /* including colour offset */
50163 +       unsigned int inuse;     /* num of objs active in slab */
50164 +       kmem_bufctl_t free;
50165 +       unsigned short nodeid;
50166 +};
50167 +
50168 +/*
50169 + * struct slab_rcu
50170 + *
50171 + * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
50172 + * arrange for kmem_freepages to be called via RCU.  This is useful if
50173 + * we need to approach a kernel structure obliquely, from its address
50174 + * obtained without the usual locking.  We can lock the structure to
50175 + * stabilize it and check it's still at the given address, only if we
50176 + * can be sure that the memory has not been meanwhile reused for some
50177 + * other kind of object (which our subsystem's lock might corrupt).
50178 + *
50179 + * rcu_read_lock before reading the address, then rcu_read_unlock after
50180 + * taking the spinlock within the structure expected at that address.
50181 + *
50182 + * We assume struct slab_rcu can overlay struct slab when destroying.
50183 + */
50184 +struct slab_rcu {
50185 +       struct rcu_head head;
50186 +       struct kmem_cache *cachep;
50187 +       void *addr;
50188 +};
50189 +
50190 +/*
50191 + * struct array_cache
50192 + *
50193 + * Purpose:
50194 + * - LIFO ordering, to hand out cache-warm objects from _alloc
50195 + * - reduce the number of linked list operations
50196 + * - reduce spinlock operations
50197 + *
50198 + * The limit is stored in the per-cpu structure to reduce the data cache
50199 + * footprint.
50200 + *
50201 + */
50202 +struct array_cache {
50203 +       unsigned int avail;
50204 +       unsigned int limit;
50205 +       unsigned int batchcount;
50206 +       unsigned int touched;
50207 +       spinlock_t lock;
50208 +       void *entry[0]; /*
50209 +                        * Must have this definition in here for the proper
50210 +                        * alignment of array_cache. Also simplifies accessing
50211 +                        * the entries.
50212 +                        * [0] is for gcc 2.95. It should really be [].
50213 +                        */
50214 +};
50215 +
50216 +/*
50217 + * bootstrap: The caches do not work without cpuarrays anymore, but the
50218 + * cpuarrays are allocated from the generic caches...
50219 + */
50220 +#define BOOT_CPUCACHE_ENTRIES  1
50221 +struct arraycache_init {
50222 +       struct array_cache cache;
50223 +       void *entries[BOOT_CPUCACHE_ENTRIES];
50224 +};
50225 +
50226 +/*
50227 + * The slab lists for all objects.
50228 + */
50229 +struct kmem_list3 {
50230 +       struct list_head slabs_partial; /* partial list first, better asm code */
50231 +       struct list_head slabs_full;
50232 +       struct list_head slabs_free;
50233 +       unsigned long free_objects;
50234 +       unsigned int free_limit;
50235 +       unsigned int colour_next;       /* Per-node cache coloring */
50236 +       spinlock_t list_lock;
50237 +       struct array_cache *shared;     /* shared per node */
50238 +       struct array_cache **alien;     /* on other nodes */
50239 +       unsigned long next_reap;        /* updated without locking */
50240 +       int free_touched;               /* updated without locking */
50241 +};
50242 +
50243 +/*
50244 + * Need this for bootstrapping a per node allocator.
50245 + */
50246 +#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
50247 +struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
50248 +#define        CACHE_CACHE 0
50249 +#define        SIZE_AC 1
50250 +#define        SIZE_L3 (1 + MAX_NUMNODES)
50251 +
50252 +static int drain_freelist(struct kmem_cache *cache,
50253 +                       struct kmem_list3 *l3, int tofree);
50254 +static void free_block(struct kmem_cache *cachep, void **objpp, int len,
50255 +                       int node);
50256 +static int enable_cpucache(struct kmem_cache *cachep);
50257 +static void cache_reap(struct work_struct *unused);
50258 +
50259 +/*
50260 + * This function must be completely optimized away if a constant is passed to
50261 + * it.  Mostly the same as what is in linux/slab.h except it returns an index.
50262 + */
50263 +static __always_inline int index_of(const size_t size)
50264 +{
50265 +       extern void __bad_size(void);
50266 +
50267 +       if (__builtin_constant_p(size)) {
50268 +               int i = 0;
50269 +
50270 +#define CACHE(x) \
50271 +       if (size <=x) \
50272 +               return i; \
50273 +       else \
50274 +               i++;
50275 +#include "linux/kmalloc_sizes.h"
50276 +#undef CACHE
50277 +               __bad_size();
50278 +       } else
50279 +               __bad_size();
50280 +       return 0;
50281 +}
50282 +
50283 +static int slab_early_init = 1;
50284 +
50285 +#define INDEX_AC index_of(sizeof(struct arraycache_init))
50286 +#define INDEX_L3 index_of(sizeof(struct kmem_list3))
50287 +
50288 +static void kmem_list3_init(struct kmem_list3 *parent)
50289 +{
50290 +       INIT_LIST_HEAD(&parent->slabs_full);
50291 +       INIT_LIST_HEAD(&parent->slabs_partial);
50292 +       INIT_LIST_HEAD(&parent->slabs_free);
50293 +       parent->shared = NULL;
50294 +       parent->alien = NULL;
50295 +       parent->colour_next = 0;
50296 +       spin_lock_init(&parent->list_lock);
50297 +       parent->free_objects = 0;
50298 +       parent->free_touched = 0;
50299 +}
50300 +
50301 +#define MAKE_LIST(cachep, listp, slab, nodeid)                         \
50302 +       do {                                                            \
50303 +               INIT_LIST_HEAD(listp);                                  \
50304 +               list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
50305 +       } while (0)
50306 +
50307 +#define        MAKE_ALL_LISTS(cachep, ptr, nodeid)                             \
50308 +       do {                                                            \
50309 +       MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);  \
50310 +       MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
50311 +       MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);  \
50312 +       } while (0)
50313 +
50314 +/*
50315 + * struct kmem_cache
50316 + *
50317 + * manages a cache.
50318 + */
50319 +
50320 +struct kmem_cache {
50321 +/* 1) per-cpu data, touched during every alloc/free */
50322 +       struct array_cache *array[NR_CPUS];
50323 +/* 2) Cache tunables. Protected by cache_chain_mutex */
50324 +       unsigned int batchcount;
50325 +       unsigned int limit;
50326 +       unsigned int shared;
50327 +
50328 +       unsigned int buffer_size;
50329 +       u32 reciprocal_buffer_size;
50330 +/* 3) touched by every alloc & free from the backend */
50331 +
50332 +       unsigned int flags;             /* constant flags */
50333 +       unsigned int num;               /* # of objs per slab */
50334 +
50335 +/* 4) cache_grow/shrink */
50336 +       /* order of pgs per slab (2^n) */
50337 +       unsigned int gfporder;
50338 +
50339 +       /* force GFP flags, e.g. GFP_DMA */
50340 +       gfp_t gfpflags;
50341 +
50342 +       size_t colour;                  /* cache colouring range */
50343 +       unsigned int colour_off;        /* colour offset */
50344 +       struct kmem_cache *slabp_cache;
50345 +       unsigned int slab_size;
50346 +       unsigned int dflags;            /* dynamic flags */
50347 +
50348 +       /* constructor func */
50349 +       void (*ctor) (void *, struct kmem_cache *, unsigned long);
50350 +
50351 +/* 5) cache creation/removal */
50352 +       const char *name;
50353 +       struct list_head next;
50354 +
50355 +/* 6) statistics */
50356 +#if STATS
50357 +       unsigned long num_active;
50358 +       unsigned long num_allocations;
50359 +       unsigned long high_mark;
50360 +       unsigned long grown;
50361 +       unsigned long reaped;
50362 +       unsigned long errors;
50363 +       unsigned long max_freeable;
50364 +       unsigned long node_allocs;
50365 +       unsigned long node_frees;
50366 +       unsigned long node_overflow;
50367 +       atomic_t allochit;
50368 +       atomic_t allocmiss;
50369 +       atomic_t freehit;
50370 +       atomic_t freemiss;
50371 +#endif
50372 +#if DEBUG
50373 +       /*
50374 +        * If debugging is enabled, then the allocator can add additional
50375 +        * fields and/or padding to every object. buffer_size contains the total
50376 +        * object size including these internal fields, the following two
50377 +        * variables contain the offset to the user object and its size.
50378 +        */
50379 +       int obj_offset;
50380 +       int obj_size;
50381 +#endif
50382 +       /*
50383 +        * We put nodelists[] at the end of kmem_cache, because we want to size
50384 +        * this array to nr_node_ids slots instead of MAX_NUMNODES
50385 +        * (see kmem_cache_init())
50386 +        * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
50387 +        * is statically defined, so we reserve the max number of nodes.
50388 +        */
50389 +       struct kmem_list3 *nodelists[MAX_NUMNODES];
50390 +       /*
50391 +        * Do not add fields after nodelists[]
50392 +        */
50393 +};
50394 +
50395 +#define CFLGS_OFF_SLAB         (0x80000000UL)
50396 +#define        OFF_SLAB(x)     ((x)->flags & CFLGS_OFF_SLAB)
50397 +
50398 +#define BATCHREFILL_LIMIT      16
50399 +/*
50400 + * Optimization question: fewer reaps means less probability for unnessary
50401 + * cpucache drain/refill cycles.
50402 + *
50403 + * OTOH the cpuarrays can contain lots of objects,
50404 + * which could lock up otherwise freeable slabs.
50405 + */
50406 +#define REAPTIMEOUT_CPUC       (2*HZ)
50407 +#define REAPTIMEOUT_LIST3      (4*HZ)
50408 +
50409 +#if STATS
50410 +#define        STATS_INC_ACTIVE(x)     ((x)->num_active++)
50411 +#define        STATS_DEC_ACTIVE(x)     ((x)->num_active--)
50412 +#define        STATS_INC_ALLOCED(x)    ((x)->num_allocations++)
50413 +#define        STATS_INC_GROWN(x)      ((x)->grown++)
50414 +#define        STATS_ADD_REAPED(x,y)   ((x)->reaped += (y))
50415 +#define        STATS_SET_HIGH(x)                                               \
50416 +       do {                                                            \
50417 +               if ((x)->num_active > (x)->high_mark)                   \
50418 +                       (x)->high_mark = (x)->num_active;               \
50419 +       } while (0)
50420 +#define        STATS_INC_ERR(x)        ((x)->errors++)
50421 +#define        STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
50422 +#define        STATS_INC_NODEFREES(x)  ((x)->node_frees++)
50423 +#define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
50424 +#define        STATS_SET_FREEABLE(x, i)                                        \
50425 +       do {                                                            \
50426 +               if ((x)->max_freeable < i)                              \
50427 +                       (x)->max_freeable = i;                          \
50428 +       } while (0)
50429 +#define STATS_INC_ALLOCHIT(x)  atomic_inc(&(x)->allochit)
50430 +#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
50431 +#define STATS_INC_FREEHIT(x)   atomic_inc(&(x)->freehit)
50432 +#define STATS_INC_FREEMISS(x)  atomic_inc(&(x)->freemiss)
50433 +#else
50434 +#define        STATS_INC_ACTIVE(x)     do { } while (0)
50435 +#define        STATS_DEC_ACTIVE(x)     do { } while (0)
50436 +#define        STATS_INC_ALLOCED(x)    do { } while (0)
50437 +#define        STATS_INC_GROWN(x)      do { } while (0)
50438 +#define        STATS_ADD_REAPED(x,y)   do { } while (0)
50439 +#define        STATS_SET_HIGH(x)       do { } while (0)
50440 +#define        STATS_INC_ERR(x)        do { } while (0)
50441 +#define        STATS_INC_NODEALLOCS(x) do { } while (0)
50442 +#define        STATS_INC_NODEFREES(x)  do { } while (0)
50443 +#define STATS_INC_ACOVERFLOW(x)   do { } while (0)
50444 +#define        STATS_SET_FREEABLE(x, i) do { } while (0)
50445 +#define STATS_INC_ALLOCHIT(x)  do { } while (0)
50446 +#define STATS_INC_ALLOCMISS(x) do { } while (0)
50447 +#define STATS_INC_FREEHIT(x)   do { } while (0)
50448 +#define STATS_INC_FREEMISS(x)  do { } while (0)
50449 +#endif
50450 +
50451 +#include "slab_vs.h"
50452 +
50453 +#if DEBUG
50454 +
50455 +/*
50456 + * memory layout of objects:
50457 + * 0           : objp
50458 + * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
50459 + *             the end of an object is aligned with the end of the real
50460 + *             allocation. Catches writes behind the end of the allocation.
50461 + * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
50462 + *             redzone word.
50463 + * cachep->obj_offset: The real object.
50464 + * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
50465 + * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
50466 + *                                     [BYTES_PER_WORD long]
50467 + */
50468 +static int obj_offset(struct kmem_cache *cachep)
50469 +{
50470 +       return cachep->obj_offset;
50471 +}
50472 +
50473 +static int obj_size(struct kmem_cache *cachep)
50474 +{
50475 +       return cachep->obj_size;
50476 +}
50477 +
50478 +static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
50479 +{
50480 +       BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
50481 +       return (unsigned long long*) (objp + obj_offset(cachep) -
50482 +                                     sizeof(unsigned long long));
50483 +}
50484 +
50485 +static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
50486 +{
50487 +       BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
50488 +       if (cachep->flags & SLAB_STORE_USER)
50489 +               return (unsigned long long *)(objp + cachep->buffer_size -
50490 +                                             sizeof(unsigned long long) -
50491 +                                             REDZONE_ALIGN);
50492 +       return (unsigned long long *) (objp + cachep->buffer_size -
50493 +                                      sizeof(unsigned long long));
50494 +}
50495 +
50496 +static void **dbg_userword(struct kmem_cache *cachep, void *objp)
50497 +{
50498 +       BUG_ON(!(cachep->flags & SLAB_STORE_USER));
50499 +       return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
50500 +}
50501 +
50502 +#else
50503 +
50504 +#define obj_offset(x)                  0
50505 +#define obj_size(cachep)               (cachep->buffer_size)
50506 +#define dbg_redzone1(cachep, objp)     ({BUG(); (unsigned long long *)NULL;})
50507 +#define dbg_redzone2(cachep, objp)     ({BUG(); (unsigned long long *)NULL;})
50508 +#define dbg_userword(cachep, objp)     ({BUG(); (void **)NULL;})
50509 +
50510 +#endif
50511 +
50512 +/*
50513 + * Do not go above this order unless 0 objects fit into the slab.
50514 + */
50515 +#define        BREAK_GFP_ORDER_HI      1
50516 +#define        BREAK_GFP_ORDER_LO      0
50517 +static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
50518 +
50519 +/*
50520 + * Functions for storing/retrieving the cachep and or slab from the page
50521 + * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
50522 + * these are used to find the cache which an obj belongs to.
50523 + */
50524 +static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
50525 +{
50526 +       page->lru.next = (struct list_head *)cache;
50527 +}
50528 +
50529 +static inline struct kmem_cache *page_get_cache(struct page *page)
50530 +{
50531 +       page = compound_head(page);
50532 +       BUG_ON(!PageSlab(page));
50533 +       return (struct kmem_cache *)page->lru.next;
50534 +}
50535 +
50536 +static inline void page_set_slab(struct page *page, struct slab *slab)
50537 +{
50538 +       page->lru.prev = (struct list_head *)slab;
50539 +}
50540 +
50541 +static inline struct slab *page_get_slab(struct page *page)
50542 +{
50543 +       BUG_ON(!PageSlab(page));
50544 +       return (struct slab *)page->lru.prev;
50545 +}
50546 +
50547 +static inline struct kmem_cache *virt_to_cache(const void *obj)
50548 +{
50549 +       struct page *page = virt_to_head_page(obj);
50550 +       return page_get_cache(page);
50551 +}
50552 +
50553 +static inline struct slab *virt_to_slab(const void *obj)
50554 +{
50555 +       struct page *page = virt_to_head_page(obj);
50556 +       return page_get_slab(page);
50557 +}
50558 +
50559 +static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
50560 +                                unsigned int idx)
50561 +{
50562 +       return slab->s_mem + cache->buffer_size * idx;
50563 +}
50564 +
50565 +/*
50566 + * We want to avoid an expensive divide : (offset / cache->buffer_size)
50567 + *   Using the fact that buffer_size is a constant for a particular cache,
50568 + *   we can replace (offset / cache->buffer_size) by
50569 + *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
50570 + */
50571 +static inline unsigned int obj_to_index(const struct kmem_cache *cache,
50572 +                                       const struct slab *slab, void *obj)
50573 +{
50574 +       u32 offset = (obj - slab->s_mem);
50575 +       return reciprocal_divide(offset, cache->reciprocal_buffer_size);
50576 +}
50577 +
50578 +/*
50579 + * These are the default caches for kmalloc. Custom caches can have other sizes.
50580 + */
50581 +struct cache_sizes malloc_sizes[] = {
50582 +#define CACHE(x) { .cs_size = (x) },
50583 +#include <linux/kmalloc_sizes.h>
50584 +       CACHE(ULONG_MAX)
50585 +#undef CACHE
50586 +};
50587 +EXPORT_SYMBOL(malloc_sizes);
50588 +
50589 +/* Must match cache_sizes above. Out of line to keep cache footprint low. */
50590 +struct cache_names {
50591 +       char *name;
50592 +       char *name_dma;
50593 +};
50594 +
50595 +static struct cache_names __initdata cache_names[] = {
50596 +#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
50597 +#include <linux/kmalloc_sizes.h>
50598 +       {NULL,}
50599 +#undef CACHE
50600 +};
50601 +
50602 +static struct arraycache_init initarray_cache __initdata =
50603 +    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
50604 +static struct arraycache_init initarray_generic =
50605 +    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
50606 +
50607 +/* internal cache of cache description objs */
50608 +static struct kmem_cache cache_cache = {
50609 +       .batchcount = 1,
50610 +       .limit = BOOT_CPUCACHE_ENTRIES,
50611 +       .shared = 1,
50612 +       .buffer_size = sizeof(struct kmem_cache),
50613 +       .name = "kmem_cache",
50614 +};
50615 +
50616 +#define BAD_ALIEN_MAGIC 0x01020304ul
50617 +
50618 +#ifdef CONFIG_LOCKDEP
50619 +
50620 +/*
50621 + * Slab sometimes uses the kmalloc slabs to store the slab headers
50622 + * for other slabs "off slab".
50623 + * The locking for this is tricky in that it nests within the locks
50624 + * of all other slabs in a few places; to deal with this special
50625 + * locking we put on-slab caches into a separate lock-class.
50626 + *
50627 + * We set lock class for alien array caches which are up during init.
50628 + * The lock annotation will be lost if all cpus of a node goes down and
50629 + * then comes back up during hotplug
50630 + */
50631 +static struct lock_class_key on_slab_l3_key;
50632 +static struct lock_class_key on_slab_alc_key;
50633 +
50634 +static inline void init_lock_keys(void)
50635 +
50636 +{
50637 +       int q;
50638 +       struct cache_sizes *s = malloc_sizes;
50639 +
50640 +       while (s->cs_size != ULONG_MAX) {
50641 +               for_each_node(q) {
50642 +                       struct array_cache **alc;
50643 +                       int r;
50644 +                       struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
50645 +                       if (!l3 || OFF_SLAB(s->cs_cachep))
50646 +                               continue;
50647 +                       lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
50648 +                       alc = l3->alien;
50649 +                       /*
50650 +                        * FIXME: This check for BAD_ALIEN_MAGIC
50651 +                        * should go away when common slab code is taught to
50652 +                        * work even without alien caches.
50653 +                        * Currently, non NUMA code returns BAD_ALIEN_MAGIC
50654 +                        * for alloc_alien_cache,
50655 +                        */
50656 +                       if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
50657 +                               continue;
50658 +                       for_each_node(r) {
50659 +                               if (alc[r])
50660 +                                       lockdep_set_class(&alc[r]->lock,
50661 +                                            &on_slab_alc_key);
50662 +                       }
50663 +               }
50664 +               s++;
50665 +       }
50666 +}
50667 +#else
50668 +static inline void init_lock_keys(void)
50669 +{
50670 +}
50671 +#endif
50672 +
50673 +/*
50674 + * 1. Guard access to the cache-chain.
50675 + * 2. Protect sanity of cpu_online_map against cpu hotplug events
50676 + */
50677 +static DEFINE_MUTEX(cache_chain_mutex);
50678 +static struct list_head cache_chain;
50679 +
50680 +/*
50681 + * chicken and egg problem: delay the per-cpu array allocation
50682 + * until the general caches are up.
50683 + */
50684 +static enum {
50685 +       NONE,
50686 +       PARTIAL_AC,
50687 +       PARTIAL_L3,
50688 +       FULL
50689 +} g_cpucache_up;
50690 +
50691 +/*
50692 + * used by boot code to determine if it can use slab based allocator
50693 + */
50694 +int slab_is_available(void)
50695 +{
50696 +       return g_cpucache_up == FULL;
50697 +}
50698 +
50699 +static DEFINE_PER_CPU(struct delayed_work, reap_work);
50700 +
50701 +static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
50702 +{
50703 +       return cachep->array[smp_processor_id()];
50704 +}
50705 +
50706 +static inline struct kmem_cache *__find_general_cachep(size_t size,
50707 +                                                       gfp_t gfpflags)
50708 +{
50709 +       struct cache_sizes *csizep = malloc_sizes;
50710 +
50711 +#if DEBUG
50712 +       /* This happens if someone tries to call
50713 +        * kmem_cache_create(), or __kmalloc(), before
50714 +        * the generic caches are initialized.
50715 +        */
50716 +       BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
50717 +#endif
50718 +       while (size > csizep->cs_size)
50719 +               csizep++;
50720 +
50721 +       /*
50722 +        * Really subtle: The last entry with cs->cs_size==ULONG_MAX
50723 +        * has cs_{dma,}cachep==NULL. Thus no special case
50724 +        * for large kmalloc calls required.
50725 +        */
50726 +#ifdef CONFIG_ZONE_DMA
50727 +       if (unlikely(gfpflags & GFP_DMA))
50728 +               return csizep->cs_dmacachep;
50729 +#endif
50730 +       return csizep->cs_cachep;
50731 +}
50732 +
50733 +static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
50734 +{
50735 +       return __find_general_cachep(size, gfpflags);
50736 +}
50737 +
50738 +static size_t slab_mgmt_size(size_t nr_objs, size_t align)
50739 +{
50740 +       return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
50741 +}
50742 +
50743 +/*
50744 + * Calculate the number of objects and left-over bytes for a given buffer size.
50745 + */
50746 +static void cache_estimate(unsigned long gfporder, size_t buffer_size,
50747 +                          size_t align, int flags, size_t *left_over,
50748 +                          unsigned int *num)
50749 +{
50750 +       int nr_objs;
50751 +       size_t mgmt_size;
50752 +       size_t slab_size = PAGE_SIZE << gfporder;
50753 +
50754 +       /*
50755 +        * The slab management structure can be either off the slab or
50756 +        * on it. For the latter case, the memory allocated for a
50757 +        * slab is used for:
50758 +        *
50759 +        * - The struct slab
50760 +        * - One kmem_bufctl_t for each object
50761 +        * - Padding to respect alignment of @align
50762 +        * - @buffer_size bytes for each object
50763 +        *
50764 +        * If the slab management structure is off the slab, then the
50765 +        * alignment will already be calculated into the size. Because
50766 +        * the slabs are all pages aligned, the objects will be at the
50767 +        * correct alignment when allocated.
50768 +        */
50769 +       if (flags & CFLGS_OFF_SLAB) {
50770 +               mgmt_size = 0;
50771 +               nr_objs = slab_size / buffer_size;
50772 +
50773 +               if (nr_objs > SLAB_LIMIT)
50774 +                       nr_objs = SLAB_LIMIT;
50775 +       } else {
50776 +               /*
50777 +                * Ignore padding for the initial guess. The padding
50778 +                * is at most @align-1 bytes, and @buffer_size is at
50779 +                * least @align. In the worst case, this result will
50780 +                * be one greater than the number of objects that fit
50781 +                * into the memory allocation when taking the padding
50782 +                * into account.
50783 +                */
50784 +               nr_objs = (slab_size - sizeof(struct slab)) /
50785 +                         (buffer_size + sizeof(kmem_bufctl_t));
50786 +
50787 +               /*
50788 +                * This calculated number will be either the right
50789 +                * amount, or one greater than what we want.
50790 +                */
50791 +               if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
50792 +                      > slab_size)
50793 +                       nr_objs--;
50794 +
50795 +               if (nr_objs > SLAB_LIMIT)
50796 +                       nr_objs = SLAB_LIMIT;
50797 +
50798 +               mgmt_size = slab_mgmt_size(nr_objs, align);
50799 +       }
50800 +       *num = nr_objs;
50801 +       *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
50802 +}
50803 +
50804 +#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
50805 +
50806 +static void __slab_error(const char *function, struct kmem_cache *cachep,
50807 +                       char *msg)
50808 +{
50809 +       printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
50810 +              function, cachep->name, msg);
50811 +       dump_stack();
50812 +}
50813 +
50814 +/*
50815 + * By default on NUMA we use alien caches to stage the freeing of
50816 + * objects allocated from other nodes. This causes massive memory
50817 + * inefficiencies when using fake NUMA setup to split memory into a
50818 + * large number of small nodes, so it can be disabled on the command
50819 + * line
50820 +  */
50821 +
50822 +static int use_alien_caches __read_mostly = 1;
50823 +static int __init noaliencache_setup(char *s)
50824 +{
50825 +       use_alien_caches = 0;
50826 +       return 1;
50827 +}
50828 +__setup("noaliencache", noaliencache_setup);
50829 +
50830 +#ifdef CONFIG_NUMA
50831 +/*
50832 + * Special reaping functions for NUMA systems called from cache_reap().
50833 + * These take care of doing round robin flushing of alien caches (containing
50834 + * objects freed on different nodes from which they were allocated) and the
50835 + * flushing of remote pcps by calling drain_node_pages.
50836 + */
50837 +static DEFINE_PER_CPU(unsigned long, reap_node);
50838 +
50839 +static void init_reap_node(int cpu)
50840 +{
50841 +       int node;
50842 +
50843 +       node = next_node(cpu_to_node(cpu), node_online_map);
50844 +       if (node == MAX_NUMNODES)
50845 +               node = first_node(node_online_map);
50846 +
50847 +       per_cpu(reap_node, cpu) = node;
50848 +}
50849 +
50850 +static void next_reap_node(void)
50851 +{
50852 +       int node = __get_cpu_var(reap_node);
50853 +
50854 +       node = next_node(node, node_online_map);
50855 +       if (unlikely(node >= MAX_NUMNODES))
50856 +               node = first_node(node_online_map);
50857 +       __get_cpu_var(reap_node) = node;
50858 +}
50859 +
50860 +#else
50861 +#define init_reap_node(cpu) do { } while (0)
50862 +#define next_reap_node(void) do { } while (0)
50863 +#endif
50864 +
50865 +/*
50866 + * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
50867 + * via the workqueue/eventd.
50868 + * Add the CPU number into the expiration time to minimize the possibility of
50869 + * the CPUs getting into lockstep and contending for the global cache chain
50870 + * lock.
50871 + */
50872 +static void __devinit start_cpu_timer(int cpu)
50873 +{
50874 +       struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
50875 +
50876 +       /*
50877 +        * When this gets called from do_initcalls via cpucache_init(),
50878 +        * init_workqueues() has already run, so keventd will be setup
50879 +        * at that time.
50880 +        */
50881 +       if (keventd_up() && reap_work->work.func == NULL) {
50882 +               init_reap_node(cpu);
50883 +               INIT_DELAYED_WORK(reap_work, cache_reap);
50884 +               schedule_delayed_work_on(cpu, reap_work,
50885 +                                       __round_jiffies_relative(HZ, cpu));
50886 +       }
50887 +}
50888 +
50889 +static struct array_cache *alloc_arraycache(int node, int entries,
50890 +                                           int batchcount)
50891 +{
50892 +       int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
50893 +       struct array_cache *nc = NULL;
50894 +
50895 +       nc = kmalloc_node(memsize, GFP_KERNEL, node);
50896 +       if (nc) {
50897 +               nc->avail = 0;
50898 +               nc->limit = entries;
50899 +               nc->batchcount = batchcount;
50900 +               nc->touched = 0;
50901 +               spin_lock_init(&nc->lock);
50902 +       }
50903 +       return nc;
50904 +}
50905 +
50906 +/*
50907 + * Transfer objects in one arraycache to another.
50908 + * Locking must be handled by the caller.
50909 + *
50910 + * Return the number of entries transferred.
50911 + */
50912 +static int transfer_objects(struct array_cache *to,
50913 +               struct array_cache *from, unsigned int max)
50914 +{
50915 +       /* Figure out how many entries to transfer */
50916 +       int nr = min(min(from->avail, max), to->limit - to->avail);
50917 +
50918 +       if (!nr)
50919 +               return 0;
50920 +
50921 +       memcpy(to->entry + to->avail, from->entry + from->avail -nr,
50922 +                       sizeof(void *) *nr);
50923 +
50924 +       from->avail -= nr;
50925 +       to->avail += nr;
50926 +       to->touched = 1;
50927 +       return nr;
50928 +}
50929 +
50930 +#ifndef CONFIG_NUMA
50931 +
50932 +#define drain_alien_cache(cachep, alien) do { } while (0)
50933 +#define reap_alien(cachep, l3) do { } while (0)
50934 +
50935 +static inline struct array_cache **alloc_alien_cache(int node, int limit)
50936 +{
50937 +       return (struct array_cache **)BAD_ALIEN_MAGIC;
50938 +}
50939 +
50940 +static inline void free_alien_cache(struct array_cache **ac_ptr)
50941 +{
50942 +}
50943 +
50944 +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
50945 +{
50946 +       return 0;
50947 +}
50948 +
50949 +static inline void *alternate_node_alloc(struct kmem_cache *cachep,
50950 +               gfp_t flags)
50951 +{
50952 +       return NULL;
50953 +}
50954 +
50955 +static inline void *____cache_alloc_node(struct kmem_cache *cachep,
50956 +                gfp_t flags, int nodeid)
50957 +{
50958 +       return NULL;
50959 +}
50960 +
50961 +#else  /* CONFIG_NUMA */
50962 +
50963 +static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
50964 +static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
50965 +
50966 +static struct array_cache **alloc_alien_cache(int node, int limit)
50967 +{
50968 +       struct array_cache **ac_ptr;
50969 +       int memsize = sizeof(void *) * nr_node_ids;
50970 +       int i;
50971 +
50972 +       if (limit > 1)
50973 +               limit = 12;
50974 +       ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
50975 +       if (ac_ptr) {
50976 +               for_each_node(i) {
50977 +                       if (i == node || !node_online(i)) {
50978 +                               ac_ptr[i] = NULL;
50979 +                               continue;
50980 +                       }
50981 +                       ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
50982 +                       if (!ac_ptr[i]) {
50983 +                               for (i--; i <= 0; i--)
50984 +                                       kfree(ac_ptr[i]);
50985 +                               kfree(ac_ptr);
50986 +                               return NULL;
50987 +                       }
50988 +               }
50989 +       }
50990 +       return ac_ptr;
50991 +}
50992 +
50993 +static void free_alien_cache(struct array_cache **ac_ptr)
50994 +{
50995 +       int i;
50996 +
50997 +       if (!ac_ptr)
50998 +               return;
50999 +       for_each_node(i)
51000 +           kfree(ac_ptr[i]);
51001 +       kfree(ac_ptr);
51002 +}
51003 +
51004 +static void __drain_alien_cache(struct kmem_cache *cachep,
51005 +                               struct array_cache *ac, int node)
51006 +{
51007 +       struct kmem_list3 *rl3 = cachep->nodelists[node];
51008 +
51009 +       if (ac->avail) {
51010 +               spin_lock(&rl3->list_lock);
51011 +               /*
51012 +                * Stuff objects into the remote nodes shared array first.
51013 +                * That way we could avoid the overhead of putting the objects
51014 +                * into the free lists and getting them back later.
51015 +                */
51016 +               if (rl3->shared)
51017 +                       transfer_objects(rl3->shared, ac, ac->limit);
51018 +
51019 +               free_block(cachep, ac->entry, ac->avail, node);
51020 +               ac->avail = 0;
51021 +               spin_unlock(&rl3->list_lock);
51022 +       }
51023 +}
51024 +
51025 +/*
51026 + * Called from cache_reap() to regularly drain alien caches round robin.
51027 + */
51028 +static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
51029 +{
51030 +       int node = __get_cpu_var(reap_node);
51031 +
51032 +       if (l3->alien) {
51033 +               struct array_cache *ac = l3->alien[node];
51034 +
51035 +               if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
51036 +                       __drain_alien_cache(cachep, ac, node);
51037 +                       spin_unlock_irq(&ac->lock);
51038 +               }
51039 +       }
51040 +}
51041 +
51042 +static void drain_alien_cache(struct kmem_cache *cachep,
51043 +                               struct array_cache **alien)
51044 +{
51045 +       int i = 0;
51046 +       struct array_cache *ac;
51047 +       unsigned long flags;
51048 +
51049 +       for_each_online_node(i) {
51050 +               ac = alien[i];
51051 +               if (ac) {
51052 +                       spin_lock_irqsave(&ac->lock, flags);
51053 +                       __drain_alien_cache(cachep, ac, i);
51054 +                       spin_unlock_irqrestore(&ac->lock, flags);
51055 +               }
51056 +       }
51057 +}
51058 +
51059 +static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
51060 +{
51061 +       struct slab *slabp = virt_to_slab(objp);
51062 +       int nodeid = slabp->nodeid;
51063 +       struct kmem_list3 *l3;
51064 +       struct array_cache *alien = NULL;
51065 +       int node;
51066 +
51067 +       node = numa_node_id();
51068 +
51069 +       /*
51070 +        * Make sure we are not freeing a object from another node to the array
51071 +        * cache on this cpu.
51072 +        */
51073 +       if (likely(slabp->nodeid == node))
51074 +               return 0;
51075 +
51076 +       l3 = cachep->nodelists[node];
51077 +       STATS_INC_NODEFREES(cachep);
51078 +       if (l3->alien && l3->alien[nodeid]) {
51079 +               alien = l3->alien[nodeid];
51080 +               spin_lock(&alien->lock);
51081 +               if (unlikely(alien->avail == alien->limit)) {
51082 +                       STATS_INC_ACOVERFLOW(cachep);
51083 +                       __drain_alien_cache(cachep, alien, nodeid);
51084 +               }
51085 +               alien->entry[alien->avail++] = objp;
51086 +               spin_unlock(&alien->lock);
51087 +       } else {
51088 +               spin_lock(&(cachep->nodelists[nodeid])->list_lock);
51089 +               free_block(cachep, &objp, 1, nodeid);
51090 +               spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
51091 +       }
51092 +       return 1;
51093 +}
51094 +#endif
51095 +
51096 +static int __cpuinit cpuup_callback(struct notifier_block *nfb,
51097 +                                   unsigned long action, void *hcpu)
51098 +{
51099 +       long cpu = (long)hcpu;
51100 +       struct kmem_cache *cachep;
51101 +       struct kmem_list3 *l3 = NULL;
51102 +       int node = cpu_to_node(cpu);
51103 +       int memsize = sizeof(struct kmem_list3);
51104 +
51105 +       switch (action) {
51106 +       case CPU_LOCK_ACQUIRE:
51107 +               mutex_lock(&cache_chain_mutex);
51108 +               break;
51109 +       case CPU_UP_PREPARE:
51110 +       case CPU_UP_PREPARE_FROZEN:
51111 +               /*
51112 +                * We need to do this right in the beginning since
51113 +                * alloc_arraycache's are going to use this list.
51114 +                * kmalloc_node allows us to add the slab to the right
51115 +                * kmem_list3 and not this cpu's kmem_list3
51116 +                */
51117 +
51118 +               list_for_each_entry(cachep, &cache_chain, next) {
51119 +                       /*
51120 +                        * Set up the size64 kmemlist for cpu before we can
51121 +                        * begin anything. Make sure some other cpu on this
51122 +                        * node has not already allocated this
51123 +                        */
51124 +                       if (!cachep->nodelists[node]) {
51125 +                               l3 = kmalloc_node(memsize, GFP_KERNEL, node);
51126 +                               if (!l3)
51127 +                                       goto bad;
51128 +                               kmem_list3_init(l3);
51129 +                               l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
51130 +                                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
51131 +
51132 +                               /*
51133 +                                * The l3s don't come and go as CPUs come and
51134 +                                * go.  cache_chain_mutex is sufficient
51135 +                                * protection here.
51136 +                                */
51137 +                               cachep->nodelists[node] = l3;
51138 +                       }
51139 +
51140 +                       spin_lock_irq(&cachep->nodelists[node]->list_lock);
51141 +                       cachep->nodelists[node]->free_limit =
51142 +                               (1 + nr_cpus_node(node)) *
51143 +                               cachep->batchcount + cachep->num;
51144 +                       spin_unlock_irq(&cachep->nodelists[node]->list_lock);
51145 +               }
51146 +
51147 +               /*
51148 +                * Now we can go ahead with allocating the shared arrays and
51149 +                * array caches
51150 +                */
51151 +               list_for_each_entry(cachep, &cache_chain, next) {
51152 +                       struct array_cache *nc;
51153 +                       struct array_cache *shared = NULL;
51154 +                       struct array_cache **alien = NULL;
51155 +
51156 +                       nc = alloc_arraycache(node, cachep->limit,
51157 +                                               cachep->batchcount);
51158 +                       if (!nc)
51159 +                               goto bad;
51160 +                       if (cachep->shared) {
51161 +                               shared = alloc_arraycache(node,
51162 +                                       cachep->shared * cachep->batchcount,
51163 +                                       0xbaadf00d);
51164 +                               if (!shared)
51165 +                                       goto bad;
51166 +                       }
51167 +                       if (use_alien_caches) {
51168 +                                alien = alloc_alien_cache(node, cachep->limit);
51169 +                                if (!alien)
51170 +                                        goto bad;
51171 +                        }
51172 +                       cachep->array[cpu] = nc;
51173 +                       l3 = cachep->nodelists[node];
51174 +                       BUG_ON(!l3);
51175 +
51176 +                       spin_lock_irq(&l3->list_lock);
51177 +                       if (!l3->shared) {
51178 +                               /*
51179 +                                * We are serialised from CPU_DEAD or
51180 +                                * CPU_UP_CANCELLED by the cpucontrol lock
51181 +                                */
51182 +                               l3->shared = shared;
51183 +                               shared = NULL;
51184 +                       }
51185 +#ifdef CONFIG_NUMA
51186 +                       if (!l3->alien) {
51187 +                               l3->alien = alien;
51188 +                               alien = NULL;
51189 +                       }
51190 +#endif
51191 +                       spin_unlock_irq(&l3->list_lock);
51192 +                       kfree(shared);
51193 +                       free_alien_cache(alien);
51194 +               }
51195 +               break;
51196 +       case CPU_ONLINE:
51197 +       case CPU_ONLINE_FROZEN:
51198 +               start_cpu_timer(cpu);
51199 +               break;
51200 +#ifdef CONFIG_HOTPLUG_CPU
51201 +       case CPU_DOWN_PREPARE:
51202 +       case CPU_DOWN_PREPARE_FROZEN:
51203 +               /*
51204 +                * Shutdown cache reaper. Note that the cache_chain_mutex is
51205 +                * held so that if cache_reap() is invoked it cannot do
51206 +                * anything expensive but will only modify reap_work
51207 +                * and reschedule the timer.
51208 +               */
51209 +               cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
51210 +               /* Now the cache_reaper is guaranteed to be not running. */
51211 +               per_cpu(reap_work, cpu).work.func = NULL;
51212 +               break;
51213 +       case CPU_DOWN_FAILED:
51214 +       case CPU_DOWN_FAILED_FROZEN:
51215 +               start_cpu_timer(cpu);
51216 +               break;
51217 +       case CPU_DEAD:
51218 +       case CPU_DEAD_FROZEN:
51219 +               /*
51220 +                * Even if all the cpus of a node are down, we don't free the
51221 +                * kmem_list3 of any cache. This to avoid a race between
51222 +                * cpu_down, and a kmalloc allocation from another cpu for
51223 +                * memory from the node of the cpu going down.  The list3
51224 +                * structure is usually allocated from kmem_cache_create() and
51225 +                * gets destroyed at kmem_cache_destroy().
51226 +                */
51227 +               /* fall thru */
51228 +#endif
51229 +       case CPU_UP_CANCELED:
51230 +       case CPU_UP_CANCELED_FROZEN:
51231 +               list_for_each_entry(cachep, &cache_chain, next) {
51232 +                       struct array_cache *nc;
51233 +                       struct array_cache *shared;
51234 +                       struct array_cache **alien;
51235 +                       cpumask_t mask;
51236 +
51237 +                       mask = node_to_cpumask(node);
51238 +                       /* cpu is dead; no one can alloc from it. */
51239 +                       nc = cachep->array[cpu];
51240 +                       cachep->array[cpu] = NULL;
51241 +                       l3 = cachep->nodelists[node];
51242 +
51243 +                       if (!l3)
51244 +                               goto free_array_cache;
51245 +
51246 +                       spin_lock_irq(&l3->list_lock);
51247 +
51248 +                       /* Free limit for this kmem_list3 */
51249 +                       l3->free_limit -= cachep->batchcount;
51250 +                       if (nc)
51251 +                               free_block(cachep, nc->entry, nc->avail, node);
51252 +
51253 +                       if (!cpus_empty(mask)) {
51254 +                               spin_unlock_irq(&l3->list_lock);
51255 +                               goto free_array_cache;
51256 +                       }
51257 +
51258 +                       shared = l3->shared;
51259 +                       if (shared) {
51260 +                               free_block(cachep, shared->entry,
51261 +                                          shared->avail, node);
51262 +                               l3->shared = NULL;
51263 +                       }
51264 +
51265 +                       alien = l3->alien;
51266 +                       l3->alien = NULL;
51267 +
51268 +                       spin_unlock_irq(&l3->list_lock);
51269 +
51270 +                       kfree(shared);
51271 +                       if (alien) {
51272 +                               drain_alien_cache(cachep, alien);
51273 +                               free_alien_cache(alien);
51274 +                       }
51275 +free_array_cache:
51276 +                       kfree(nc);
51277 +               }
51278 +               /*
51279 +                * In the previous loop, all the objects were freed to
51280 +                * the respective cache's slabs,  now we can go ahead and
51281 +                * shrink each nodelist to its limit.
51282 +                */
51283 +               list_for_each_entry(cachep, &cache_chain, next) {
51284 +                       l3 = cachep->nodelists[node];
51285 +                       if (!l3)
51286 +                               continue;
51287 +                       drain_freelist(cachep, l3, l3->free_objects);
51288 +               }
51289 +               break;
51290 +       case CPU_LOCK_RELEASE:
51291 +               mutex_unlock(&cache_chain_mutex);
51292 +               break;
51293 +       }
51294 +       return NOTIFY_OK;
51295 +bad:
51296 +       return NOTIFY_BAD;
51297 +}
51298 +
51299 +static struct notifier_block __cpuinitdata cpucache_notifier = {
51300 +       &cpuup_callback, NULL, 0
51301 +};
51302 +
51303 +/*
51304 + * swap the static kmem_list3 with kmalloced memory
51305 + */
51306 +static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
51307 +                       int nodeid)
51308 +{
51309 +       struct kmem_list3 *ptr;
51310 +
51311 +       ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
51312 +       BUG_ON(!ptr);
51313 +
51314 +       local_irq_disable();
51315 +       memcpy(ptr, list, sizeof(struct kmem_list3));
51316 +       /*
51317 +        * Do not assume that spinlocks can be initialized via memcpy:
51318 +        */
51319 +       spin_lock_init(&ptr->list_lock);
51320 +
51321 +       MAKE_ALL_LISTS(cachep, ptr, nodeid);
51322 +       cachep->nodelists[nodeid] = ptr;
51323 +       local_irq_enable();
51324 +}
51325 +
51326 +/*
51327 + * Initialisation.  Called after the page allocator have been initialised and
51328 + * before smp_init().
51329 + */
51330 +void __init kmem_cache_init(void)
51331 +{
51332 +       size_t left_over;
51333 +       struct cache_sizes *sizes;
51334 +       struct cache_names *names;
51335 +       int i;
51336 +       int order;
51337 +       int node;
51338 +
51339 +       if (num_possible_nodes() == 1)
51340 +               use_alien_caches = 0;
51341 +
51342 +       for (i = 0; i < NUM_INIT_LISTS; i++) {
51343 +               kmem_list3_init(&initkmem_list3[i]);
51344 +               if (i < MAX_NUMNODES)
51345 +                       cache_cache.nodelists[i] = NULL;
51346 +       }
51347 +
51348 +       /*
51349 +        * Fragmentation resistance on low memory - only use bigger
51350 +        * page orders on machines with more than 32MB of memory.
51351 +        */
51352 +       if (num_physpages > (32 << 20) >> PAGE_SHIFT)
51353 +               slab_break_gfp_order = BREAK_GFP_ORDER_HI;
51354 +
51355 +       /* Bootstrap is tricky, because several objects are allocated
51356 +        * from caches that do not exist yet:
51357 +        * 1) initialize the cache_cache cache: it contains the struct
51358 +        *    kmem_cache structures of all caches, except cache_cache itself:
51359 +        *    cache_cache is statically allocated.
51360 +        *    Initially an __init data area is used for the head array and the
51361 +        *    kmem_list3 structures, it's replaced with a kmalloc allocated
51362 +        *    array at the end of the bootstrap.
51363 +        * 2) Create the first kmalloc cache.
51364 +        *    The struct kmem_cache for the new cache is allocated normally.
51365 +        *    An __init data area is used for the head array.
51366 +        * 3) Create the remaining kmalloc caches, with minimally sized
51367 +        *    head arrays.
51368 +        * 4) Replace the __init data head arrays for cache_cache and the first
51369 +        *    kmalloc cache with kmalloc allocated arrays.
51370 +        * 5) Replace the __init data for kmem_list3 for cache_cache and
51371 +        *    the other cache's with kmalloc allocated memory.
51372 +        * 6) Resize the head arrays of the kmalloc caches to their final sizes.
51373 +        */
51374 +
51375 +       node = numa_node_id();
51376 +
51377 +       /* 1) create the cache_cache */
51378 +       INIT_LIST_HEAD(&cache_chain);
51379 +       list_add(&cache_cache.next, &cache_chain);
51380 +       cache_cache.colour_off = cache_line_size();
51381 +       cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
51382 +       cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
51383 +
51384 +       /*
51385 +        * struct kmem_cache size depends on nr_node_ids, which
51386 +        * can be less than MAX_NUMNODES.
51387 +        */
51388 +       cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
51389 +                                nr_node_ids * sizeof(struct kmem_list3 *);
51390 +#if DEBUG
51391 +       cache_cache.obj_size = cache_cache.buffer_size;
51392 +#endif
51393 +       cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
51394 +                                       cache_line_size());
51395 +       cache_cache.reciprocal_buffer_size =
51396 +               reciprocal_value(cache_cache.buffer_size);
51397 +
51398 +       for (order = 0; order < MAX_ORDER; order++) {
51399 +               cache_estimate(order, cache_cache.buffer_size,
51400 +                       cache_line_size(), 0, &left_over, &cache_cache.num);
51401 +               if (cache_cache.num)
51402 +                       break;
51403 +       }
51404 +       BUG_ON(!cache_cache.num);
51405 +       cache_cache.gfporder = order;
51406 +       cache_cache.colour = left_over / cache_cache.colour_off;
51407 +       cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
51408 +                                     sizeof(struct slab), cache_line_size());
51409 +
51410 +       /* 2+3) create the kmalloc caches */
51411 +       sizes = malloc_sizes;
51412 +       names = cache_names;
51413 +
51414 +       /*
51415 +        * Initialize the caches that provide memory for the array cache and the
51416 +        * kmem_list3 structures first.  Without this, further allocations will
51417 +        * bug.
51418 +        */
51419 +
51420 +       sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
51421 +                                       sizes[INDEX_AC].cs_size,
51422 +                                       ARCH_KMALLOC_MINALIGN,
51423 +                                       ARCH_KMALLOC_FLAGS|SLAB_PANIC,
51424 +                                       NULL, NULL);
51425 +
51426 +       if (INDEX_AC != INDEX_L3) {
51427 +               sizes[INDEX_L3].cs_cachep =
51428 +                       kmem_cache_create(names[INDEX_L3].name,
51429 +                               sizes[INDEX_L3].cs_size,
51430 +                               ARCH_KMALLOC_MINALIGN,
51431 +                               ARCH_KMALLOC_FLAGS|SLAB_PANIC,
51432 +                               NULL, NULL);
51433 +       }
51434 +
51435 +       slab_early_init = 0;
51436 +
51437 +       while (sizes->cs_size != ULONG_MAX) {
51438 +               /*
51439 +                * For performance, all the general caches are L1 aligned.
51440 +                * This should be particularly beneficial on SMP boxes, as it
51441 +                * eliminates "false sharing".
51442 +                * Note for systems short on memory removing the alignment will
51443 +                * allow tighter packing of the smaller caches.
51444 +                */
51445 +               if (!sizes->cs_cachep) {
51446 +                       sizes->cs_cachep = kmem_cache_create(names->name,
51447 +                                       sizes->cs_size,
51448 +                                       ARCH_KMALLOC_MINALIGN,
51449 +                                       ARCH_KMALLOC_FLAGS|SLAB_PANIC,
51450 +                                       NULL, NULL);
51451 +               }
51452 +#ifdef CONFIG_ZONE_DMA
51453 +               sizes->cs_dmacachep = kmem_cache_create(
51454 +                                       names->name_dma,
51455 +                                       sizes->cs_size,
51456 +                                       ARCH_KMALLOC_MINALIGN,
51457 +                                       ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
51458 +                                               SLAB_PANIC,
51459 +                                       NULL, NULL);
51460 +#endif
51461 +               sizes++;
51462 +               names++;
51463 +       }
51464 +       /* 4) Replace the bootstrap head arrays */
51465 +       {
51466 +               struct array_cache *ptr;
51467 +
51468 +               ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
51469 +
51470 +               local_irq_disable();
51471 +               BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
51472 +               memcpy(ptr, cpu_cache_get(&cache_cache),
51473 +                      sizeof(struct arraycache_init));
51474 +               /*
51475 +                * Do not assume that spinlocks can be initialized via memcpy:
51476 +                */
51477 +               spin_lock_init(&ptr->lock);
51478 +
51479 +               cache_cache.array[smp_processor_id()] = ptr;
51480 +               local_irq_enable();
51481 +
51482 +               ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
51483 +
51484 +               local_irq_disable();
51485 +               BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
51486 +                      != &initarray_generic.cache);
51487 +               memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
51488 +                      sizeof(struct arraycache_init));
51489 +               /*
51490 +                * Do not assume that spinlocks can be initialized via memcpy:
51491 +                */
51492 +               spin_lock_init(&ptr->lock);
51493 +
51494 +               malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
51495 +                   ptr;
51496 +               local_irq_enable();
51497 +       }
51498 +       /* 5) Replace the bootstrap kmem_list3's */
51499 +       {
51500 +               int nid;
51501 +
51502 +               /* Replace the static kmem_list3 structures for the boot cpu */
51503 +               init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
51504 +
51505 +               for_each_online_node(nid) {
51506 +                       init_list(malloc_sizes[INDEX_AC].cs_cachep,
51507 +                                 &initkmem_list3[SIZE_AC + nid], nid);
51508 +
51509 +                       if (INDEX_AC != INDEX_L3) {
51510 +                               init_list(malloc_sizes[INDEX_L3].cs_cachep,
51511 +                                         &initkmem_list3[SIZE_L3 + nid], nid);
51512 +                       }
51513 +               }
51514 +       }
51515 +
51516 +       /* 6) resize the head arrays to their final sizes */
51517 +       {
51518 +               struct kmem_cache *cachep;
51519 +               mutex_lock(&cache_chain_mutex);
51520 +               list_for_each_entry(cachep, &cache_chain, next)
51521 +                       if (enable_cpucache(cachep))
51522 +                               BUG();
51523 +               mutex_unlock(&cache_chain_mutex);
51524 +       }
51525 +
51526 +       /* Annotate slab for lockdep -- annotate the malloc caches */
51527 +       init_lock_keys();
51528 +
51529 +
51530 +       /* Done! */
51531 +       g_cpucache_up = FULL;
51532 +
51533 +       /*
51534 +        * Register a cpu startup notifier callback that initializes
51535 +        * cpu_cache_get for all new cpus
51536 +        */
51537 +       register_cpu_notifier(&cpucache_notifier);
51538 +
51539 +       /*
51540 +        * The reap timers are started later, with a module init call: That part
51541 +        * of the kernel is not yet operational.
51542 +        */
51543 +}
51544 +
51545 +static int __init cpucache_init(void)
51546 +{
51547 +       int cpu;
51548 +
51549 +       /*
51550 +        * Register the timers that return unneeded pages to the page allocator
51551 +        */
51552 +       for_each_online_cpu(cpu)
51553 +               start_cpu_timer(cpu);
51554 +       return 0;
51555 +}
51556 +__initcall(cpucache_init);
51557 +
51558 +/*
51559 + * Interface to system's page allocator. No need to hold the cache-lock.
51560 + *
51561 + * If we requested dmaable memory, we will get it. Even if we
51562 + * did not request dmaable memory, we might get it, but that
51563 + * would be relatively rare and ignorable.
51564 + */
51565 +static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
51566 +{
51567 +       struct page *page;
51568 +       int nr_pages;
51569 +       int i;
51570 +
51571 +#ifndef CONFIG_MMU
51572 +       /*
51573 +        * Nommu uses slab's for process anonymous memory allocations, and thus
51574 +        * requires __GFP_COMP to properly refcount higher order allocations
51575 +        */
51576 +       flags |= __GFP_COMP;
51577 +#endif
51578 +
51579 +       flags |= cachep->gfpflags;
51580 +
51581 +       page = alloc_pages_node(nodeid, flags, cachep->gfporder);
51582 +       if (!page)
51583 +               return NULL;
51584 +
51585 +       nr_pages = (1 << cachep->gfporder);
51586 +       if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
51587 +               add_zone_page_state(page_zone(page),
51588 +                       NR_SLAB_RECLAIMABLE, nr_pages);
51589 +       else
51590 +               add_zone_page_state(page_zone(page),
51591 +                       NR_SLAB_UNRECLAIMABLE, nr_pages);
51592 +       for (i = 0; i < nr_pages; i++)
51593 +               __SetPageSlab(page + i);
51594 +       return page_address(page);
51595 +}
51596 +
51597 +/*
51598 + * Interface to system's page release.
51599 + */
51600 +static void kmem_freepages(struct kmem_cache *cachep, void *addr)
51601 +{
51602 +       unsigned long i = (1 << cachep->gfporder);
51603 +       struct page *page = virt_to_page(addr);
51604 +       const unsigned long nr_freed = i;
51605 +
51606 +       if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
51607 +               sub_zone_page_state(page_zone(page),
51608 +                               NR_SLAB_RECLAIMABLE, nr_freed);
51609 +       else
51610 +               sub_zone_page_state(page_zone(page),
51611 +                               NR_SLAB_UNRECLAIMABLE, nr_freed);
51612 +       while (i--) {
51613 +               BUG_ON(!PageSlab(page));
51614 +               __ClearPageSlab(page);
51615 +               page++;
51616 +       }
51617 +       if (current->reclaim_state)
51618 +               current->reclaim_state->reclaimed_slab += nr_freed;
51619 +       free_pages((unsigned long)addr, cachep->gfporder);
51620 +}
51621 +
51622 +static void kmem_rcu_free(struct rcu_head *head)
51623 +{
51624 +       struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
51625 +       struct kmem_cache *cachep = slab_rcu->cachep;
51626 +
51627 +       kmem_freepages(cachep, slab_rcu->addr);
51628 +       if (OFF_SLAB(cachep))
51629 +               kmem_cache_free(cachep->slabp_cache, slab_rcu);
51630 +}
51631 +
51632 +#if DEBUG
51633 +
51634 +#ifdef CONFIG_DEBUG_PAGEALLOC
51635 +static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
51636 +                           unsigned long caller)
51637 +{
51638 +       int size = obj_size(cachep);
51639 +
51640 +       addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
51641 +
51642 +       if (size < 5 * sizeof(unsigned long))
51643 +               return;
51644 +
51645 +       *addr++ = 0x12345678;
51646 +       *addr++ = caller;
51647 +       *addr++ = smp_processor_id();
51648 +       size -= 3 * sizeof(unsigned long);
51649 +       {
51650 +               unsigned long *sptr = &caller;
51651 +               unsigned long svalue;
51652 +
51653 +               while (!kstack_end(sptr)) {
51654 +                       svalue = *sptr++;
51655 +                       if (kernel_text_address(svalue)) {
51656 +                               *addr++ = svalue;
51657 +                               size -= sizeof(unsigned long);
51658 +                               if (size <= sizeof(unsigned long))
51659 +                                       break;
51660 +                       }
51661 +               }
51662 +
51663 +       }
51664 +       *addr++ = 0x87654321;
51665 +}
51666 +#endif
51667 +
51668 +static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
51669 +{
51670 +       int size = obj_size(cachep);
51671 +       addr = &((char *)addr)[obj_offset(cachep)];
51672 +
51673 +       memset(addr, val, size);
51674 +       *(unsigned char *)(addr + size - 1) = POISON_END;
51675 +}
51676 +
51677 +static void dump_line(char *data, int offset, int limit)
51678 +{
51679 +       int i;
51680 +       unsigned char error = 0;
51681 +       int bad_count = 0;
51682 +
51683 +       printk(KERN_ERR "%03x:", offset);
51684 +       for (i = 0; i < limit; i++) {
51685 +               if (data[offset + i] != POISON_FREE) {
51686 +                       error = data[offset + i];
51687 +                       bad_count++;
51688 +               }
51689 +               printk(" %02x", (unsigned char)data[offset + i]);
51690 +       }
51691 +       printk("\n");
51692 +
51693 +       if (bad_count == 1) {
51694 +               error ^= POISON_FREE;
51695 +               if (!(error & (error - 1))) {
51696 +                       printk(KERN_ERR "Single bit error detected. Probably "
51697 +                                       "bad RAM.\n");
51698 +#ifdef CONFIG_X86
51699 +                       printk(KERN_ERR "Run memtest86+ or a similar memory "
51700 +                                       "test tool.\n");
51701 +#else
51702 +                       printk(KERN_ERR "Run a memory test tool.\n");
51703 +#endif
51704 +               }
51705 +       }
51706 +}
51707 +#endif
51708 +
51709 +#if DEBUG
51710 +
51711 +static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
51712 +{
51713 +       int i, size;
51714 +       char *realobj;
51715 +
51716 +       if (cachep->flags & SLAB_RED_ZONE) {
51717 +               printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
51718 +                       *dbg_redzone1(cachep, objp),
51719 +                       *dbg_redzone2(cachep, objp));
51720 +       }
51721 +
51722 +       if (cachep->flags & SLAB_STORE_USER) {
51723 +               printk(KERN_ERR "Last user: [<%p>]",
51724 +                       *dbg_userword(cachep, objp));
51725 +               print_symbol("(%s)",
51726 +                               (unsigned long)*dbg_userword(cachep, objp));
51727 +               printk("\n");
51728 +       }
51729 +       realobj = (char *)objp + obj_offset(cachep);
51730 +       size = obj_size(cachep);
51731 +       for (i = 0; i < size && lines; i += 16, lines--) {
51732 +               int limit;
51733 +               limit = 16;
51734 +               if (i + limit > size)
51735 +                       limit = size - i;
51736 +               dump_line(realobj, i, limit);
51737 +       }
51738 +}
51739 +
51740 +static void check_poison_obj(struct kmem_cache *cachep, void *objp)
51741 +{
51742 +       char *realobj;
51743 +       int size, i;
51744 +       int lines = 0;
51745 +
51746 +       realobj = (char *)objp + obj_offset(cachep);
51747 +       size = obj_size(cachep);
51748 +
51749 +       for (i = 0; i < size; i++) {
51750 +               char exp = POISON_FREE;
51751 +               if (i == size - 1)
51752 +                       exp = POISON_END;
51753 +               if (realobj[i] != exp) {
51754 +                       int limit;
51755 +                       /* Mismatch ! */
51756 +                       /* Print header */
51757 +                       if (lines == 0) {
51758 +                               printk(KERN_ERR
51759 +                                       "Slab corruption: %s start=%p, len=%d\n",
51760 +                                       cachep->name, realobj, size);
51761 +                               print_objinfo(cachep, objp, 0);
51762 +                       }
51763 +                       /* Hexdump the affected line */
51764 +                       i = (i / 16) * 16;
51765 +                       limit = 16;
51766 +                       if (i + limit > size)
51767 +                               limit = size - i;
51768 +                       dump_line(realobj, i, limit);
51769 +                       i += 16;
51770 +                       lines++;
51771 +                       /* Limit to 5 lines */
51772 +                       if (lines > 5)
51773 +                               break;
51774 +               }
51775 +       }
51776 +       if (lines != 0) {
51777 +               /* Print some data about the neighboring objects, if they
51778 +                * exist:
51779 +                */
51780 +               struct slab *slabp = virt_to_slab(objp);
51781 +               unsigned int objnr;
51782 +
51783 +               objnr = obj_to_index(cachep, slabp, objp);
51784 +               if (objnr) {
51785 +                       objp = index_to_obj(cachep, slabp, objnr - 1);
51786 +                       realobj = (char *)objp + obj_offset(cachep);
51787 +                       printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
51788 +                              realobj, size);
51789 +                       print_objinfo(cachep, objp, 2);
51790 +               }
51791 +               if (objnr + 1 < cachep->num) {
51792 +                       objp = index_to_obj(cachep, slabp, objnr + 1);
51793 +                       realobj = (char *)objp + obj_offset(cachep);
51794 +                       printk(KERN_ERR "Next obj: start=%p, len=%d\n",
51795 +                              realobj, size);
51796 +                       print_objinfo(cachep, objp, 2);
51797 +               }
51798 +       }
51799 +}
51800 +#endif
51801 +
51802 +#if DEBUG
51803 +/**
51804 + * slab_destroy_objs - destroy a slab and its objects
51805 + * @cachep: cache pointer being destroyed
51806 + * @slabp: slab pointer being destroyed
51807 + *
51808 + * Call the registered destructor for each object in a slab that is being
51809 + * destroyed.
51810 + */
51811 +static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
51812 +{
51813 +       int i;
51814 +       for (i = 0; i < cachep->num; i++) {
51815 +               void *objp = index_to_obj(cachep, slabp, i);
51816 +
51817 +               if (cachep->flags & SLAB_POISON) {
51818 +#ifdef CONFIG_DEBUG_PAGEALLOC
51819 +                       if (cachep->buffer_size % PAGE_SIZE == 0 &&
51820 +                                       OFF_SLAB(cachep))
51821 +                               kernel_map_pages(virt_to_page(objp),
51822 +                                       cachep->buffer_size / PAGE_SIZE, 1);
51823 +                       else
51824 +                               check_poison_obj(cachep, objp);
51825 +#else
51826 +                       check_poison_obj(cachep, objp);
51827 +#endif
51828 +               }
51829 +               if (cachep->flags & SLAB_RED_ZONE) {
51830 +                       if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
51831 +                               slab_error(cachep, "start of a freed object "
51832 +                                          "was overwritten");
51833 +                       if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
51834 +                               slab_error(cachep, "end of a freed object "
51835 +                                          "was overwritten");
51836 +               }
51837 +       }
51838 +}
51839 +#else
51840 +static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
51841 +{
51842 +}
51843 +#endif
51844 +
51845 +/**
51846 + * slab_destroy - destroy and release all objects in a slab
51847 + * @cachep: cache pointer being destroyed
51848 + * @slabp: slab pointer being destroyed
51849 + *
51850 + * Destroy all the objs in a slab, and release the mem back to the system.
51851 + * Before calling the slab must have been unlinked from the cache.  The
51852 + * cache-lock is not held/needed.
51853 + */
51854 +static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
51855 +{
51856 +       void *addr = slabp->s_mem - slabp->colouroff;
51857 +
51858 +       slab_destroy_objs(cachep, slabp);
51859 +       if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
51860 +               struct slab_rcu *slab_rcu;
51861 +
51862 +               slab_rcu = (struct slab_rcu *)slabp;
51863 +               slab_rcu->cachep = cachep;
51864 +               slab_rcu->addr = addr;
51865 +               call_rcu(&slab_rcu->head, kmem_rcu_free);
51866 +       } else {
51867 +               kmem_freepages(cachep, addr);
51868 +               if (OFF_SLAB(cachep))
51869 +                       kmem_cache_free(cachep->slabp_cache, slabp);
51870 +       }
51871 +}
51872 +
51873 +/*
51874 + * For setting up all the kmem_list3s for cache whose buffer_size is same as
51875 + * size of kmem_list3.
51876 + */
51877 +static void __init set_up_list3s(struct kmem_cache *cachep, int index)
51878 +{
51879 +       int node;
51880 +
51881 +       for_each_online_node(node) {
51882 +               cachep->nodelists[node] = &initkmem_list3[index + node];
51883 +               cachep->nodelists[node]->next_reap = jiffies +
51884 +                   REAPTIMEOUT_LIST3 +
51885 +                   ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
51886 +       }
51887 +}
51888 +
51889 +static void __kmem_cache_destroy(struct kmem_cache *cachep)
51890 +{
51891 +       int i;
51892 +       struct kmem_list3 *l3;
51893 +
51894 +       for_each_online_cpu(i)
51895 +           kfree(cachep->array[i]);
51896 +
51897 +       /* NUMA: free the list3 structures */
51898 +       for_each_online_node(i) {
51899 +               l3 = cachep->nodelists[i];
51900 +               if (l3) {
51901 +                       kfree(l3->shared);
51902 +                       free_alien_cache(l3->alien);
51903 +                       kfree(l3);
51904 +               }
51905 +       }
51906 +       kmem_cache_free(&cache_cache, cachep);
51907 +}
51908 +
51909 +
51910 +/**
51911 + * calculate_slab_order - calculate size (page order) of slabs
51912 + * @cachep: pointer to the cache that is being created
51913 + * @size: size of objects to be created in this cache.
51914 + * @align: required alignment for the objects.
51915 + * @flags: slab allocation flags
51916 + *
51917 + * Also calculates the number of objects per slab.
51918 + *
51919 + * This could be made much more intelligent.  For now, try to avoid using
51920 + * high order pages for slabs.  When the gfp() functions are more friendly
51921 + * towards high-order requests, this should be changed.
51922 + */
51923 +static size_t calculate_slab_order(struct kmem_cache *cachep,
51924 +                       size_t size, size_t align, unsigned long flags)
51925 +{
51926 +       unsigned long offslab_limit;
51927 +       size_t left_over = 0;
51928 +       int gfporder;
51929 +
51930 +       for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
51931 +               unsigned int num;
51932 +               size_t remainder;
51933 +
51934 +               cache_estimate(gfporder, size, align, flags, &remainder, &num);
51935 +               if (!num)
51936 +                       continue;
51937 +
51938 +               if (flags & CFLGS_OFF_SLAB) {
51939 +                       /*
51940 +                        * Max number of objs-per-slab for caches which
51941 +                        * use off-slab slabs. Needed to avoid a possible
51942 +                        * looping condition in cache_grow().
51943 +                        */
51944 +                       offslab_limit = size - sizeof(struct slab);
51945 +                       offslab_limit /= sizeof(kmem_bufctl_t);
51946 +
51947 +                       if (num > offslab_limit)
51948 +                               break;
51949 +               }
51950 +
51951 +               /* Found something acceptable - save it away */
51952 +               cachep->num = num;
51953 +               cachep->gfporder = gfporder;
51954 +               left_over = remainder;
51955 +
51956 +               /*
51957 +                * A VFS-reclaimable slab tends to have most allocations
51958 +                * as GFP_NOFS and we really don't want to have to be allocating
51959 +                * higher-order pages when we are unable to shrink dcache.
51960 +                */
51961 +               if (flags & SLAB_RECLAIM_ACCOUNT)
51962 +                       break;
51963 +
51964 +               /*
51965 +                * Large number of objects is good, but very large slabs are
51966 +                * currently bad for the gfp()s.
51967 +                */
51968 +               if (gfporder >= slab_break_gfp_order)
51969 +                       break;
51970 +
51971 +               /*
51972 +                * Acceptable internal fragmentation?
51973 +                */
51974 +               if (left_over * 8 <= (PAGE_SIZE << gfporder))
51975 +                       break;
51976 +       }
51977 +       return left_over;
51978 +}
51979 +
51980 +static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
51981 +{
51982 +       if (g_cpucache_up == FULL)
51983 +               return enable_cpucache(cachep);
51984 +
51985 +       if (g_cpucache_up == NONE) {
51986 +               /*
51987 +                * Note: the first kmem_cache_create must create the cache
51988 +                * that's used by kmalloc(24), otherwise the creation of
51989 +                * further caches will BUG().
51990 +                */
51991 +               cachep->array[smp_processor_id()] = &initarray_generic.cache;
51992 +
51993 +               /*
51994 +                * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
51995 +                * the first cache, then we need to set up all its list3s,
51996 +                * otherwise the creation of further caches will BUG().
51997 +                */
51998 +               set_up_list3s(cachep, SIZE_AC);
51999 +               if (INDEX_AC == INDEX_L3)
52000 +                       g_cpucache_up = PARTIAL_L3;
52001 +               else
52002 +                       g_cpucache_up = PARTIAL_AC;
52003 +       } else {
52004 +               cachep->array[smp_processor_id()] =
52005 +                       kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
52006 +
52007 +               if (g_cpucache_up == PARTIAL_AC) {
52008 +                       set_up_list3s(cachep, SIZE_L3);
52009 +                       g_cpucache_up = PARTIAL_L3;
52010 +               } else {
52011 +                       int node;
52012 +                       for_each_online_node(node) {
52013 +                               cachep->nodelists[node] =
52014 +                                   kmalloc_node(sizeof(struct kmem_list3),
52015 +                                               GFP_KERNEL, node);
52016 +                               BUG_ON(!cachep->nodelists[node]);
52017 +                               kmem_list3_init(cachep->nodelists[node]);
52018 +                       }
52019 +               }
52020 +       }
52021 +       cachep->nodelists[numa_node_id()]->next_reap =
52022 +                       jiffies + REAPTIMEOUT_LIST3 +
52023 +                       ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
52024 +
52025 +       cpu_cache_get(cachep)->avail = 0;
52026 +       cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
52027 +       cpu_cache_get(cachep)->batchcount = 1;
52028 +       cpu_cache_get(cachep)->touched = 0;
52029 +       cachep->batchcount = 1;
52030 +       cachep->limit = BOOT_CPUCACHE_ENTRIES;
52031 +       return 0;
52032 +}
52033 +
52034 +/**
52035 + * kmem_cache_create - Create a cache.
52036 + * @name: A string which is used in /proc/slabinfo to identify this cache.
52037 + * @size: The size of objects to be created in this cache.
52038 + * @align: The required alignment for the objects.
52039 + * @flags: SLAB flags
52040 + * @ctor: A constructor for the objects.
52041 + * @dtor: A destructor for the objects (not implemented anymore).
52042 + *
52043 + * Returns a ptr to the cache on success, NULL on failure.
52044 + * Cannot be called within a int, but can be interrupted.
52045 + * The @ctor is run when new pages are allocated by the cache
52046 + * and the @dtor is run before the pages are handed back.
52047 + *
52048 + * @name must be valid until the cache is destroyed. This implies that
52049 + * the module calling this has to destroy the cache before getting unloaded.
52050 + *
52051 + * The flags are
52052 + *
52053 + * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
52054 + * to catch references to uninitialised memory.
52055 + *
52056 + * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
52057 + * for buffer overruns.
52058 + *
52059 + * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
52060 + * cacheline.  This can be beneficial if you're counting cycles as closely
52061 + * as davem.
52062 + */
52063 +struct kmem_cache *
52064 +kmem_cache_create (const char *name, size_t size, size_t align,
52065 +       unsigned long flags,
52066 +       void (*ctor)(void*, struct kmem_cache *, unsigned long),
52067 +       void (*dtor)(void*, struct kmem_cache *, unsigned long))
52068 +{
52069 +       size_t left_over, slab_size, ralign;
52070 +       struct kmem_cache *cachep = NULL, *pc;
52071 +
52072 +       /*
52073 +        * Sanity checks... these are all serious usage bugs.
52074 +        */
52075 +       if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
52076 +           size > KMALLOC_MAX_SIZE || dtor) {
52077 +               printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
52078 +                               name);
52079 +               BUG();
52080 +       }
52081 +
52082 +       /*
52083 +        * We use cache_chain_mutex to ensure a consistent view of
52084 +        * cpu_online_map as well.  Please see cpuup_callback
52085 +        */
52086 +       mutex_lock(&cache_chain_mutex);
52087 +
52088 +       list_for_each_entry(pc, &cache_chain, next) {
52089 +               char tmp;
52090 +               int res;
52091 +
52092 +               /*
52093 +                * This happens when the module gets unloaded and doesn't
52094 +                * destroy its slab cache and no-one else reuses the vmalloc
52095 +                * area of the module.  Print a warning.
52096 +                */
52097 +               res = probe_kernel_address(pc->name, tmp);
52098 +               if (res) {
52099 +                       printk(KERN_ERR
52100 +                              "SLAB: cache with size %d has lost its name\n",
52101 +                              pc->buffer_size);
52102 +                       continue;
52103 +               }
52104 +
52105 +               if (!strcmp(pc->name, name)) {
52106 +                       printk(KERN_ERR
52107 +                              "kmem_cache_create: duplicate cache %s\n", name);
52108 +                       dump_stack();
52109 +                       goto oops;
52110 +               }
52111 +       }
52112 +
52113 +#if DEBUG
52114 +       WARN_ON(strchr(name, ' '));     /* It confuses parsers */
52115 +#if FORCED_DEBUG
52116 +       /*
52117 +        * Enable redzoning and last user accounting, except for caches with
52118 +        * large objects, if the increased size would increase the object size
52119 +        * above the next power of two: caches with object sizes just above a
52120 +        * power of two have a significant amount of internal fragmentation.
52121 +        */
52122 +       if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
52123 +                                               2 * sizeof(unsigned long long)))
52124 +               flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
52125 +       if (!(flags & SLAB_DESTROY_BY_RCU))
52126 +               flags |= SLAB_POISON;
52127 +#endif
52128 +       if (flags & SLAB_DESTROY_BY_RCU)
52129 +               BUG_ON(flags & SLAB_POISON);
52130 +#endif
52131 +       /*
52132 +        * Always checks flags, a caller might be expecting debug support which
52133 +        * isn't available.
52134 +        */
52135 +       BUG_ON(flags & ~CREATE_MASK);
52136 +
52137 +       /*
52138 +        * Check that size is in terms of words.  This is needed to avoid
52139 +        * unaligned accesses for some archs when redzoning is used, and makes
52140 +        * sure any on-slab bufctl's are also correctly aligned.
52141 +        */
52142 +       if (size & (BYTES_PER_WORD - 1)) {
52143 +               size += (BYTES_PER_WORD - 1);
52144 +               size &= ~(BYTES_PER_WORD - 1);
52145 +       }
52146 +
52147 +       /* calculate the final buffer alignment: */
52148 +
52149 +       /* 1) arch recommendation: can be overridden for debug */
52150 +       if (flags & SLAB_HWCACHE_ALIGN) {
52151 +               /*
52152 +                * Default alignment: as specified by the arch code.  Except if
52153 +                * an object is really small, then squeeze multiple objects into
52154 +                * one cacheline.
52155 +                */
52156 +               ralign = cache_line_size();
52157 +               while (size <= ralign / 2)
52158 +                       ralign /= 2;
52159 +       } else {
52160 +               ralign = BYTES_PER_WORD;
52161 +       }
52162 +
52163 +       /*
52164 +        * Redzoning and user store require word alignment or possibly larger.
52165 +        * Note this will be overridden by architecture or caller mandated
52166 +        * alignment if either is greater than BYTES_PER_WORD.
52167 +        */
52168 +       if (flags & SLAB_STORE_USER)
52169 +               ralign = BYTES_PER_WORD;
52170 +
52171 +       if (flags & SLAB_RED_ZONE) {
52172 +               ralign = REDZONE_ALIGN;
52173 +               /* If redzoning, ensure that the second redzone is suitably
52174 +                * aligned, by adjusting the object size accordingly. */
52175 +               size += REDZONE_ALIGN - 1;
52176 +               size &= ~(REDZONE_ALIGN - 1);
52177 +       }
52178 +
52179 +       /* 2) arch mandated alignment */
52180 +       if (ralign < ARCH_SLAB_MINALIGN) {
52181 +               ralign = ARCH_SLAB_MINALIGN;
52182 +       }
52183 +       /* 3) caller mandated alignment */
52184 +       if (ralign < align) {
52185 +               ralign = align;
52186 +       }
52187 +       /* disable debug if necessary */
52188 +       if (ralign > __alignof__(unsigned long long))
52189 +               flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
52190 +       /*
52191 +        * 4) Store it.
52192 +        */
52193 +       align = ralign;
52194 +
52195 +       /* Get cache's description obj. */
52196 +       cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
52197 +       if (!cachep)
52198 +               goto oops;
52199 +
52200 +#if DEBUG
52201 +       cachep->obj_size = size;
52202 +
52203 +       /*
52204 +        * Both debugging options require word-alignment which is calculated
52205 +        * into align above.
52206 +        */
52207 +       if (flags & SLAB_RED_ZONE) {
52208 +               /* add space for red zone words */
52209 +               cachep->obj_offset += sizeof(unsigned long long);
52210 +               size += 2 * sizeof(unsigned long long);
52211 +       }
52212 +       if (flags & SLAB_STORE_USER) {
52213 +               /* user store requires one word storage behind the end of
52214 +                * the real object. But if the second red zone needs to be
52215 +                * aligned to 64 bits, we must allow that much space.
52216 +                */
52217 +               if (flags & SLAB_RED_ZONE)
52218 +                       size += REDZONE_ALIGN;
52219 +               else
52220 +                       size += BYTES_PER_WORD;
52221 +       }
52222 +#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
52223 +       if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
52224 +           && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
52225 +               cachep->obj_offset += PAGE_SIZE - size;
52226 +               size = PAGE_SIZE;
52227 +       }
52228 +#endif
52229 +#endif
52230 +
52231 +       /*
52232 +        * Determine if the slab management is 'on' or 'off' slab.
52233 +        * (bootstrapping cannot cope with offslab caches so don't do
52234 +        * it too early on.)
52235 +        */
52236 +       if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
52237 +               /*
52238 +                * Size is large, assume best to place the slab management obj
52239 +                * off-slab (should allow better packing of objs).
52240 +                */
52241 +               flags |= CFLGS_OFF_SLAB;
52242 +
52243 +       size = ALIGN(size, align);
52244 +
52245 +       left_over = calculate_slab_order(cachep, size, align, flags);
52246 +
52247 +       if (!cachep->num) {
52248 +               printk(KERN_ERR
52249 +                      "kmem_cache_create: couldn't create cache %s.\n", name);
52250 +               kmem_cache_free(&cache_cache, cachep);
52251 +               cachep = NULL;
52252 +               goto oops;
52253 +       }
52254 +       slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
52255 +                         + sizeof(struct slab), align);
52256 +
52257 +       /*
52258 +        * If the slab has been placed off-slab, and we have enough space then
52259 +        * move it on-slab. This is at the expense of any extra colouring.
52260 +        */
52261 +       if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
52262 +               flags &= ~CFLGS_OFF_SLAB;
52263 +               left_over -= slab_size;
52264 +       }
52265 +
52266 +       if (flags & CFLGS_OFF_SLAB) {
52267 +               /* really off slab. No need for manual alignment */
52268 +               slab_size =
52269 +                   cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
52270 +       }
52271 +
52272 +       cachep->colour_off = cache_line_size();
52273 +       /* Offset must be a multiple of the alignment. */
52274 +       if (cachep->colour_off < align)
52275 +               cachep->colour_off = align;
52276 +       cachep->colour = left_over / cachep->colour_off;
52277 +       cachep->slab_size = slab_size;
52278 +       cachep->flags = flags;
52279 +       cachep->gfpflags = 0;
52280 +       if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
52281 +               cachep->gfpflags |= GFP_DMA;
52282 +       cachep->buffer_size = size;
52283 +       cachep->reciprocal_buffer_size = reciprocal_value(size);
52284 +
52285 +       if (flags & CFLGS_OFF_SLAB) {
52286 +               cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
52287 +               /*
52288 +                * This is a possibility for one of the malloc_sizes caches.
52289 +                * But since we go off slab only for object size greater than
52290 +                * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
52291 +                * this should not happen at all.
52292 +                * But leave a BUG_ON for some lucky dude.
52293 +                */
52294 +               BUG_ON(!cachep->slabp_cache);
52295 +       }
52296 +       cachep->ctor = ctor;
52297 +       cachep->name = name;
52298 +
52299 +       if (setup_cpu_cache(cachep)) {
52300 +               __kmem_cache_destroy(cachep);
52301 +               cachep = NULL;
52302 +               goto oops;
52303 +       }
52304 +
52305 +       /* cache setup completed, link it into the list */
52306 +       list_add(&cachep->next, &cache_chain);
52307 +oops:
52308 +       if (!cachep && (flags & SLAB_PANIC))
52309 +               panic("kmem_cache_create(): failed to create slab `%s'\n",
52310 +                     name);
52311 +       mutex_unlock(&cache_chain_mutex);
52312 +       return cachep;
52313 +}
52314 +EXPORT_SYMBOL(kmem_cache_create);
52315 +
52316 +#if DEBUG
52317 +static void check_irq_off(void)
52318 +{
52319 +       BUG_ON(!irqs_disabled());
52320 +}
52321 +
52322 +static void check_irq_on(void)
52323 +{
52324 +       BUG_ON(irqs_disabled());
52325 +}
52326 +
52327 +static void check_spinlock_acquired(struct kmem_cache *cachep)
52328 +{
52329 +#ifdef CONFIG_SMP
52330 +       check_irq_off();
52331 +       assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
52332 +#endif
52333 +}
52334 +
52335 +static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
52336 +{
52337 +#ifdef CONFIG_SMP
52338 +       check_irq_off();
52339 +       assert_spin_locked(&cachep->nodelists[node]->list_lock);
52340 +#endif
52341 +}
52342 +
52343 +#else
52344 +#define check_irq_off()        do { } while(0)
52345 +#define check_irq_on() do { } while(0)
52346 +#define check_spinlock_acquired(x) do { } while(0)
52347 +#define check_spinlock_acquired_node(x, y) do { } while(0)
52348 +#endif
52349 +
52350 +static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
52351 +                       struct array_cache *ac,
52352 +                       int force, int node);
52353 +
52354 +static void do_drain(void *arg)
52355 +{
52356 +       struct kmem_cache *cachep = arg;
52357 +       struct array_cache *ac;
52358 +       int node = numa_node_id();
52359 +
52360 +       check_irq_off();
52361 +       ac = cpu_cache_get(cachep);
52362 +       spin_lock(&cachep->nodelists[node]->list_lock);
52363 +       free_block(cachep, ac->entry, ac->avail, node);
52364 +       spin_unlock(&cachep->nodelists[node]->list_lock);
52365 +       ac->avail = 0;
52366 +}
52367 +
52368 +static void drain_cpu_caches(struct kmem_cache *cachep)
52369 +{
52370 +       struct kmem_list3 *l3;
52371 +       int node;
52372 +
52373 +       on_each_cpu(do_drain, cachep, 1, 1);
52374 +       check_irq_on();
52375 +       for_each_online_node(node) {
52376 +               l3 = cachep->nodelists[node];
52377 +               if (l3 && l3->alien)
52378 +                       drain_alien_cache(cachep, l3->alien);
52379 +       }
52380 +
52381 +       for_each_online_node(node) {
52382 +               l3 = cachep->nodelists[node];
52383 +               if (l3)
52384 +                       drain_array(cachep, l3, l3->shared, 1, node);
52385 +       }
52386 +}
52387 +
52388 +/*
52389 + * Remove slabs from the list of free slabs.
52390 + * Specify the number of slabs to drain in tofree.
52391 + *
52392 + * Returns the actual number of slabs released.
52393 + */
52394 +static int drain_freelist(struct kmem_cache *cache,
52395 +                       struct kmem_list3 *l3, int tofree)
52396 +{
52397 +       struct list_head *p;
52398 +       int nr_freed;
52399 +       struct slab *slabp;
52400 +
52401 +       nr_freed = 0;
52402 +       while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
52403 +
52404 +               spin_lock_irq(&l3->list_lock);
52405 +               p = l3->slabs_free.prev;
52406 +               if (p == &l3->slabs_free) {
52407 +                       spin_unlock_irq(&l3->list_lock);
52408 +                       goto out;
52409 +               }
52410 +
52411 +               slabp = list_entry(p, struct slab, list);
52412 +#if DEBUG
52413 +               BUG_ON(slabp->inuse);
52414 +#endif
52415 +               list_del(&slabp->list);
52416 +               /*
52417 +                * Safe to drop the lock. The slab is no longer linked
52418 +                * to the cache.
52419 +                */
52420 +               l3->free_objects -= cache->num;
52421 +               spin_unlock_irq(&l3->list_lock);
52422 +               slab_destroy(cache, slabp);
52423 +               nr_freed++;
52424 +       }
52425 +out:
52426 +       return nr_freed;
52427 +}
52428 +
52429 +/* Called with cache_chain_mutex held to protect against cpu hotplug */
52430 +static int __cache_shrink(struct kmem_cache *cachep)
52431 +{
52432 +       int ret = 0, i = 0;
52433 +       struct kmem_list3 *l3;
52434 +
52435 +       drain_cpu_caches(cachep);
52436 +
52437 +       check_irq_on();
52438 +       for_each_online_node(i) {
52439 +               l3 = cachep->nodelists[i];
52440 +               if (!l3)
52441 +                       continue;
52442 +
52443 +               drain_freelist(cachep, l3, l3->free_objects);
52444 +
52445 +               ret += !list_empty(&l3->slabs_full) ||
52446 +                       !list_empty(&l3->slabs_partial);
52447 +       }
52448 +       return (ret ? 1 : 0);
52449 +}
52450 +
52451 +/**
52452 + * kmem_cache_shrink - Shrink a cache.
52453 + * @cachep: The cache to shrink.
52454 + *
52455 + * Releases as many slabs as possible for a cache.
52456 + * To help debugging, a zero exit status indicates all slabs were released.
52457 + */
52458 +int kmem_cache_shrink(struct kmem_cache *cachep)
52459 +{
52460 +       int ret;
52461 +       BUG_ON(!cachep || in_interrupt());
52462 +
52463 +       mutex_lock(&cache_chain_mutex);
52464 +       ret = __cache_shrink(cachep);
52465 +       mutex_unlock(&cache_chain_mutex);
52466 +       return ret;
52467 +}
52468 +EXPORT_SYMBOL(kmem_cache_shrink);
52469 +
52470 +/**
52471 + * kmem_cache_destroy - delete a cache
52472 + * @cachep: the cache to destroy
52473 + *
52474 + * Remove a &struct kmem_cache object from the slab cache.
52475 + *
52476 + * It is expected this function will be called by a module when it is
52477 + * unloaded.  This will remove the cache completely, and avoid a duplicate
52478 + * cache being allocated each time a module is loaded and unloaded, if the
52479 + * module doesn't have persistent in-kernel storage across loads and unloads.
52480 + *
52481 + * The cache must be empty before calling this function.
52482 + *
52483 + * The caller must guarantee that noone will allocate memory from the cache
52484 + * during the kmem_cache_destroy().
52485 + */
52486 +void kmem_cache_destroy(struct kmem_cache *cachep)
52487 +{
52488 +       BUG_ON(!cachep || in_interrupt());
52489 +
52490 +       /* Find the cache in the chain of caches. */
52491 +       mutex_lock(&cache_chain_mutex);
52492 +       /*
52493 +        * the chain is never empty, cache_cache is never destroyed
52494 +        */
52495 +       list_del(&cachep->next);
52496 +       if (__cache_shrink(cachep)) {
52497 +               slab_error(cachep, "Can't free all objects");
52498 +               list_add(&cachep->next, &cache_chain);
52499 +               mutex_unlock(&cache_chain_mutex);
52500 +               return;
52501 +       }
52502 +
52503 +       if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
52504 +               synchronize_rcu();
52505 +
52506 +       __kmem_cache_destroy(cachep);
52507 +       mutex_unlock(&cache_chain_mutex);
52508 +}
52509 +EXPORT_SYMBOL(kmem_cache_destroy);
52510 +
52511 +/*
52512 + * Get the memory for a slab management obj.
52513 + * For a slab cache when the slab descriptor is off-slab, slab descriptors
52514 + * always come from malloc_sizes caches.  The slab descriptor cannot
52515 + * come from the same cache which is getting created because,
52516 + * when we are searching for an appropriate cache for these
52517 + * descriptors in kmem_cache_create, we search through the malloc_sizes array.
52518 + * If we are creating a malloc_sizes cache here it would not be visible to
52519 + * kmem_find_general_cachep till the initialization is complete.
52520 + * Hence we cannot have slabp_cache same as the original cache.
52521 + */
52522 +static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
52523 +                                  int colour_off, gfp_t local_flags,
52524 +                                  int nodeid)
52525 +{
52526 +       struct slab *slabp;
52527 +
52528 +       if (OFF_SLAB(cachep)) {
52529 +               /* Slab management obj is off-slab. */
52530 +               slabp = kmem_cache_alloc_node(cachep->slabp_cache,
52531 +                                             local_flags & ~GFP_THISNODE, nodeid);
52532 +               if (!slabp)
52533 +                       return NULL;
52534 +       } else {
52535 +               slabp = objp + colour_off;
52536 +               colour_off += cachep->slab_size;
52537 +       }
52538 +       slabp->inuse = 0;
52539 +       slabp->colouroff = colour_off;
52540 +       slabp->s_mem = objp + colour_off;
52541 +       slabp->nodeid = nodeid;
52542 +       return slabp;
52543 +}
52544 +
52545 +static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
52546 +{
52547 +       return (kmem_bufctl_t *) (slabp + 1);
52548 +}
52549 +
52550 +static void cache_init_objs(struct kmem_cache *cachep,
52551 +                           struct slab *slabp)
52552 +{
52553 +       int i;
52554 +
52555 +       for (i = 0; i < cachep->num; i++) {
52556 +               void *objp = index_to_obj(cachep, slabp, i);
52557 +#if DEBUG
52558 +               /* need to poison the objs? */
52559 +               if (cachep->flags & SLAB_POISON)
52560 +                       poison_obj(cachep, objp, POISON_FREE);
52561 +               if (cachep->flags & SLAB_STORE_USER)
52562 +                       *dbg_userword(cachep, objp) = NULL;
52563 +
52564 +               if (cachep->flags & SLAB_RED_ZONE) {
52565 +                       *dbg_redzone1(cachep, objp) = RED_INACTIVE;
52566 +                       *dbg_redzone2(cachep, objp) = RED_INACTIVE;
52567 +               }
52568 +               /*
52569 +                * Constructors are not allowed to allocate memory from the same
52570 +                * cache which they are a constructor for.  Otherwise, deadlock.
52571 +                * They must also be threaded.
52572 +                */
52573 +               if (cachep->ctor && !(cachep->flags & SLAB_POISON))
52574 +                       cachep->ctor(objp + obj_offset(cachep), cachep,
52575 +                                    0);
52576 +
52577 +               if (cachep->flags & SLAB_RED_ZONE) {
52578 +                       if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
52579 +                               slab_error(cachep, "constructor overwrote the"
52580 +                                          " end of an object");
52581 +                       if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
52582 +                               slab_error(cachep, "constructor overwrote the"
52583 +                                          " start of an object");
52584 +               }
52585 +               if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
52586 +                           OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
52587 +                       kernel_map_pages(virt_to_page(objp),
52588 +                                        cachep->buffer_size / PAGE_SIZE, 0);
52589 +#else
52590 +               if (cachep->ctor)
52591 +                       cachep->ctor(objp, cachep, 0);
52592 +#endif
52593 +               slab_bufctl(slabp)[i] = i + 1;
52594 +       }
52595 +       slab_bufctl(slabp)[i - 1] = BUFCTL_END;
52596 +       slabp->free = 0;
52597 +}
52598 +
52599 +static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
52600 +{
52601 +       if (CONFIG_ZONE_DMA_FLAG) {
52602 +               if (flags & GFP_DMA)
52603 +                       BUG_ON(!(cachep->gfpflags & GFP_DMA));
52604 +               else
52605 +                       BUG_ON(cachep->gfpflags & GFP_DMA);
52606 +       }
52607 +}
52608 +
52609 +static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
52610 +                               int nodeid)
52611 +{
52612 +       void *objp = index_to_obj(cachep, slabp, slabp->free);
52613 +       kmem_bufctl_t next;
52614 +
52615 +       slabp->inuse++;
52616 +       next = slab_bufctl(slabp)[slabp->free];
52617 +#if DEBUG
52618 +       slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
52619 +       WARN_ON(slabp->nodeid != nodeid);
52620 +#endif
52621 +       slabp->free = next;
52622 +
52623 +       return objp;
52624 +}
52625 +
52626 +static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
52627 +                               void *objp, int nodeid)
52628 +{
52629 +       unsigned int objnr = obj_to_index(cachep, slabp, objp);
52630 +
52631 +#if DEBUG
52632 +       /* Verify that the slab belongs to the intended node */
52633 +       WARN_ON(slabp->nodeid != nodeid);
52634 +
52635 +       if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
52636 +               printk(KERN_ERR "slab: double free detected in cache "
52637 +                               "'%s', objp %p\n", cachep->name, objp);
52638 +               BUG();
52639 +       }
52640 +#endif
52641 +       slab_bufctl(slabp)[objnr] = slabp->free;
52642 +       slabp->free = objnr;
52643 +       slabp->inuse--;
52644 +}
52645 +
52646 +/*
52647 + * Map pages beginning at addr to the given cache and slab. This is required
52648 + * for the slab allocator to be able to lookup the cache and slab of a
52649 + * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
52650 + */
52651 +static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
52652 +                          void *addr)
52653 +{
52654 +       int nr_pages;
52655 +       struct page *page;
52656 +
52657 +       page = virt_to_page(addr);
52658 +
52659 +       nr_pages = 1;
52660 +       if (likely(!PageCompound(page)))
52661 +               nr_pages <<= cache->gfporder;
52662 +
52663 +       do {
52664 +               page_set_cache(page, cache);
52665 +               page_set_slab(page, slab);
52666 +               page++;
52667 +       } while (--nr_pages);
52668 +}
52669 +
52670 +/*
52671 + * Grow (by 1) the number of slabs within a cache.  This is called by
52672 + * kmem_cache_alloc() when there are no active objs left in a cache.
52673 + */
52674 +static int cache_grow(struct kmem_cache *cachep,
52675 +               gfp_t flags, int nodeid, void *objp)
52676 +{
52677 +       struct slab *slabp;
52678 +       size_t offset;
52679 +       gfp_t local_flags;
52680 +       struct kmem_list3 *l3;
52681 +
52682 +       /*
52683 +        * Be lazy and only check for valid flags here,  keeping it out of the
52684 +        * critical path in kmem_cache_alloc().
52685 +        */
52686 +       BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
52687 +
52688 +       local_flags = (flags & GFP_LEVEL_MASK);
52689 +       /* Take the l3 list lock to change the colour_next on this node */
52690 +       check_irq_off();
52691 +       l3 = cachep->nodelists[nodeid];
52692 +       spin_lock(&l3->list_lock);
52693 +
52694 +       /* Get colour for the slab, and cal the next value. */
52695 +       offset = l3->colour_next;
52696 +       l3->colour_next++;
52697 +       if (l3->colour_next >= cachep->colour)
52698 +               l3->colour_next = 0;
52699 +       spin_unlock(&l3->list_lock);
52700 +
52701 +       offset *= cachep->colour_off;
52702 +
52703 +       if (local_flags & __GFP_WAIT)
52704 +               local_irq_enable();
52705 +
52706 +       /*
52707 +        * The test for missing atomic flag is performed here, rather than
52708 +        * the more obvious place, simply to reduce the critical path length
52709 +        * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
52710 +        * will eventually be caught here (where it matters).
52711 +        */
52712 +       kmem_flagcheck(cachep, flags);
52713 +
52714 +       /*
52715 +        * Get mem for the objs.  Attempt to allocate a physical page from
52716 +        * 'nodeid'.
52717 +        */
52718 +       if (!objp)
52719 +               objp = kmem_getpages(cachep, flags, nodeid);
52720 +       if (!objp)
52721 +               goto failed;
52722 +
52723 +       /* Get slab management. */
52724 +       slabp = alloc_slabmgmt(cachep, objp, offset,
52725 +                       local_flags & ~GFP_THISNODE, nodeid);
52726 +       if (!slabp)
52727 +               goto opps1;
52728 +
52729 +       slabp->nodeid = nodeid;
52730 +       slab_map_pages(cachep, slabp, objp);
52731 +
52732 +       cache_init_objs(cachep, slabp);
52733 +
52734 +       if (local_flags & __GFP_WAIT)
52735 +               local_irq_disable();
52736 +       check_irq_off();
52737 +       spin_lock(&l3->list_lock);
52738 +
52739 +       /* Make slab active. */
52740 +       list_add_tail(&slabp->list, &(l3->slabs_free));
52741 +       STATS_INC_GROWN(cachep);
52742 +       l3->free_objects += cachep->num;
52743 +       spin_unlock(&l3->list_lock);
52744 +       return 1;
52745 +opps1:
52746 +       kmem_freepages(cachep, objp);
52747 +failed:
52748 +       if (local_flags & __GFP_WAIT)
52749 +               local_irq_disable();
52750 +       return 0;
52751 +}
52752 +
52753 +#if DEBUG
52754 +
52755 +/*
52756 + * Perform extra freeing checks:
52757 + * - detect bad pointers.
52758 + * - POISON/RED_ZONE checking
52759 + */
52760 +static void kfree_debugcheck(const void *objp)
52761 +{
52762 +       if (!virt_addr_valid(objp)) {
52763 +               printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
52764 +                      (unsigned long)objp);
52765 +               BUG();
52766 +       }
52767 +}
52768 +
52769 +static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
52770 +{
52771 +       unsigned long long redzone1, redzone2;
52772 +
52773 +       redzone1 = *dbg_redzone1(cache, obj);
52774 +       redzone2 = *dbg_redzone2(cache, obj);
52775 +
52776 +       /*
52777 +        * Redzone is ok.
52778 +        */
52779 +       if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
52780 +               return;
52781 +
52782 +       if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
52783 +               slab_error(cache, "double free detected");
52784 +       else
52785 +               slab_error(cache, "memory outside object was overwritten");
52786 +
52787 +       printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
52788 +                       obj, redzone1, redzone2);
52789 +}
52790 +
52791 +static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
52792 +                                  void *caller)
52793 +{
52794 +       struct page *page;
52795 +       unsigned int objnr;
52796 +       struct slab *slabp;
52797 +
52798 +       objp -= obj_offset(cachep);
52799 +       kfree_debugcheck(objp);
52800 +       page = virt_to_head_page(objp);
52801 +
52802 +       slabp = page_get_slab(page);
52803 +
52804 +       if (cachep->flags & SLAB_RED_ZONE) {
52805 +               verify_redzone_free(cachep, objp);
52806 +               *dbg_redzone1(cachep, objp) = RED_INACTIVE;
52807 +               *dbg_redzone2(cachep, objp) = RED_INACTIVE;
52808 +       }
52809 +       if (cachep->flags & SLAB_STORE_USER)
52810 +               *dbg_userword(cachep, objp) = caller;
52811 +
52812 +       objnr = obj_to_index(cachep, slabp, objp);
52813 +
52814 +       BUG_ON(objnr >= cachep->num);
52815 +       BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
52816 +
52817 +#ifdef CONFIG_DEBUG_SLAB_LEAK
52818 +       slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
52819 +#endif
52820 +       if (cachep->flags & SLAB_POISON) {
52821 +#ifdef CONFIG_DEBUG_PAGEALLOC
52822 +               if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
52823 +                       store_stackinfo(cachep, objp, (unsigned long)caller);
52824 +                       kernel_map_pages(virt_to_page(objp),
52825 +                                        cachep->buffer_size / PAGE_SIZE, 0);
52826 +               } else {
52827 +                       poison_obj(cachep, objp, POISON_FREE);
52828 +               }
52829 +#else
52830 +               poison_obj(cachep, objp, POISON_FREE);
52831 +#endif
52832 +       }
52833 +       return objp;
52834 +}
52835 +
52836 +static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
52837 +{
52838 +       kmem_bufctl_t i;
52839 +       int entries = 0;
52840 +
52841 +       /* Check slab's freelist to see if this obj is there. */
52842 +       for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
52843 +               entries++;
52844 +               if (entries > cachep->num || i >= cachep->num)
52845 +                       goto bad;
52846 +       }
52847 +       if (entries != cachep->num - slabp->inuse) {
52848 +bad:
52849 +               printk(KERN_ERR "slab: Internal list corruption detected in "
52850 +                               "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
52851 +                       cachep->name, cachep->num, slabp, slabp->inuse);
52852 +               for (i = 0;
52853 +                    i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
52854 +                    i++) {
52855 +                       if (i % 16 == 0)
52856 +                               printk("\n%03x:", i);
52857 +                       printk(" %02x", ((unsigned char *)slabp)[i]);
52858 +               }
52859 +               printk("\n");
52860 +               BUG();
52861 +       }
52862 +}
52863 +#else
52864 +#define kfree_debugcheck(x) do { } while(0)
52865 +#define cache_free_debugcheck(x,objp,z) (objp)
52866 +#define check_slabp(x,y) do { } while(0)
52867 +#endif
52868 +
52869 +static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
52870 +{
52871 +       int batchcount;
52872 +       struct kmem_list3 *l3;
52873 +       struct array_cache *ac;
52874 +       int node;
52875 +
52876 +       node = numa_node_id();
52877 +
52878 +       check_irq_off();
52879 +       ac = cpu_cache_get(cachep);
52880 +retry:
52881 +       batchcount = ac->batchcount;
52882 +       if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
52883 +               /*
52884 +                * If there was little recent activity on this cache, then
52885 +                * perform only a partial refill.  Otherwise we could generate
52886 +                * refill bouncing.
52887 +                */
52888 +               batchcount = BATCHREFILL_LIMIT;
52889 +       }
52890 +       l3 = cachep->nodelists[node];
52891 +
52892 +       BUG_ON(ac->avail > 0 || !l3);
52893 +       spin_lock(&l3->list_lock);
52894 +
52895 +       /* See if we can refill from the shared array */
52896 +       if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
52897 +               goto alloc_done;
52898 +
52899 +       while (batchcount > 0) {
52900 +               struct list_head *entry;
52901 +               struct slab *slabp;
52902 +               /* Get slab alloc is to come from. */
52903 +               entry = l3->slabs_partial.next;
52904 +               if (entry == &l3->slabs_partial) {
52905 +                       l3->free_touched = 1;
52906 +                       entry = l3->slabs_free.next;
52907 +                       if (entry == &l3->slabs_free)
52908 +                               goto must_grow;
52909 +               }
52910 +
52911 +               slabp = list_entry(entry, struct slab, list);
52912 +               check_slabp(cachep, slabp);
52913 +               check_spinlock_acquired(cachep);
52914 +
52915 +               /*
52916 +                * The slab was either on partial or free list so
52917 +                * there must be at least one object available for
52918 +                * allocation.
52919 +                */
52920 +               BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
52921 +
52922 +               while (slabp->inuse < cachep->num && batchcount--) {
52923 +                       STATS_INC_ALLOCED(cachep);
52924 +                       STATS_INC_ACTIVE(cachep);
52925 +                       STATS_SET_HIGH(cachep);
52926 +
52927 +                       ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
52928 +                                                           node);
52929 +               }
52930 +               check_slabp(cachep, slabp);
52931 +
52932 +               /* move slabp to correct slabp list: */
52933 +               list_del(&slabp->list);
52934 +               if (slabp->free == BUFCTL_END)
52935 +                       list_add(&slabp->list, &l3->slabs_full);
52936 +               else
52937 +                       list_add(&slabp->list, &l3->slabs_partial);
52938 +       }
52939 +
52940 +must_grow:
52941 +       l3->free_objects -= ac->avail;
52942 +alloc_done:
52943 +       spin_unlock(&l3->list_lock);
52944 +
52945 +       if (unlikely(!ac->avail)) {
52946 +               int x;
52947 +               x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
52948 +
52949 +               /* cache_grow can reenable interrupts, then ac could change. */
52950 +               ac = cpu_cache_get(cachep);
52951 +               if (!x && ac->avail == 0)       /* no objects in sight? abort */
52952 +                       return NULL;
52953 +
52954 +               if (!ac->avail)         /* objects refilled by interrupt? */
52955 +                       goto retry;
52956 +       }
52957 +       ac->touched = 1;
52958 +       return ac->entry[--ac->avail];
52959 +}
52960 +
52961 +static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
52962 +                                               gfp_t flags)
52963 +{
52964 +       might_sleep_if(flags & __GFP_WAIT);
52965 +#if DEBUG
52966 +       kmem_flagcheck(cachep, flags);
52967 +#endif
52968 +}
52969 +
52970 +#if DEBUG
52971 +static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
52972 +                               gfp_t flags, void *objp, void *caller)
52973 +{
52974 +       if (!objp)
52975 +               return objp;
52976 +       if (cachep->flags & SLAB_POISON) {
52977 +#ifdef CONFIG_DEBUG_PAGEALLOC
52978 +               if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
52979 +                       kernel_map_pages(virt_to_page(objp),
52980 +                                        cachep->buffer_size / PAGE_SIZE, 1);
52981 +               else
52982 +                       check_poison_obj(cachep, objp);
52983 +#else
52984 +               check_poison_obj(cachep, objp);
52985 +#endif
52986 +               poison_obj(cachep, objp, POISON_INUSE);
52987 +       }
52988 +       if (cachep->flags & SLAB_STORE_USER)
52989 +               *dbg_userword(cachep, objp) = caller;
52990 +
52991 +       if (cachep->flags & SLAB_RED_ZONE) {
52992 +               if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
52993 +                               *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
52994 +                       slab_error(cachep, "double free, or memory outside"
52995 +                                               " object was overwritten");
52996 +                       printk(KERN_ERR
52997 +                               "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
52998 +                               objp, *dbg_redzone1(cachep, objp),
52999 +                               *dbg_redzone2(cachep, objp));
53000 +               }
53001 +               *dbg_redzone1(cachep, objp) = RED_ACTIVE;
53002 +               *dbg_redzone2(cachep, objp) = RED_ACTIVE;
53003 +       }
53004 +#ifdef CONFIG_DEBUG_SLAB_LEAK
53005 +       {
53006 +               struct slab *slabp;
53007 +               unsigned objnr;
53008 +
53009 +               slabp = page_get_slab(virt_to_head_page(objp));
53010 +               objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
53011 +               slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
53012 +       }
53013 +#endif
53014 +       objp += obj_offset(cachep);
53015 +       if (cachep->ctor && cachep->flags & SLAB_POISON)
53016 +               cachep->ctor(objp, cachep, 0);
53017 +#if ARCH_SLAB_MINALIGN
53018 +       if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
53019 +               printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
53020 +                      objp, ARCH_SLAB_MINALIGN);
53021 +       }
53022 +#endif
53023 +       return objp;
53024 +}
53025 +#else
53026 +#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
53027 +#endif
53028 +
53029 +#ifdef CONFIG_FAILSLAB
53030 +
53031 +static struct failslab_attr {
53032 +
53033 +       struct fault_attr attr;
53034 +
53035 +       u32 ignore_gfp_wait;
53036 +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
53037 +       struct dentry *ignore_gfp_wait_file;
53038 +#endif
53039 +
53040 +} failslab = {
53041 +       .attr = FAULT_ATTR_INITIALIZER,
53042 +       .ignore_gfp_wait = 1,
53043 +};
53044 +
53045 +static int __init setup_failslab(char *str)
53046 +{
53047 +       return setup_fault_attr(&failslab.attr, str);
53048 +}
53049 +__setup("failslab=", setup_failslab);
53050 +
53051 +static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
53052 +{
53053 +       if (cachep == &cache_cache)
53054 +               return 0;
53055 +       if (flags & __GFP_NOFAIL)
53056 +               return 0;
53057 +       if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
53058 +               return 0;
53059 +
53060 +       return should_fail(&failslab.attr, obj_size(cachep));
53061 +}
53062 +
53063 +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
53064 +
53065 +static int __init failslab_debugfs(void)
53066 +{
53067 +       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
53068 +       struct dentry *dir;
53069 +       int err;
53070 +
53071 +       err = init_fault_attr_dentries(&failslab.attr, "failslab");
53072 +       if (err)
53073 +               return err;
53074 +       dir = failslab.attr.dentries.dir;
53075 +
53076 +       failslab.ignore_gfp_wait_file =
53077 +               debugfs_create_bool("ignore-gfp-wait", mode, dir,
53078 +                                     &failslab.ignore_gfp_wait);
53079 +
53080 +       if (!failslab.ignore_gfp_wait_file) {
53081 +               err = -ENOMEM;
53082 +               debugfs_remove(failslab.ignore_gfp_wait_file);
53083 +               cleanup_fault_attr_dentries(&failslab.attr);
53084 +       }
53085 +
53086 +       return err;
53087 +}
53088 +
53089 +late_initcall(failslab_debugfs);
53090 +
53091 +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
53092 +
53093 +#else /* CONFIG_FAILSLAB */
53094 +
53095 +static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
53096 +{
53097 +       return 0;
53098 +}
53099 +
53100 +#endif /* CONFIG_FAILSLAB */
53101 +
53102 +static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
53103 +{
53104 +       void *objp;
53105 +       struct array_cache *ac;
53106 +
53107 +       check_irq_off();
53108 +
53109 +       ac = cpu_cache_get(cachep);
53110 +       if (likely(ac->avail)) {
53111 +               STATS_INC_ALLOCHIT(cachep);
53112 +               ac->touched = 1;
53113 +               objp = ac->entry[--ac->avail];
53114 +       } else {
53115 +               STATS_INC_ALLOCMISS(cachep);
53116 +               objp = cache_alloc_refill(cachep, flags);
53117 +       }
53118 +       return objp;
53119 +}
53120 +
53121 +#ifdef CONFIG_NUMA
53122 +/*
53123 + * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
53124 + *
53125 + * If we are in_interrupt, then process context, including cpusets and
53126 + * mempolicy, may not apply and should not be used for allocation policy.
53127 + */
53128 +static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
53129 +{
53130 +       int nid_alloc, nid_here;
53131 +
53132 +       if (in_interrupt() || (flags & __GFP_THISNODE))
53133 +               return NULL;
53134 +       nid_alloc = nid_here = numa_node_id();
53135 +       if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
53136 +               nid_alloc = cpuset_mem_spread_node();
53137 +       else if (current->mempolicy)
53138 +               nid_alloc = slab_node(current->mempolicy);
53139 +       if (nid_alloc != nid_here)
53140 +               return ____cache_alloc_node(cachep, flags, nid_alloc);
53141 +       return NULL;
53142 +}
53143 +
53144 +/*
53145 + * Fallback function if there was no memory available and no objects on a
53146 + * certain node and fall back is permitted. First we scan all the
53147 + * available nodelists for available objects. If that fails then we
53148 + * perform an allocation without specifying a node. This allows the page
53149 + * allocator to do its reclaim / fallback magic. We then insert the
53150 + * slab into the proper nodelist and then allocate from it.
53151 + */
53152 +static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
53153 +{
53154 +       struct zonelist *zonelist;
53155 +       gfp_t local_flags;
53156 +       struct zone **z;
53157 +       void *obj = NULL;
53158 +       int nid;
53159 +
53160 +       if (flags & __GFP_THISNODE)
53161 +               return NULL;
53162 +
53163 +       zonelist = &NODE_DATA(slab_node(current->mempolicy))
53164 +                       ->node_zonelists[gfp_zone(flags)];
53165 +       local_flags = (flags & GFP_LEVEL_MASK);
53166 +
53167 +retry:
53168 +       /*
53169 +        * Look through allowed nodes for objects available
53170 +        * from existing per node queues.
53171 +        */
53172 +       for (z = zonelist->zones; *z && !obj; z++) {
53173 +               nid = zone_to_nid(*z);
53174 +
53175 +               if (cpuset_zone_allowed_hardwall(*z, flags) &&
53176 +                       cache->nodelists[nid] &&
53177 +                       cache->nodelists[nid]->free_objects)
53178 +                               obj = ____cache_alloc_node(cache,
53179 +                                       flags | GFP_THISNODE, nid);
53180 +       }
53181 +
53182 +       if (!obj) {
53183 +               /*
53184 +                * This allocation will be performed within the constraints
53185 +                * of the current cpuset / memory policy requirements.
53186 +                * We may trigger various forms of reclaim on the allowed
53187 +                * set and go into memory reserves if necessary.
53188 +                */
53189 +               if (local_flags & __GFP_WAIT)
53190 +                       local_irq_enable();
53191 +               kmem_flagcheck(cache, flags);
53192 +               obj = kmem_getpages(cache, flags, -1);
53193 +               if (local_flags & __GFP_WAIT)
53194 +                       local_irq_disable();
53195 +               if (obj) {
53196 +                       /*
53197 +                        * Insert into the appropriate per node queues
53198 +                        */
53199 +                       nid = page_to_nid(virt_to_page(obj));
53200 +                       if (cache_grow(cache, flags, nid, obj)) {
53201 +                               obj = ____cache_alloc_node(cache,
53202 +                                       flags | GFP_THISNODE, nid);
53203 +                               if (!obj)
53204 +                                       /*
53205 +                                        * Another processor may allocate the
53206 +                                        * objects in the slab since we are
53207 +                                        * not holding any locks.
53208 +                                        */
53209 +                                       goto retry;
53210 +                       } else {
53211 +                               /* cache_grow already freed obj */
53212 +                               obj = NULL;
53213 +                       }
53214 +               }
53215 +       }
53216 +       return obj;
53217 +}
53218 +
53219 +/*
53220 + * A interface to enable slab creation on nodeid
53221 + */
53222 +static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
53223 +                               int nodeid)
53224 +{
53225 +       struct list_head *entry;
53226 +       struct slab *slabp;
53227 +       struct kmem_list3 *l3;
53228 +       void *obj;
53229 +       int x;
53230 +
53231 +       l3 = cachep->nodelists[nodeid];
53232 +       BUG_ON(!l3);
53233 +
53234 +retry:
53235 +       check_irq_off();
53236 +       spin_lock(&l3->list_lock);
53237 +       entry = l3->slabs_partial.next;
53238 +       if (entry == &l3->slabs_partial) {
53239 +               l3->free_touched = 1;
53240 +               entry = l3->slabs_free.next;
53241 +               if (entry == &l3->slabs_free)
53242 +                       goto must_grow;
53243 +       }
53244 +
53245 +       slabp = list_entry(entry, struct slab, list);
53246 +       check_spinlock_acquired_node(cachep, nodeid);
53247 +       check_slabp(cachep, slabp);
53248 +
53249 +       STATS_INC_NODEALLOCS(cachep);
53250 +       STATS_INC_ACTIVE(cachep);
53251 +       STATS_SET_HIGH(cachep);
53252 +
53253 +       BUG_ON(slabp->inuse == cachep->num);
53254 +
53255 +       obj = slab_get_obj(cachep, slabp, nodeid);
53256 +       check_slabp(cachep, slabp);
53257 +       vx_slab_alloc(cachep, flags);
53258 +       l3->free_objects--;
53259 +       /* move slabp to correct slabp list: */
53260 +       list_del(&slabp->list);
53261 +
53262 +       if (slabp->free == BUFCTL_END)
53263 +               list_add(&slabp->list, &l3->slabs_full);
53264 +       else
53265 +               list_add(&slabp->list, &l3->slabs_partial);
53266 +
53267 +       spin_unlock(&l3->list_lock);
53268 +       goto done;
53269 +
53270 +must_grow:
53271 +       spin_unlock(&l3->list_lock);
53272 +       x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
53273 +       if (x)
53274 +               goto retry;
53275 +
53276 +       return fallback_alloc(cachep, flags);
53277 +
53278 +done:
53279 +       return obj;
53280 +}
53281 +
53282 +/**
53283 + * kmem_cache_alloc_node - Allocate an object on the specified node
53284 + * @cachep: The cache to allocate from.
53285 + * @flags: See kmalloc().
53286 + * @nodeid: node number of the target node.
53287 + * @caller: return address of caller, used for debug information
53288 + *
53289 + * Identical to kmem_cache_alloc but it will allocate memory on the given
53290 + * node, which can improve the performance for cpu bound structures.
53291 + *
53292 + * Fallback to other node is possible if __GFP_THISNODE is not set.
53293 + */
53294 +static __always_inline void *
53295 +__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
53296 +                  void *caller)
53297 +{
53298 +       unsigned long save_flags;
53299 +       void *ptr;
53300 +
53301 +       if (should_failslab(cachep, flags))
53302 +               return NULL;
53303 +
53304 +       cache_alloc_debugcheck_before(cachep, flags);
53305 +       local_irq_save(save_flags);
53306 +
53307 +       if (unlikely(nodeid == -1))
53308 +               nodeid = numa_node_id();
53309 +
53310 +       if (unlikely(!cachep->nodelists[nodeid])) {
53311 +               /* Node not bootstrapped yet */
53312 +               ptr = fallback_alloc(cachep, flags);
53313 +               goto out;
53314 +       }
53315 +
53316 +       if (nodeid == numa_node_id()) {
53317 +               /*
53318 +                * Use the locally cached objects if possible.
53319 +                * However ____cache_alloc does not allow fallback
53320 +                * to other nodes. It may fail while we still have
53321 +                * objects on other nodes available.
53322 +                */
53323 +               ptr = ____cache_alloc(cachep, flags);
53324 +               if (ptr)
53325 +                       goto out;
53326 +       }
53327 +       /* ___cache_alloc_node can fall back to other nodes */
53328 +       ptr = ____cache_alloc_node(cachep, flags, nodeid);
53329 +  out:
53330 +       vx_slab_alloc(cachep, flags);
53331 +       local_irq_restore(save_flags);
53332 +       ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
53333 +
53334 +       return ptr;
53335 +}
53336 +
53337 +static __always_inline void *
53338 +__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
53339 +{
53340 +       void *objp;
53341 +
53342 +       if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
53343 +               objp = alternate_node_alloc(cache, flags);
53344 +               if (objp)
53345 +                       goto out;
53346 +       }
53347 +       objp = ____cache_alloc(cache, flags);
53348 +
53349 +       /*
53350 +        * We may just have run out of memory on the local node.
53351 +        * ____cache_alloc_node() knows how to locate memory on other nodes
53352 +        */
53353 +       if (!objp)
53354 +               objp = ____cache_alloc_node(cache, flags, numa_node_id());
53355 +
53356 +  out:
53357 +       return objp;
53358 +}
53359 +#else
53360 +
53361 +static __always_inline void *
53362 +__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
53363 +{
53364 +       return ____cache_alloc(cachep, flags);
53365 +}
53366 +
53367 +#endif /* CONFIG_NUMA */
53368 +
53369 +static __always_inline void *
53370 +__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
53371 +{
53372 +       unsigned long save_flags;
53373 +       void *objp;
53374 +
53375 +       if (should_failslab(cachep, flags))
53376 +               return NULL;
53377 +
53378 +       cache_alloc_debugcheck_before(cachep, flags);
53379 +       local_irq_save(save_flags);
53380 +       objp = __do_cache_alloc(cachep, flags);
53381 +       local_irq_restore(save_flags);
53382 +       objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
53383 +       prefetchw(objp);
53384 +
53385 +       return objp;
53386 +}
53387 +
53388 +/*
53389 + * Caller needs to acquire correct kmem_list's list_lock
53390 + */
53391 +static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
53392 +                      int node)
53393 +{
53394 +       int i;
53395 +       struct kmem_list3 *l3;
53396 +
53397 +       for (i = 0; i < nr_objects; i++) {
53398 +               void *objp = objpp[i];
53399 +               struct slab *slabp;
53400 +
53401 +               slabp = virt_to_slab(objp);
53402 +               l3 = cachep->nodelists[node];
53403 +               list_del(&slabp->list);
53404 +               check_spinlock_acquired_node(cachep, node);
53405 +               check_slabp(cachep, slabp);
53406 +               slab_put_obj(cachep, slabp, objp, node);
53407 +               STATS_DEC_ACTIVE(cachep);
53408 +               l3->free_objects++;
53409 +               check_slabp(cachep, slabp);
53410 +
53411 +               /* fixup slab chains */
53412 +               if (slabp->inuse == 0) {
53413 +                       if (l3->free_objects > l3->free_limit) {
53414 +                               l3->free_objects -= cachep->num;
53415 +                               /* No need to drop any previously held
53416 +                                * lock here, even if we have a off-slab slab
53417 +                                * descriptor it is guaranteed to come from
53418 +                                * a different cache, refer to comments before
53419 +                                * alloc_slabmgmt.
53420 +                                */
53421 +                               slab_destroy(cachep, slabp);
53422 +                       } else {
53423 +                               list_add(&slabp->list, &l3->slabs_free);
53424 +                       }
53425 +               } else {
53426 +                       /* Unconditionally move a slab to the end of the
53427 +                        * partial list on free - maximum time for the
53428 +                        * other objects to be freed, too.
53429 +                        */
53430 +                       list_add_tail(&slabp->list, &l3->slabs_partial);
53431 +               }
53432 +       }
53433 +}
53434 +
53435 +static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
53436 +{
53437 +       int batchcount;
53438 +       struct kmem_list3 *l3;
53439 +       int node = numa_node_id();
53440 +
53441 +       batchcount = ac->batchcount;
53442 +#if DEBUG
53443 +       BUG_ON(!batchcount || batchcount > ac->avail);
53444 +#endif
53445 +       check_irq_off();
53446 +       l3 = cachep->nodelists[node];
53447 +       spin_lock(&l3->list_lock);
53448 +       if (l3->shared) {
53449 +               struct array_cache *shared_array = l3->shared;
53450 +               int max = shared_array->limit - shared_array->avail;
53451 +               if (max) {
53452 +                       if (batchcount > max)
53453 +                               batchcount = max;
53454 +                       memcpy(&(shared_array->entry[shared_array->avail]),
53455 +                              ac->entry, sizeof(void *) * batchcount);
53456 +                       shared_array->avail += batchcount;
53457 +                       goto free_done;
53458 +               }
53459 +       }
53460 +
53461 +       free_block(cachep, ac->entry, batchcount, node);
53462 +free_done:
53463 +#if STATS
53464 +       {
53465 +               int i = 0;
53466 +               struct list_head *p;
53467 +
53468 +               p = l3->slabs_free.next;
53469 +               while (p != &(l3->slabs_free)) {
53470 +                       struct slab *slabp;
53471 +
53472 +                       slabp = list_entry(p, struct slab, list);
53473 +                       BUG_ON(slabp->inuse);
53474 +
53475 +                       i++;
53476 +                       p = p->next;
53477 +               }
53478 +               STATS_SET_FREEABLE(cachep, i);
53479 +       }
53480 +#endif
53481 +       spin_unlock(&l3->list_lock);
53482 +       ac->avail -= batchcount;
53483 +       memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
53484 +}
53485 +
53486 +/*
53487 + * Release an obj back to its cache. If the obj has a constructed state, it must
53488 + * be in this state _before_ it is released.  Called with disabled ints.
53489 + */
53490 +static inline void __cache_free(struct kmem_cache *cachep, void *objp)
53491 +{
53492 +       struct array_cache *ac = cpu_cache_get(cachep);
53493 +
53494 +       check_irq_off();
53495 +       objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
53496 +       vx_slab_free(cachep);
53497 +
53498 +       if (cache_free_alien(cachep, objp))
53499 +               return;
53500 +
53501 +       if (likely(ac->avail < ac->limit)) {
53502 +               STATS_INC_FREEHIT(cachep);
53503 +               ac->entry[ac->avail++] = objp;
53504 +               return;
53505 +       } else {
53506 +               STATS_INC_FREEMISS(cachep);
53507 +               cache_flusharray(cachep, ac);
53508 +               ac->entry[ac->avail++] = objp;
53509 +       }
53510 +}
53511 +
53512 +/**
53513 + * kmem_cache_alloc - Allocate an object
53514 + * @cachep: The cache to allocate from.
53515 + * @flags: See kmalloc().
53516 + *
53517 + * Allocate an object from this cache.  The flags are only relevant
53518 + * if the cache has no available objects.
53519 + */
53520 +void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
53521 +{
53522 +       return __cache_alloc(cachep, flags, __builtin_return_address(0));
53523 +}
53524 +EXPORT_SYMBOL(kmem_cache_alloc);
53525 +
53526 +/**
53527 + * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
53528 + * @cache: The cache to allocate from.
53529 + * @flags: See kmalloc().
53530 + *
53531 + * Allocate an object from this cache and set the allocated memory to zero.
53532 + * The flags are only relevant if the cache has no available objects.
53533 + */
53534 +void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
53535 +{
53536 +       void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
53537 +       if (ret)
53538 +               memset(ret, 0, obj_size(cache));
53539 +       return ret;
53540 +}
53541 +EXPORT_SYMBOL(kmem_cache_zalloc);
53542 +
53543 +/**
53544 + * kmem_ptr_validate - check if an untrusted pointer might
53545 + *     be a slab entry.
53546 + * @cachep: the cache we're checking against
53547 + * @ptr: pointer to validate
53548 + *
53549 + * This verifies that the untrusted pointer looks sane:
53550 + * it is _not_ a guarantee that the pointer is actually
53551 + * part of the slab cache in question, but it at least
53552 + * validates that the pointer can be dereferenced and
53553 + * looks half-way sane.
53554 + *
53555 + * Currently only used for dentry validation.
53556 + */
53557 +int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
53558 +{
53559 +       unsigned long addr = (unsigned long)ptr;
53560 +       unsigned long min_addr = PAGE_OFFSET;
53561 +       unsigned long align_mask = BYTES_PER_WORD - 1;
53562 +       unsigned long size = cachep->buffer_size;
53563 +       struct page *page;
53564 +
53565 +       if (unlikely(addr < min_addr))
53566 +               goto out;
53567 +       if (unlikely(addr > (unsigned long)high_memory - size))
53568 +               goto out;
53569 +       if (unlikely(addr & align_mask))
53570 +               goto out;
53571 +       if (unlikely(!kern_addr_valid(addr)))
53572 +               goto out;
53573 +       if (unlikely(!kern_addr_valid(addr + size - 1)))
53574 +               goto out;
53575 +       page = virt_to_page(ptr);
53576 +       if (unlikely(!PageSlab(page)))
53577 +               goto out;
53578 +       if (unlikely(page_get_cache(page) != cachep))
53579 +               goto out;
53580 +       return 1;
53581 +out:
53582 +       return 0;
53583 +}
53584 +
53585 +#ifdef CONFIG_NUMA
53586 +void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
53587 +{
53588 +       return __cache_alloc_node(cachep, flags, nodeid,
53589 +                       __builtin_return_address(0));
53590 +}
53591 +EXPORT_SYMBOL(kmem_cache_alloc_node);
53592 +
53593 +static __always_inline void *
53594 +__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
53595 +{
53596 +       struct kmem_cache *cachep;
53597 +
53598 +       cachep = kmem_find_general_cachep(size, flags);
53599 +       if (unlikely(cachep == NULL))
53600 +               return NULL;
53601 +       return kmem_cache_alloc_node(cachep, flags, node);
53602 +}
53603 +
53604 +#ifdef CONFIG_DEBUG_SLAB
53605 +void *__kmalloc_node(size_t size, gfp_t flags, int node)
53606 +{
53607 +       return __do_kmalloc_node(size, flags, node,
53608 +                       __builtin_return_address(0));
53609 +}
53610 +EXPORT_SYMBOL(__kmalloc_node);
53611 +
53612 +void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
53613 +               int node, void *caller)
53614 +{
53615 +       return __do_kmalloc_node(size, flags, node, caller);
53616 +}
53617 +EXPORT_SYMBOL(__kmalloc_node_track_caller);
53618 +#else
53619 +void *__kmalloc_node(size_t size, gfp_t flags, int node)
53620 +{
53621 +       return __do_kmalloc_node(size, flags, node, NULL);
53622 +}
53623 +EXPORT_SYMBOL(__kmalloc_node);
53624 +#endif /* CONFIG_DEBUG_SLAB */
53625 +#endif /* CONFIG_NUMA */
53626 +
53627 +/**
53628 + * __do_kmalloc - allocate memory
53629 + * @size: how many bytes of memory are required.
53630 + * @flags: the type of memory to allocate (see kmalloc).
53631 + * @caller: function caller for debug tracking of the caller
53632 + */
53633 +static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
53634 +                                         void *caller)
53635 +{
53636 +       struct kmem_cache *cachep;
53637 +
53638 +       /* If you want to save a few bytes .text space: replace
53639 +        * __ with kmem_.
53640 +        * Then kmalloc uses the uninlined functions instead of the inline
53641 +        * functions.
53642 +        */
53643 +       cachep = __find_general_cachep(size, flags);
53644 +       if (unlikely(cachep == NULL))
53645 +               return NULL;
53646 +       return __cache_alloc(cachep, flags, caller);
53647 +}
53648 +
53649 +
53650 +#ifdef CONFIG_DEBUG_SLAB
53651 +void *__kmalloc(size_t size, gfp_t flags)
53652 +{
53653 +       return __do_kmalloc(size, flags, __builtin_return_address(0));
53654 +}
53655 +EXPORT_SYMBOL(__kmalloc);
53656 +
53657 +void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
53658 +{
53659 +       return __do_kmalloc(size, flags, caller);
53660 +}
53661 +EXPORT_SYMBOL(__kmalloc_track_caller);
53662 +
53663 +#else
53664 +void *__kmalloc(size_t size, gfp_t flags)
53665 +{
53666 +       return __do_kmalloc(size, flags, NULL);
53667 +}
53668 +EXPORT_SYMBOL(__kmalloc);
53669 +#endif
53670 +
53671 +/**
53672 + * krealloc - reallocate memory. The contents will remain unchanged.
53673 + * @p: object to reallocate memory for.
53674 + * @new_size: how many bytes of memory are required.
53675 + * @flags: the type of memory to allocate.
53676 + *
53677 + * The contents of the object pointed to are preserved up to the
53678 + * lesser of the new and old sizes.  If @p is %NULL, krealloc()
53679 + * behaves exactly like kmalloc().  If @size is 0 and @p is not a
53680 + * %NULL pointer, the object pointed to is freed.
53681 + */
53682 +void *krealloc(const void *p, size_t new_size, gfp_t flags)
53683 +{
53684 +       struct kmem_cache *cache, *new_cache;
53685 +       void *ret;
53686 +
53687 +       if (unlikely(!p))
53688 +               return kmalloc_track_caller(new_size, flags);
53689 +
53690 +       if (unlikely(!new_size)) {
53691 +               kfree(p);
53692 +               return NULL;
53693 +       }
53694 +
53695 +       cache = virt_to_cache(p);
53696 +       new_cache = __find_general_cachep(new_size, flags);
53697 +
53698 +       /*
53699 +        * If new size fits in the current cache, bail out.
53700 +        */
53701 +       if (likely(cache == new_cache))
53702 +               return (void *)p;
53703 +
53704 +       /*
53705 +        * We are on the slow-path here so do not use __cache_alloc
53706 +        * because it bloats kernel text.
53707 +        */
53708 +       ret = kmalloc_track_caller(new_size, flags);
53709 +       if (ret) {
53710 +               memcpy(ret, p, min(new_size, ksize(p)));
53711 +               kfree(p);
53712 +       }
53713 +       return ret;
53714 +}
53715 +EXPORT_SYMBOL(krealloc);
53716 +
53717 +/**
53718 + * kmem_cache_free - Deallocate an object
53719 + * @cachep: The cache the allocation was from.
53720 + * @objp: The previously allocated object.
53721 + *
53722 + * Free an object which was previously allocated from this
53723 + * cache.
53724 + */
53725 +void kmem_cache_free(struct kmem_cache *cachep, void *objp)
53726 +{
53727 +       unsigned long flags;
53728 +
53729 +       BUG_ON(virt_to_cache(objp) != cachep);
53730 +
53731 +       local_irq_save(flags);
53732 +       debug_check_no_locks_freed(objp, obj_size(cachep));
53733 +       __cache_free(cachep, objp);
53734 +       local_irq_restore(flags);
53735 +}
53736 +EXPORT_SYMBOL(kmem_cache_free);
53737 +
53738 +/**
53739 + * kfree - free previously allocated memory
53740 + * @objp: pointer returned by kmalloc.
53741 + *
53742 + * If @objp is NULL, no operation is performed.
53743 + *
53744 + * Don't free memory not originally allocated by kmalloc()
53745 + * or you will run into trouble.
53746 + */
53747 +void kfree(const void *objp)
53748 +{
53749 +       struct kmem_cache *c;
53750 +       unsigned long flags;
53751 +
53752 +       if (unlikely(!objp))
53753 +               return;
53754 +       local_irq_save(flags);
53755 +       kfree_debugcheck(objp);
53756 +       c = virt_to_cache(objp);
53757 +       debug_check_no_locks_freed(objp, obj_size(c));
53758 +       __cache_free(c, (void *)objp);
53759 +       local_irq_restore(flags);
53760 +}
53761 +EXPORT_SYMBOL(kfree);
53762 +
53763 +unsigned int kmem_cache_size(struct kmem_cache *cachep)
53764 +{
53765 +       return obj_size(cachep);
53766 +}
53767 +EXPORT_SYMBOL(kmem_cache_size);
53768 +
53769 +const char *kmem_cache_name(struct kmem_cache *cachep)
53770 +{
53771 +       return cachep->name;
53772 +}
53773 +EXPORT_SYMBOL_GPL(kmem_cache_name);
53774 +
53775 +/*
53776 + * This initializes kmem_list3 or resizes varioius caches for all nodes.
53777 + */
53778 +static int alloc_kmemlist(struct kmem_cache *cachep)
53779 +{
53780 +       int node;
53781 +       struct kmem_list3 *l3;
53782 +       struct array_cache *new_shared;
53783 +       struct array_cache **new_alien = NULL;
53784 +
53785 +       for_each_online_node(node) {
53786 +
53787 +                if (use_alien_caches) {
53788 +                        new_alien = alloc_alien_cache(node, cachep->limit);
53789 +                        if (!new_alien)
53790 +                                goto fail;
53791 +                }
53792 +
53793 +               new_shared = NULL;
53794 +               if (cachep->shared) {
53795 +                       new_shared = alloc_arraycache(node,
53796 +                               cachep->shared*cachep->batchcount,
53797 +                                       0xbaadf00d);
53798 +                       if (!new_shared) {
53799 +                               free_alien_cache(new_alien);
53800 +                               goto fail;
53801 +                       }
53802 +               }
53803 +
53804 +               l3 = cachep->nodelists[node];
53805 +               if (l3) {
53806 +                       struct array_cache *shared = l3->shared;
53807 +
53808 +                       spin_lock_irq(&l3->list_lock);
53809 +
53810 +                       if (shared)
53811 +                               free_block(cachep, shared->entry,
53812 +                                               shared->avail, node);
53813 +
53814 +                       l3->shared = new_shared;
53815 +                       if (!l3->alien) {
53816 +                               l3->alien = new_alien;
53817 +                               new_alien = NULL;
53818 +                       }
53819 +                       l3->free_limit = (1 + nr_cpus_node(node)) *
53820 +                                       cachep->batchcount + cachep->num;
53821 +                       spin_unlock_irq(&l3->list_lock);
53822 +                       kfree(shared);
53823 +                       free_alien_cache(new_alien);
53824 +                       continue;
53825 +               }
53826 +               l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
53827 +               if (!l3) {
53828 +                       free_alien_cache(new_alien);
53829 +                       kfree(new_shared);
53830 +                       goto fail;
53831 +               }
53832 +
53833 +               kmem_list3_init(l3);
53834 +               l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
53835 +                               ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
53836 +               l3->shared = new_shared;
53837 +               l3->alien = new_alien;
53838 +               l3->free_limit = (1 + nr_cpus_node(node)) *
53839 +                                       cachep->batchcount + cachep->num;
53840 +               cachep->nodelists[node] = l3;
53841 +       }
53842 +       return 0;
53843 +
53844 +fail:
53845 +       if (!cachep->next.next) {
53846 +               /* Cache is not active yet. Roll back what we did */
53847 +               node--;
53848 +               while (node >= 0) {
53849 +                       if (cachep->nodelists[node]) {
53850 +                               l3 = cachep->nodelists[node];
53851 +
53852 +                               kfree(l3->shared);
53853 +                               free_alien_cache(l3->alien);
53854 +                               kfree(l3);
53855 +                               cachep->nodelists[node] = NULL;
53856 +                       }
53857 +                       node--;
53858 +               }
53859 +       }
53860 +       return -ENOMEM;
53861 +}
53862 +
53863 +struct ccupdate_struct {
53864 +       struct kmem_cache *cachep;
53865 +       struct array_cache *new[NR_CPUS];
53866 +};
53867 +
53868 +static void do_ccupdate_local(void *info)
53869 +{
53870 +       struct ccupdate_struct *new = info;
53871 +       struct array_cache *old;
53872 +
53873 +       check_irq_off();
53874 +       old = cpu_cache_get(new->cachep);
53875 +
53876 +       new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
53877 +       new->new[smp_processor_id()] = old;
53878 +}
53879 +
53880 +/* Always called with the cache_chain_mutex held */
53881 +static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
53882 +                               int batchcount, int shared)
53883 +{
53884 +       struct ccupdate_struct *new;
53885 +       int i;
53886 +
53887 +       new = kzalloc(sizeof(*new), GFP_KERNEL);
53888 +       if (!new)
53889 +               return -ENOMEM;
53890 +
53891 +       for_each_online_cpu(i) {
53892 +               new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
53893 +                                               batchcount);
53894 +               if (!new->new[i]) {
53895 +                       for (i--; i >= 0; i--)
53896 +                               kfree(new->new[i]);
53897 +                       kfree(new);
53898 +                       return -ENOMEM;
53899 +               }
53900 +       }
53901 +       new->cachep = cachep;
53902 +
53903 +       on_each_cpu(do_ccupdate_local, (void *)new, 1, 1);
53904 +
53905 +       check_irq_on();
53906 +       cachep->batchcount = batchcount;
53907 +       cachep->limit = limit;
53908 +       cachep->shared = shared;
53909 +
53910 +       for_each_online_cpu(i) {
53911 +               struct array_cache *ccold = new->new[i];
53912 +               if (!ccold)
53913 +                       continue;
53914 +               spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
53915 +               free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
53916 +               spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
53917 +               kfree(ccold);
53918 +       }
53919 +       kfree(new);
53920 +       return alloc_kmemlist(cachep);
53921 +}
53922 +
53923 +/* Called with cache_chain_mutex held always */
53924 +static int enable_cpucache(struct kmem_cache *cachep)
53925 +{
53926 +       int err;
53927 +       int limit, shared;
53928 +
53929 +       /*
53930 +        * The head array serves three purposes:
53931 +        * - create a LIFO ordering, i.e. return objects that are cache-warm
53932 +        * - reduce the number of spinlock operations.
53933 +        * - reduce the number of linked list operations on the slab and
53934 +        *   bufctl chains: array operations are cheaper.
53935 +        * The numbers are guessed, we should auto-tune as described by
53936 +        * Bonwick.
53937 +        */
53938 +       if (cachep->buffer_size > 131072)
53939 +               limit = 1;
53940 +       else if (cachep->buffer_size > PAGE_SIZE)
53941 +               limit = 8;
53942 +       else if (cachep->buffer_size > 1024)
53943 +               limit = 24;
53944 +       else if (cachep->buffer_size > 256)
53945 +               limit = 54;
53946 +       else
53947 +               limit = 120;
53948 +
53949 +       /*
53950 +        * CPU bound tasks (e.g. network routing) can exhibit cpu bound
53951 +        * allocation behaviour: Most allocs on one cpu, most free operations
53952 +        * on another cpu. For these cases, an efficient object passing between
53953 +        * cpus is necessary. This is provided by a shared array. The array
53954 +        * replaces Bonwick's magazine layer.
53955 +        * On uniprocessor, it's functionally equivalent (but less efficient)
53956 +        * to a larger limit. Thus disabled by default.
53957 +        */
53958 +       shared = 0;
53959 +       if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
53960 +               shared = 8;
53961 +
53962 +#if DEBUG
53963 +       /*
53964 +        * With debugging enabled, large batchcount lead to excessively long
53965 +        * periods with disabled local interrupts. Limit the batchcount
53966 +        */
53967 +       if (limit > 32)
53968 +               limit = 32;
53969 +#endif
53970 +       err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
53971 +       if (err)
53972 +               printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
53973 +                      cachep->name, -err);
53974 +       return err;
53975 +}
53976 +
53977 +/*
53978 + * Drain an array if it contains any elements taking the l3 lock only if
53979 + * necessary. Note that the l3 listlock also protects the array_cache
53980 + * if drain_array() is used on the shared array.
53981 + */
53982 +void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
53983 +                        struct array_cache *ac, int force, int node)
53984 +{
53985 +       int tofree;
53986 +
53987 +       if (!ac || !ac->avail)
53988 +               return;
53989 +       if (ac->touched && !force) {
53990 +               ac->touched = 0;
53991 +       } else {
53992 +               spin_lock_irq(&l3->list_lock);
53993 +               if (ac->avail) {
53994 +                       tofree = force ? ac->avail : (ac->limit + 4) / 5;
53995 +                       if (tofree > ac->avail)
53996 +                               tofree = (ac->avail + 1) / 2;
53997 +                       free_block(cachep, ac->entry, tofree, node);
53998 +                       ac->avail -= tofree;
53999 +                       memmove(ac->entry, &(ac->entry[tofree]),
54000 +                               sizeof(void *) * ac->avail);
54001 +               }
54002 +               spin_unlock_irq(&l3->list_lock);
54003 +       }
54004 +}
54005 +
54006 +/**
54007 + * cache_reap - Reclaim memory from caches.
54008 + * @w: work descriptor
54009 + *
54010 + * Called from workqueue/eventd every few seconds.
54011 + * Purpose:
54012 + * - clear the per-cpu caches for this CPU.
54013 + * - return freeable pages to the main free memory pool.
54014 + *
54015 + * If we cannot acquire the cache chain mutex then just give up - we'll try
54016 + * again on the next iteration.
54017 + */
54018 +static void cache_reap(struct work_struct *w)
54019 +{
54020 +       struct kmem_cache *searchp;
54021 +       struct kmem_list3 *l3;
54022 +       int node = numa_node_id();
54023 +       struct delayed_work *work =
54024 +               container_of(w, struct delayed_work, work);
54025 +
54026 +       if (!mutex_trylock(&cache_chain_mutex))
54027 +               /* Give up. Setup the next iteration. */
54028 +               goto out;
54029 +
54030 +       list_for_each_entry(searchp, &cache_chain, next) {
54031 +               check_irq_on();
54032 +
54033 +               /*
54034 +                * We only take the l3 lock if absolutely necessary and we
54035 +                * have established with reasonable certainty that
54036 +                * we can do some work if the lock was obtained.
54037 +                */
54038 +               l3 = searchp->nodelists[node];
54039 +
54040 +               reap_alien(searchp, l3);
54041 +
54042 +               drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
54043 +
54044 +               /*
54045 +                * These are racy checks but it does not matter
54046 +                * if we skip one check or scan twice.
54047 +                */
54048 +               if (time_after(l3->next_reap, jiffies))
54049 +                       goto next;
54050 +
54051 +               l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
54052 +
54053 +               drain_array(searchp, l3, l3->shared, 0, node);
54054 +
54055 +               if (l3->free_touched)
54056 +                       l3->free_touched = 0;
54057 +               else {
54058 +                       int freed;
54059 +
54060 +                       freed = drain_freelist(searchp, l3, (l3->free_limit +
54061 +                               5 * searchp->num - 1) / (5 * searchp->num));
54062 +                       STATS_ADD_REAPED(searchp, freed);
54063 +               }
54064 +next:
54065 +               cond_resched();
54066 +       }
54067 +       check_irq_on();
54068 +       mutex_unlock(&cache_chain_mutex);
54069 +       next_reap_node();
54070 +out:
54071 +       /* Set up the next iteration */
54072 +       schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
54073 +}
54074 +
54075 +#ifdef CONFIG_PROC_FS
54076 +
54077 +static void print_slabinfo_header(struct seq_file *m)
54078 +{
54079 +       /*
54080 +        * Output format version, so at least we can change it
54081 +        * without _too_ many complaints.
54082 +        */
54083 +#if STATS
54084 +       seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
54085 +#else
54086 +       seq_puts(m, "slabinfo - version: 2.1\n");
54087 +#endif
54088 +       seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
54089 +                "<objperslab> <pagesperslab>");
54090 +       seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
54091 +       seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
54092 +#if STATS
54093 +       seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
54094 +                "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
54095 +       seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
54096 +#endif
54097 +       seq_putc(m, '\n');
54098 +}
54099 +
54100 +static void *s_start(struct seq_file *m, loff_t *pos)
54101 +{
54102 +       loff_t n = *pos;
54103 +       struct list_head *p;
54104 +
54105 +       mutex_lock(&cache_chain_mutex);
54106 +       if (!n)
54107 +               print_slabinfo_header(m);
54108 +       p = cache_chain.next;
54109 +       while (n--) {
54110 +               p = p->next;
54111 +               if (p == &cache_chain)
54112 +                       return NULL;
54113 +       }
54114 +       return list_entry(p, struct kmem_cache, next);
54115 +}
54116 +
54117 +static void *s_next(struct seq_file *m, void *p, loff_t *pos)
54118 +{
54119 +       struct kmem_cache *cachep = p;
54120 +       ++*pos;
54121 +       return cachep->next.next == &cache_chain ?
54122 +               NULL : list_entry(cachep->next.next, struct kmem_cache, next);
54123 +}
54124 +
54125 +static void s_stop(struct seq_file *m, void *p)
54126 +{
54127 +       mutex_unlock(&cache_chain_mutex);
54128 +}
54129 +
54130 +static int s_show(struct seq_file *m, void *p)
54131 +{
54132 +       struct kmem_cache *cachep = p;
54133 +       struct slab *slabp;
54134 +       unsigned long active_objs;
54135 +       unsigned long num_objs;
54136 +       unsigned long active_slabs = 0;
54137 +       unsigned long num_slabs, free_objects = 0, shared_avail = 0;
54138 +       const char *name;
54139 +       char *error = NULL;
54140 +       int node;
54141 +       struct kmem_list3 *l3;
54142 +
54143 +       active_objs = 0;
54144 +       num_slabs = 0;
54145 +       for_each_online_node(node) {
54146 +               l3 = cachep->nodelists[node];
54147 +               if (!l3)
54148 +                       continue;
54149 +
54150 +               check_irq_on();
54151 +               spin_lock_irq(&l3->list_lock);
54152 +
54153 +               list_for_each_entry(slabp, &l3->slabs_full, list) {
54154 +                       if (slabp->inuse != cachep->num && !error)
54155 +                               error = "slabs_full accounting error";
54156 +                       active_objs += cachep->num;
54157 +                       active_slabs++;
54158 +               }
54159 +               list_for_each_entry(slabp, &l3->slabs_partial, list) {
54160 +                       if (slabp->inuse == cachep->num && !error)
54161 +                               error = "slabs_partial inuse accounting error";
54162 +                       if (!slabp->inuse && !error)
54163 +                               error = "slabs_partial/inuse accounting error";
54164 +                       active_objs += slabp->inuse;
54165 +                       active_slabs++;
54166 +               }
54167 +               list_for_each_entry(slabp, &l3->slabs_free, list) {
54168 +                       if (slabp->inuse && !error)
54169 +                               error = "slabs_free/inuse accounting error";
54170 +                       num_slabs++;
54171 +               }
54172 +               free_objects += l3->free_objects;
54173 +               if (l3->shared)
54174 +                       shared_avail += l3->shared->avail;
54175 +
54176 +               spin_unlock_irq(&l3->list_lock);
54177 +       }
54178 +       num_slabs += active_slabs;
54179 +       num_objs = num_slabs * cachep->num;
54180 +       if (num_objs - active_objs != free_objects && !error)
54181 +               error = "free_objects accounting error";
54182 +
54183 +       name = cachep->name;
54184 +       if (error)
54185 +               printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
54186 +
54187 +       seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
54188 +                  name, active_objs, num_objs, cachep->buffer_size,
54189 +                  cachep->num, (1 << cachep->gfporder));
54190 +       seq_printf(m, " : tunables %4u %4u %4u",
54191 +                  cachep->limit, cachep->batchcount, cachep->shared);
54192 +       seq_printf(m, " : slabdata %6lu %6lu %6lu",
54193 +                  active_slabs, num_slabs, shared_avail);
54194 +#if STATS
54195 +       {                       /* list3 stats */
54196 +               unsigned long high = cachep->high_mark;
54197 +               unsigned long allocs = cachep->num_allocations;
54198 +               unsigned long grown = cachep->grown;
54199 +               unsigned long reaped = cachep->reaped;
54200 +               unsigned long errors = cachep->errors;
54201 +               unsigned long max_freeable = cachep->max_freeable;
54202 +               unsigned long node_allocs = cachep->node_allocs;
54203 +               unsigned long node_frees = cachep->node_frees;
54204 +               unsigned long overflows = cachep->node_overflow;
54205 +
54206 +               seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
54207 +                               %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
54208 +                               reaped, errors, max_freeable, node_allocs,
54209 +                               node_frees, overflows);
54210 +       }
54211 +       /* cpu stats */
54212 +       {
54213 +               unsigned long allochit = atomic_read(&cachep->allochit);
54214 +               unsigned long allocmiss = atomic_read(&cachep->allocmiss);
54215 +               unsigned long freehit = atomic_read(&cachep->freehit);
54216 +               unsigned long freemiss = atomic_read(&cachep->freemiss);
54217 +
54218 +               seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
54219 +                          allochit, allocmiss, freehit, freemiss);
54220 +       }
54221 +#endif
54222 +       seq_putc(m, '\n');
54223 +       return 0;
54224 +}
54225 +
54226 +/*
54227 + * slabinfo_op - iterator that generates /proc/slabinfo
54228 + *
54229 + * Output layout:
54230 + * cache-name
54231 + * num-active-objs
54232 + * total-objs
54233 + * object size
54234 + * num-active-slabs
54235 + * total-slabs
54236 + * num-pages-per-slab
54237 + * + further values on SMP and with statistics enabled
54238 + */
54239 +
54240 +const struct seq_operations slabinfo_op = {
54241 +       .start = s_start,
54242 +       .next = s_next,
54243 +       .stop = s_stop,
54244 +       .show = s_show,
54245 +};
54246 +
54247 +#define MAX_SLABINFO_WRITE 128
54248 +/**
54249 + * slabinfo_write - Tuning for the slab allocator
54250 + * @file: unused
54251 + * @buffer: user buffer
54252 + * @count: data length
54253 + * @ppos: unused
54254 + */
54255 +ssize_t slabinfo_write(struct file *file, const char __user * buffer,
54256 +                      size_t count, loff_t *ppos)
54257 +{
54258 +       char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
54259 +       int limit, batchcount, shared, res;
54260 +       struct kmem_cache *cachep;
54261 +
54262 +       if (count > MAX_SLABINFO_WRITE)
54263 +               return -EINVAL;
54264 +       if (copy_from_user(&kbuf, buffer, count))
54265 +               return -EFAULT;
54266 +       kbuf[MAX_SLABINFO_WRITE] = '\0';
54267 +
54268 +       tmp = strchr(kbuf, ' ');
54269 +       if (!tmp)
54270 +               return -EINVAL;
54271 +       *tmp = '\0';
54272 +       tmp++;
54273 +       if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
54274 +               return -EINVAL;
54275 +
54276 +       /* Find the cache in the chain of caches. */
54277 +       mutex_lock(&cache_chain_mutex);
54278 +       res = -EINVAL;
54279 +       list_for_each_entry(cachep, &cache_chain, next) {
54280 +               if (!strcmp(cachep->name, kbuf)) {
54281 +                       if (limit < 1 || batchcount < 1 ||
54282 +                                       batchcount > limit || shared < 0) {
54283 +                               res = 0;
54284 +                       } else {
54285 +                               res = do_tune_cpucache(cachep, limit,
54286 +                                                      batchcount, shared);
54287 +                       }
54288 +                       break;
54289 +               }
54290 +       }
54291 +       mutex_unlock(&cache_chain_mutex);
54292 +       if (res >= 0)
54293 +               res = count;
54294 +       return res;
54295 +}
54296 +
54297 +#ifdef CONFIG_DEBUG_SLAB_LEAK
54298 +
54299 +static void *leaks_start(struct seq_file *m, loff_t *pos)
54300 +{
54301 +       loff_t n = *pos;
54302 +       struct list_head *p;
54303 +
54304 +       mutex_lock(&cache_chain_mutex);
54305 +       p = cache_chain.next;
54306 +       while (n--) {
54307 +               p = p->next;
54308 +               if (p == &cache_chain)
54309 +                       return NULL;
54310 +       }
54311 +       return list_entry(p, struct kmem_cache, next);
54312 +}
54313 +
54314 +static inline int add_caller(unsigned long *n, unsigned long v)
54315 +{
54316 +       unsigned long *p;
54317 +       int l;
54318 +       if (!v)
54319 +               return 1;
54320 +       l = n[1];
54321 +       p = n + 2;
54322 +       while (l) {
54323 +               int i = l/2;
54324 +               unsigned long *q = p + 2 * i;
54325 +               if (*q == v) {
54326 +                       q[1]++;
54327 +                       return 1;
54328 +               }
54329 +               if (*q > v) {
54330 +                       l = i;
54331 +               } else {
54332 +                       p = q + 2;
54333 +                       l -= i + 1;
54334 +               }
54335 +       }
54336 +       if (++n[1] == n[0])
54337 +               return 0;
54338 +       memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
54339 +       p[0] = v;
54340 +       p[1] = 1;
54341 +       return 1;
54342 +}
54343 +
54344 +static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
54345 +{
54346 +       void *p;
54347 +       int i;
54348 +       if (n[0] == n[1])
54349 +               return;
54350 +       for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
54351 +               if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
54352 +                       continue;
54353 +               if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
54354 +                       return;
54355 +       }
54356 +}
54357 +
54358 +static void show_symbol(struct seq_file *m, unsigned long address)
54359 +{
54360 +#ifdef CONFIG_KALLSYMS
54361 +       unsigned long offset, size;
54362 +       char modname[MODULE_NAME_LEN + 1], name[KSYM_NAME_LEN + 1];
54363 +
54364 +       if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
54365 +               seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
54366 +               if (modname[0])
54367 +                       seq_printf(m, " [%s]", modname);
54368 +               return;
54369 +       }
54370 +#endif
54371 +       seq_printf(m, "%p", (void *)address);
54372 +}
54373 +
54374 +static int leaks_show(struct seq_file *m, void *p)
54375 +{
54376 +       struct kmem_cache *cachep = p;
54377 +       struct slab *slabp;
54378 +       struct kmem_list3 *l3;
54379 +       const char *name;
54380 +       unsigned long *n = m->private;
54381 +       int node;
54382 +       int i;
54383 +
54384 +       if (!(cachep->flags & SLAB_STORE_USER))
54385 +               return 0;
54386 +       if (!(cachep->flags & SLAB_RED_ZONE))
54387 +               return 0;
54388 +
54389 +       /* OK, we can do it */
54390 +
54391 +       n[1] = 0;
54392 +
54393 +       for_each_online_node(node) {
54394 +               l3 = cachep->nodelists[node];
54395 +               if (!l3)
54396 +                       continue;
54397 +
54398 +               check_irq_on();
54399 +               spin_lock_irq(&l3->list_lock);
54400 +
54401 +               list_for_each_entry(slabp, &l3->slabs_full, list)
54402 +                       handle_slab(n, cachep, slabp);
54403 +               list_for_each_entry(slabp, &l3->slabs_partial, list)
54404 +                       handle_slab(n, cachep, slabp);
54405 +               spin_unlock_irq(&l3->list_lock);
54406 +       }
54407 +       name = cachep->name;
54408 +       if (n[0] == n[1]) {
54409 +               /* Increase the buffer size */
54410 +               mutex_unlock(&cache_chain_mutex);
54411 +               m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
54412 +               if (!m->private) {
54413 +                       /* Too bad, we are really out */
54414 +                       m->private = n;
54415 +                       mutex_lock(&cache_chain_mutex);
54416 +                       return -ENOMEM;
54417 +               }
54418 +               *(unsigned long *)m->private = n[0] * 2;
54419 +               kfree(n);
54420 +               mutex_lock(&cache_chain_mutex);
54421 +               /* Now make sure this entry will be retried */
54422 +               m->count = m->size;
54423 +               return 0;
54424 +       }
54425 +       for (i = 0; i < n[1]; i++) {
54426 +               seq_printf(m, "%s: %lu ", name, n[2*i+3]);
54427 +               show_symbol(m, n[2*i+2]);
54428 +               seq_putc(m, '\n');
54429 +       }
54430 +
54431 +       return 0;
54432 +}
54433 +
54434 +const struct seq_operations slabstats_op = {
54435 +       .start = leaks_start,
54436 +       .next = s_next,
54437 +       .stop = s_stop,
54438 +       .show = leaks_show,
54439 +};
54440 +#endif
54441 +#endif
54442 +
54443 +/**
54444 + * ksize - get the actual amount of memory allocated for a given object
54445 + * @objp: Pointer to the object
54446 + *
54447 + * kmalloc may internally round up allocations and return more memory
54448 + * than requested. ksize() can be used to determine the actual amount of
54449 + * memory allocated. The caller may use this additional memory, even though
54450 + * a smaller amount of memory was initially specified with the kmalloc call.
54451 + * The caller must guarantee that objp points to a valid object previously
54452 + * allocated with either kmalloc() or kmem_cache_alloc(). The object
54453 + * must not be freed during the duration of the call.
54454 + */
54455 +size_t ksize(const void *objp)
54456 +{
54457 +       if (unlikely(objp == NULL))
54458 +               return 0;
54459 +
54460 +       return obj_size(virt_to_cache(objp));
54461 +}