X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fx86_64%2Flib%2Fcopy_user.S;fp=arch%2Fx86_64%2Flib%2Fcopy_user.S;h=f64569b83b548605c51d9ff6d31f4542eb9b59f9;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=962f3a693c5ec6332db8eb1a4875d7a5c48f34d5;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/arch/x86_64/lib/copy_user.S b/arch/x86_64/lib/copy_user.S
index 962f3a693..f64569b83 100644
--- a/arch/x86_64/lib/copy_user.S
+++ b/arch/x86_64/lib/copy_user.S
@@ -4,9 +4,6 @@
  * Functions to copy from and to user space.
  */

-#include
-#include
-
 #define FIX_ALIGNMENT 1

 #include
@@ -15,8 +12,9 @@
 #include

 /* Standard copy_to_user with segment limit checking */
-ENTRY(copy_to_user)
-	CFI_STARTPROC
+	.globl copy_to_user
+	.p2align 4
+copy_to_user:
 	GET_THREAD_INFO(%rax)
 	movq %rdi,%rcx
 	addq %rdx,%rcx
@@ -27,11 +25,9 @@ ENTRY(copy_to_user)
 	.byte 0xe9	/* 32bit jump */
 	.long .Lcug-1f
 1:
-	CFI_ENDPROC
-ENDPROC(copy_to_user)

 	.section .altinstr_replacement,"ax"
-3:	.byte 0xe9	/* replacement jmp with 32 bit immediate */
+3:	.byte 0xe9	/* replacement jmp with 8 bit immediate */
 	.long copy_user_generic_c-1b	/* offset */
 	.previous
 	.section .altinstructions,"a"
@@ -44,8 +40,9 @@ ENDPROC(copy_to_user)
 	.previous

 /* Standard copy_from_user with segment limit checking */
-ENTRY(copy_from_user)
-	CFI_STARTPROC
+	.globl copy_from_user
+	.p2align 4
+copy_from_user:
 	GET_THREAD_INFO(%rax)
 	movq %rsi,%rcx
 	addq %rdx,%rcx
@@ -53,13 +50,10 @@ ENTRY(copy_from_user)
 	cmpq threadinfo_addr_limit(%rax),%rcx
 	jae bad_from_user
 	/* FALL THROUGH to copy_user_generic */
-	CFI_ENDPROC
-ENDPROC(copy_from_user)

 	.section .fixup,"ax"
 	/* must zero dest */
 bad_from_user:
-	CFI_STARTPROC
 	movl %edx,%ecx
 	xorl %eax,%eax
 	rep
@@ -67,8 +61,6 @@ bad_from_user:
 bad_to_user:
 	movl %edx,%eax
 	ret
-	CFI_ENDPROC
-END(bad_from_user)
 	.previous


@@ -83,8 +75,9 @@ END(bad_from_user)
  * Output:
  *  eax uncopied bytes or 0 if successful.
  */
-ENTRY(copy_user_generic)
-	CFI_STARTPROC
+	.globl copy_user_generic
+	.p2align 4
+copy_user_generic:
 	.byte 0x66,0x66,0x90	/* 5 byte nop for replacement jump */
 	.byte 0x66,0x90
 1:
@@ -102,8 +95,6 @@ ENTRY(copy_user_generic)
 	.previous
 .Lcug:
 	pushq %rbx
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rbx, 0
 	xorl %eax,%eax		/*zero for the exception handler */

 #ifdef FIX_ALIGNMENT
@@ -177,13 +168,9 @@ ENTRY(copy_user_generic)
 	decl %ecx
 	jnz .Lloop_1

-	CFI_REMEMBER_STATE
 .Lende:
 	popq %rbx
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_RESTORE rbx
 	ret
-	CFI_RESTORE_STATE

 #ifdef FIX_ALIGNMENT
 	/* align destination */
@@ -274,9 +261,6 @@ ENTRY(copy_user_generic)
 .Le_zero:
 	movq %rdx,%rax
 	jmp .Lende
-	CFI_ENDPROC
-ENDPROC(copy_user_generic)
-

 	/* Some CPUs run faster using the string copy instructions.
 	   This is also a lot simpler. Use them when possible.
@@ -298,7 +282,6 @@ ENDPROC(copy_user_generic)
  * this please consider this.
  */
 copy_user_generic_c:
-	CFI_STARTPROC
 	movl %edx,%ecx
 	shrl $3,%ecx
 	andl $7,%edx
@@ -311,8 +294,6 @@ copy_user_generic_c:
 	ret
 3:	lea (%rdx,%rcx,8),%rax
 	ret
-	CFI_ENDPROC
-END(copy_user_generic_c)

 	.section __ex_table,"a"
 	.quad 1b,3b
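
Note (not part of the patch): the diff only changes how the symbols are declared. It replaces the ENTRY()/ENDPROC() linkage macros and the dwarf2 CFI_* unwind annotations with hand-written .globl/.p2align directives and plain labels, so the copy routines themselves are untouched; the cost is that these functions lose their DWARF unwind information, which affects stack unwinding and debugging rather than runtime behaviour. The fragment below is a minimal sketch of roughly what ENTRY(name) plus CFI_STARTPROC/CFI_ENDPROC expanded to in this kernel era, assuming the <linux/linkage.h> and <asm/dwarf2.h> definitions of the time (exact alignment padding and whether the .cfi_* directives are emitted depend on the kernel configuration). The symbol name example_stub and the trivial body are purely illustrative.

	/* Sketch only: approximate expansion of
	 *   ENTRY(example_stub) / CFI_STARTPROC ... CFI_ENDPROC
	 * This patch instead writes the .globl/.p2align lines by hand and
	 * omits the .cfi_* directives entirely.
	 */
	.text
	.globl example_stub
	.p2align 4
example_stub:
	.cfi_startproc		/* emitted by CFI_STARTPROC when unwind info is on */
	xorl %eax,%eax		/* trivial body: report 0 uncopied bytes */
	ret
	.cfi_endproc		/* emitted by CFI_ENDPROC */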