~ubuntu-branches/ubuntu/quantal/linux-linaro-mx51/quantal

Viewing changes to arch/x86/lib/copy_user_64.S

  • Committer: Package Import Robot
  • Author(s): John Rigby
  • Date: 2011-09-26 10:44:23 UTC
  • Revision ID: package-import@ubuntu.com-20110926104423-3o58a3c1bj7x00rs

Tags: 3.0.0-1007.9

[ John Rigby ]
  * Enable crypto modules and remove crypto-modules from
    exclude-module files (LP: #826021)

--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -15,23 +15,30 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
 
-        .macro ALTERNATIVE_JUMP feature,orig,alt
+/*
+ * By placing feature2 after feature1 in altinstructions section, we logically
+ * implement:
+ * If CPU has feature2, jmp to alt2 is used
+ * else if CPU has feature1, jmp to alt1 is used
+ * else jmp to orig is used.
+ */
+        .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
 0:
         .byte 0xe9      /* 32bit jump */
         .long \orig-1f  /* by default jump to orig */
 1:
         .section .altinstr_replacement,"ax"
 2:      .byte 0xe9                      /* near jump with 32bit immediate */
-        .long \alt-1b /* offset */   /* or alternatively to alt */
+        .long \alt1-1b /* offset */   /* or alternatively to alt1 */
+3:      .byte 0xe9                      /* near jump with 32bit immediate */
+        .long \alt2-1b /* offset */   /* or alternatively to alt2 */
         .previous
+
         .section .altinstructions,"a"
-        .align 8
-        .quad  0b
-        .quad  2b
-        .word  \feature                 /* when feature is set */
-        .byte  5
-        .byte  5
+        altinstruction_entry 0b,2b,\feature1,5,5
+        altinstruction_entry 0b,3b,\feature2,5,5
         .previous
         .endm
 
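The reworked macro still plants a single 5-byte jmp at label 0:, but now emits two altinstruction_entry records for it. At boot the alternatives code walks the entries in order and re-patches the site for every feature bit the CPU has, so the later ERMS entry overrides the earlier REP_GOOD one when both are set, which is the precedence the new comment describes. Below is a minimal user-space C sketch of that ordering; the stub routines and the pick() helper are illustrative assumptions, not kernel APIs:

/*
 * Sketch (not kernel code): models how in-order patching of the two
 * altinstruction_entry records picks among three copy variants.
 */
#include <stdio.h>
#include <stdbool.h>

static void copy_unrolled(void) { puts("copy_user_generic_unrolled"); }
static void copy_string(void)   { puts("copy_user_generic_string"); }
static void copy_enhanced(void) { puts("copy_user_enhanced_fast_string"); }

/* Entries are applied in order; a later matching entry overwrites the
 * jump target installed by an earlier one. */
static void (*pick(bool rep_good, bool erms))(void)
{
    void (*target)(void) = copy_unrolled;  /* default: orig */
    if (rep_good)
        target = copy_string;              /* feature1 entry applies */
    if (erms)
        target = copy_enhanced;            /* feature2 entry applies last, wins */
    return target;
}

int main(void)
{
    pick(false, false)();  /* -> copy_user_generic_unrolled */
    pick(true,  false)();  /* -> copy_user_generic_string */
    pick(true,  true)();   /* -> copy_user_enhanced_fast_string */
    return 0;
}

Run, the three calls print the unrolled, string, and enhanced variants in turn, mirroring the else-if chain in the macro's comment.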
@@ -73,7 +80,9 @@
         jc bad_to_user
         cmpq TI_addr_limit(%rax),%rcx
         ja bad_to_user
-        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,        \
+                copy_user_generic_unrolled,copy_user_generic_string,   \
+                copy_user_enhanced_fast_string
         CFI_ENDPROC
 ENDPROC(_copy_to_user)
 
@@ -86,7 +95,9 @@
         jc bad_from_user
         cmpq TI_addr_limit(%rax),%rcx
         ja bad_from_user
-        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
+        ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS,        \
+                copy_user_generic_unrolled,copy_user_generic_string,   \
+                copy_user_enhanced_fast_string
         CFI_ENDPROC
 ENDPROC(_copy_from_user)
 
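Both _copy_to_user and _copy_from_user now dispatch through the extended macro, with X86_FEATURE_ERMS selecting the new routine. That flag corresponds to the Enhanced REP MOVSB/STOSB bit: CPUID leaf 7, subleaf 0, EBX bit 9. As an aside, the same bit can be inspected from user space with a toolchain that ships <cpuid.h>, as GCC and Clang do; a small stand-alone check might look like this:

/* Sketch: query the CPUID bit behind X86_FEATURE_ERMS (leaf 7, subleaf 0,
 * EBX bit 9). x86-only; assumes a GCC/Clang <cpuid.h>. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* __get_cpuid_count returns 0 if the leaf is unsupported. */
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 7 not supported");
        return 1;
    }
    printf("ERMS (enhanced REP MOVSB/STOSB): %s\n",
           (ebx >> 9) & 1 ? "yes" : "no");
    return 0;
}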
@@ -255,3 +266,37 @@
         .previous
         CFI_ENDPROC
 ENDPROC(copy_user_generic_string)
+
+/*
+ * Some CPUs are adding enhanced REP MOVSB/STOSB instructions.
+ * It's recommended to use enhanced REP MOVSB/STOSB if it's enabled.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * eax uncopied bytes or 0 if successful.
+ */
+ENTRY(copy_user_enhanced_fast_string)
+        CFI_STARTPROC
+        andl %edx,%edx
+        jz 2f
+        movl %edx,%ecx
+1:      rep
+        movsb
+2:      xorl %eax,%eax
+        ret
+
+        .section .fixup,"ax"
+12:     movl %ecx,%edx          /* ecx is zerorest also */
+        jmp copy_user_handle_tail
+        .previous
+
+        .section __ex_table,"a"
+        .align 8
+        .quad 1b,12b
+        .previous
+        CFI_ENDPROC
+ENDPROC(copy_user_enhanced_fast_string)
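The new copy_user_enhanced_fast_string routine is essentially a bounds-checked REP MOVSB: it tests the count for zero (andl %edx,%edx; jz 2f), copies %ecx bytes with rep movsb, and returns 0 in %eax, while the .fixup/__ex_table machinery converts a fault mid-copy into a jump to copy_user_handle_tail with the remaining count. The kernel's fault handling cannot be reproduced in plain user code, but the instruction-level core can; the sketch below is illustrative only (rep_movsb_copy is a made-up name, x86-only, and it returns the destination pointer, memcpy-style, rather than an uncopied-byte count):

/*
 * User-space sketch of the core of copy_user_enhanced_fast_string:
 * test the count for zero, then copy with a single REP MOVSB.
 */
#include <stdio.h>
#include <stddef.h>

static void *rep_movsb_copy(void *dst, const void *src, size_t n)
{
    void *d = dst;

    if (n == 0)                 /* mirrors "andl %edx,%edx; jz 2f" */
        return dst;
    /* REP MOVSB copies RCX bytes from [RSI] to [RDI], advancing all three. */
    asm volatile("rep movsb"
                 : "+D"(d), "+S"(src), "+c"(n)
                 : /* no other inputs */
                 : "memory");
    return dst;
}

int main(void)
{
    char buf[16] = {0};

    rep_movsb_copy(buf, "hello, erms", 12);   /* 11 chars + NUL */
    printf("%s\n", buf);
    return 0;
}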