2 * arch/alpha/lib/memmove.S
4 * Barely optimized memmove routine for Alpha EV5.
6 * This is hand-massaged output from the original memcpy.c. We defer to
7 * memcpy whenever possible; the backwards copy loops are not unrolled.
/*
 * Overlap test.  Alpha/OSF calling convention: $16 = dest, $17 = src,
 * $18 = n.  NOTE(review): $4 and $5 are presumably dest+n and src+n,
 * computed on lines not visible in this chunk -- confirm against the
 * full file before relying on this.
 */
20 cmpule $4,$17,$1 /* dest + n <= src */
21 cmpule $5,$16,$2 /* dest >= src + n */
/* NOTE(review): the value masked here is produced outside this chunk;
 * per the existing comment it checks src/dest co-alignment mod 8
 * ($2 == 0 => both pointers share the same low 3 bits). */
28 and $2,7,$2 /* Test for src/dest co-alignment. */
/* If the regions do not overlap dangerously, take the forward-copy
 * path.  NOTE(review): $3 is set by an instruction not visible here. */
31 bne $3,$memmove_up /* dest < src */

/*
 * Backwards (descending-address) copy: used when dest > src and the
 * regions overlap.  Structure visible below:
 *   1. byte-at-a-time head loop until the copy is 8-byte aligned,
 *   2. unrolled-by-nothing quadword loop while >= 8 bytes remain,
 *   3. byte-at-a-time tail for the remainder.
 * NOTE(review): loop bodies and the instructions that set $1 and $6
 * lie outside this chunk; comments on the counters are assumptions.
 */
36 beq $1,$skip_aligned_byte_loop_head_dn
38 $aligned_byte_loop_head_dn:
/* $6 presumably counts head bytes still to copy -- TODO confirm. */
55 bne $6,$aligned_byte_loop_head_dn
57 $skip_aligned_byte_loop_head_dn:
/* $18 = bytes remaining; negative here means < 8 left, skip words. */
59 blt $18,$skip_aligned_word_loop_dn
61 $aligned_word_loop_dn:
/* Loop while a full quadword still remains ($18 >= 0 after the
 * decrement done inside the unseen loop body). */
70 bge $18,$aligned_word_loop_dn
72 $skip_aligned_word_loop_dn:
/* Any leftover bytes (< 8) go through the byte tail loop. */
74 bgt $18,$byte_loop_tail_dn
/* Second entry to the byte tail: reached from the misaligned
 * downward path (not visible in this chunk). */
98 bgt $18,$byte_loop_tail_dn

/*
 * Forwards (ascending-address) copy: dest < src, or no overlap.
 * Mirrors the downward path: alignment head, quadword loop, byte tail.
 * If src/dest are not co-aligned ($2 != 0), punt to the misaligned
 * forward copier.
 */
104 bne $2,$misaligned_up
105 beq $1,$skip_aligned_byte_loop_head_up
107 $aligned_byte_loop_head_up:
/* $6 presumably counts head bytes still to copy -- TODO confirm. */
124 bne $6,$aligned_byte_loop_head_up
126 $skip_aligned_byte_loop_head_up:
/* $18 = bytes remaining; negative means < 8 left, skip word loop. */
128 blt $18,$skip_aligned_word_loop_up
130 $aligned_word_loop_up:
/* Loop while a full quadword still remains (decrement of $18 occurs
 * in the unseen loop body). */
139 bge $18,$aligned_word_loop_up
141 $skip_aligned_word_loop_up:
/* Leftover bytes (< 8) handled one at a time. */
143 bgt $18,$byte_loop_tail_up
/* Second entry to the byte tail from the misaligned upward path
 * (body not visible in this chunk). */
168 bgt $18,$byte_loop_tail_up