1 /* U3memcpy.S: UltraSparc-III optimized memcpy.
3 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
7 #include <asm/visasm.h>
/* ASI (address space identifier) selecting 64-byte block stores to
 * the primary address space; consumed by STORE_BLK below.
 */
10 #define ASI_BLK_P 0xf0
/* Enter/leave "half" VIS state: save %fprs into %o5, enable the FPU
 * via FPRS_FEF, and restore the saved %fprs on exit.  The first
 * flavor additionally clears %g1-%g3 and the condition codes (a
 * debugging aid so stale state cannot leak into the copy loops).
 * NOTE(review): VISEntryHalf/VISExitHalf are defined twice here; in
 * the complete file the two pairs sit on opposite arms of a
 * preprocessor conditional that is not visible in this chunk --
 * confirm against the full source before editing.
 */
13 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
14 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
15 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
17 #define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
18 #define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
/* EX_RETVAL is the identity in this build; presumably the
 * copy_{to,from}_user variants override it together with EX_LD/EX_ST
 * (whose definitions are not visible in this chunk) to attach
 * exception-table fault handling -- TODO confirm.
 */
31 #define EX_RETVAL(x) x
/* Thin wrappers so one body can be assembled with different memory
 * access flavors: LOAD/STORE expand to plain "op [addr], reg" /
 * "op reg, [addr]" forms; STORE_BLK emits a 64-byte block store
 * (stda) through ASI_BLK_P.
 */
35 #define LOAD(type,addr,dest) type [addr], dest
39 #define STORE(type,src,addr) type src, [addr]
43 #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_P
47 #define FUNC_NAME U3memcpy
/* Declare %g2/%g3 as scratch so the assembler accepts their use
 * without "register not declared" diagnostics.
 */
58 .register %g2,#scratch
59 .register %g3,#scratch
61 /* Special/non-trivial issues of this code:
63 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
64 * 2) Only low 32 FPU registers are used so that only the
65 * lower half of the FPU register set is dirtied by this
66 * code. This is especially important in the kernel.
67 * 3) This code never prefetches cachelines past the end
68 * of the source buffer.
74 /* The cheetah's flexible spine, oversized liver, enlarged heart,
75 * slender muscular body, and claws make it the swiftest hunter
76 * in Africa and the fastest animal on land. Can reach speeds
77 * of up to 2.4GB per second.
81 FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
/* NOTE(review): this chunk is a fragmentary view of the routine --
 * many branches, delay slots, labels and the VISEntryHalf/VISExitHalf
 * call sites fall outside the visible lines.  Comments below describe
 * only what the visible instructions establish; register-role notes
 * that depend on missing setup code are marked as assumptions.
 */
95 /* Clobbers o5/g1/g2/g3/g7/icc/xcc. We must preserve
96 * o5 from here until we hit VISExitHalf.
100 /* Is 'dst' already aligned on an 64-byte boundary? */
104 /* Compute abs((dst & 0x3f) - 0x40) into %g2. This is the number
105 * of bytes to copy to make 'dst' 64-byte aligned. We pre-
106 * subtract this from 'len'.
/* Byte-at-a-time loop that advances dst up to the next 64-byte
 * boundary.  A single source pointer (%o1) addresses both buffers:
 * %o4 presumably holds (dst - src) so "%o1 + %o4" is the dst byte --
 * the setup that computes it is not visible here; confirm.
 */
116 1: subcc %g1, 0x1, %g1
117 EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
118 EX_ST(STORE(stb, %o3, %o1 + %o4))
/* alignaddr rounds %o1 down to an 8-byte boundary and latches the
 * misalignment amount in %gsr for the faligndata merges below.
 */
127 alignaddr %o1, %g0, %o1
129 EX_LD(LOAD(ldd, %o1, %f4))
/* Two-deep software pipeline: each visible pair loads the next
 * source doubleword and faligndata-merges it with the previous one
 * to emit one aligned 8-byte store, alternating %f4/%f6 as the
 * carried register.
 */
130 1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
133 faligndata %f4, %f6, %f0
134 EX_ST(STORE(std, %f0, %o0))
138 EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
141 faligndata %f6, %f4, %f2
142 EX_ST(STORE(std, %f2, %o0))
/* Big-copy entry: prefetch the first 512 bytes of source for
 * one-time read, round len down to a whole number of 64-byte blocks
 * (%o4 = %o2 & ~0x3f), and prime the pipeline by loading the first
 * cacheline into %f0-%f14 while producing merged data in %f16-%f26.
 */
146 3: LOAD(prefetch, %o1 + 0x000, #one_read)
147 LOAD(prefetch, %o1 + 0x040, #one_read)
148 andn %o2, (0x40 - 1), %o4
149 LOAD(prefetch, %o1 + 0x080, #one_read)
150 LOAD(prefetch, %o1 + 0x0c0, #one_read)
151 LOAD(prefetch, %o1 + 0x100, #one_read)
152 EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
153 LOAD(prefetch, %o1 + 0x140, #one_read)
154 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
155 LOAD(prefetch, %o1 + 0x180, #one_read)
156 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
157 LOAD(prefetch, %o1 + 0x1c0, #one_read)
158 faligndata %f0, %f2, %f16
159 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
160 faligndata %f2, %f4, %f18
161 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
162 faligndata %f4, %f6, %f20
163 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
164 faligndata %f6, %f8, %f22
166 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
167 faligndata %f8, %f10, %f24
168 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
169 faligndata %f10, %f12, %f26
170 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
/* Steady-state loop body (loop control lines not visible): load the
 * next 64-byte source line into %f0-%f14 while finishing the merged
 * line in %f16-%f30, flush it with one block store, and keep the
 * prefetch 0x1c0 bytes ahead of the loads.  Only %f0-%f31 (the low
 * half of the FP file) are touched, per the header comment above.
 */
181 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
182 faligndata %f12, %f14, %f28
183 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
184 faligndata %f14, %f0, %f30
185 EX_ST(STORE_BLK(%f16, %o0))
186 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
187 faligndata %f0, %f2, %f16
190 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
191 faligndata %f2, %f4, %f18
192 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
193 faligndata %f4, %f6, %f20
194 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
196 faligndata %f6, %f8, %f22
197 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
199 faligndata %f8, %f10, %f24
200 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
201 LOAD(prefetch, %o1 + 0x1c0, #one_read)
202 faligndata %f10, %f12, %f26
206 /* Finally we copy the last full 64-byte block. */
/* Pipeline drain: same load/merge/block-store pattern as the loop
 * body but with no further prefetching, so no cacheline past the end
 * of src is touched (see header note 3 above).
 */
208 EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
209 faligndata %f12, %f14, %f28
210 EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
211 faligndata %f14, %f0, %f30
212 EX_ST(STORE_BLK(%f16, %o0))
213 EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
214 faligndata %f0, %f2, %f16
215 EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
216 faligndata %f2, %f4, %f18
217 EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
218 faligndata %f4, %f6, %f20
219 EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
220 faligndata %f6, %f8, %f22
221 EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
222 faligndata %f8, %f10, %f24
226 EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
227 1: faligndata %f10, %f12, %f26
228 faligndata %f12, %f14, %f28
229 faligndata %f14, %f0, %f30
230 EX_ST(STORE_BLK(%f16, %o0))
235 /* Now we copy the (len modulo 64) bytes at the end.
236 * Note how we borrow the %f0 loaded above.
238 * Also notice how this code is careful not to perform a
239 * load past the end of the src buffer.
/* Doubleword tail: same faligndata merge as the alignment loop,
 * alternating %f0/%f2 as the carried register; loop control and the
 * "last doubleword" guard are outside the visible lines.
 */
250 EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
252 1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
255 faligndata %f0, %f2, %f8
256 EX_ST(STORE(std, %f8, %o0))
259 EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
262 faligndata %f2, %f0, %f8
263 EX_ST(STORE(std, %f8, %o0))
267 /* If anything is left, we copy it one byte at a time.
268 * Note that %g1 is (src & 0x3) saved above before the
269 * alignaddr was performed.
/* Sub-8-byte tail ladder: each andcc tests one bit of the remaining
 * length (8/4/2/1) and copies that many bytes.  As in the alignment
 * loop, "%o1 + %o3" addresses the dst byte -- %o3 presumably holds
 * (dst - src) here; the instruction that computes it is not visible,
 * so verify against the full source.
 */
283 EX_LD(LOAD(ldx, %o1, %o5))
284 EX_ST(STORE(stx, %o5, %o1 + %o3))
287 1: andcc %o2, 0x4, %g0
290 EX_LD(LOAD(lduw, %o1, %o5))
291 EX_ST(STORE(stw, %o5, %o1 + %o3))
294 1: andcc %o2, 0x2, %g0
297 EX_LD(LOAD(lduh, %o1, %o5))
298 EX_ST(STORE(sth, %o5, %o1 + %o3))
301 1: andcc %o2, 0x1, %g0
304 EX_LD(LOAD(ldub, %o1, %o5))
306 EX_ST(STORE(stb, %o5, %o1 + %o3))
309 70: /* 16 < len <= 64 */
/* Medium path: copy 16 bytes per iteration with paired 64-bit
 * loads/stores (%o4 = remaining 16-byte chunks), then fall through
 * to the 8/4/.../1 ladder at 73: for the leftovers.
 */
316 1: subcc %o4, 0x10, %o4
317 EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
318 EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
319 EX_ST(STORE(stx, %o5, %o1 + %o3))
321 EX_ST(STORE(stx, %g1, %o1 + %o3))
324 73: andcc %o2, 0x8, %g0
328 EX_LD(LOAD(ldx, %o1, %o5))
329 EX_ST(STORE(stx, %o5, %o1 + %o3))
331 1: andcc %o2, 0x4, %g0
335 EX_LD(LOAD(lduw, %o1, %o5))
336 EX_ST(STORE(stw, %o5, %o1 + %o3))
352 EX_LD(LOAD(ldub, %o1, %o5))
353 EX_ST(STORE(stb, %o5, %o1 + %o3))
/* Integer-register unaligned path: reads aligned 64-bit words into
 * %g2/%g3 and stores merged results; the shift/or instructions that
 * combine them into %o5 are not visible in this chunk.
 */
369 EX_LD(LOAD(ldx, %o1, %g2))
373 1: EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
378 EX_ST(STORE(stx, %o5, %o0))
391 80: /* 0 < len <= 16 */
/* Small path: word-at-a-time copy, then a final byte loop; each exit
 * returns via "mov EX_RETVAL(%g5), %o0" -- %g5 presumably holds the
 * original dst saved at entry (the save is outside the visible
 * lines; memcpy returns dst).  TODO confirm.
 */
398 EX_LD(LOAD(lduw, %o1, %g1))
399 EX_ST(STORE(stw, %g1, %o1 + %o3))
404 mov EX_RETVAL(%g5), %o0
409 EX_LD(LOAD(ldub, %o1, %g1))
410 EX_ST(STORE(stb, %g1, %o1 + %o3))
414 mov EX_RETVAL(%g5), %o0