/* U1memcpy.S: UltraSPARC-I/II/IIi/IIe optimized memcpy.
 *
 * Copyright (C) 1997, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 1996, 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
 */
#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		 clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntry rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExit and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#endif
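
/* VISEntry saves the current %fprs in %o5 and sets FPRS_FEF so the
 * VIS %f registers may be touched; VISExit restores the saved enable
 * state.  The MEMCPY_DEBUG flavour additionally clears the scratch
 * globals and the condition codes so stale values are noticed
 * immediately.  Kernel builds pull the real definitions from
 * asm/visasm.h instead.
 */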
#ifndef EX_LD
#define EX_LD(x)	x
#endif

#ifndef EX_ST
#define EX_ST(x)	x
#endif

#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
#endif
#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef LOAD_BLK
#define LOAD_BLK(addr,dest)	ldda [addr] ASI_BLK_P, dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME	memcpy
#endif
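
/* Everything above is a default that an including file may
 * pre-define: the user-copy variants wrap EX_LD/EX_ST accesses with
 * exception-table entries, point LOAD/STORE at user ASIs, and rename
 * FUNC_NAME, so this one body also serves the copy_{to,from}_user
 * routines.
 */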
#define FREG_FROB(f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
	faligndata	%f1, %f2, %f48;			\
	faligndata	%f2, %f3, %f50;			\
	faligndata	%f3, %f4, %f52;			\
	faligndata	%f4, %f5, %f54;			\
	faligndata	%f5, %f6, %f56;			\
	faligndata	%f6, %f7, %f58;			\
	faligndata	%f7, %f8, %f60;			\
	faligndata	%f8, %f9, %f62;
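
/* FREG_FROB slides an unaligned window along: nine consecutive fp
 * doubles in, eight realigned doubles out in %f48-%f62.  Each
 * faligndata concatenates two neighbouring registers and extracts
 * 8 bytes at the byte offset latched into %gsr by the earlier
 * alignaddr.
 */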
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt)	\
	EX_LD(LOAD_BLK(%src, %fdest));				\
	EX_ST(STORE_BLK(%fsrc, %dest));				\
	add		%src, 0x40, %src;			\
	subcc		%len, 0x40, %len;			\
	be,pn		%xcc, jmptgt;				\
	 add		%dest, 0x40, %dest;
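
/* One step of the main loop: block-load the next 64 source bytes,
 * block-store the previously realigned 64 bytes, and leave through
 * jmptgt once len is exhausted, with the dest increment riding in
 * the branch delay slot.
 */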
#define LOOP_CHUNK1(src, dest, len, branch_dest)	\
	MAIN_LOOP_CHUNK(src, dest, f0, f48, len, branch_dest)
#define LOOP_CHUNK2(src, dest, len, branch_dest)	\
	MAIN_LOOP_CHUNK(src, dest, f16, f48, len, branch_dest)
#define LOOP_CHUNK3(src, dest, len, branch_dest)	\
	MAIN_LOOP_CHUNK(src, dest, f32, f48, len, branch_dest)
#define STORE_SYNC(dest, fsrc)			\
	EX_ST(STORE_BLK(%fsrc, %dest));		\
	add		%dest, 0x40, %dest;
#define STORE_JUMP(dest, fsrc, target)		\
	EX_ST(STORE_BLK(%fsrc, %dest));		\
	add		%dest, 0x40, %dest;	\
	ba,pt		%xcc, target;
#define FINISH_VISCHUNK(dest, f0, f1, left)	\
	subcc		%left, 8, %left;	\
	bl,pn		%xcc, 95f;		\
	 faligndata	%f0, %f1, %f48;		\
	EX_ST(STORE(std, %f48, %dest));		\
	add		%dest, 8, %dest;
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
	subcc		%left, 8, %left;		\
	bl,pn		%xcc, 95f;			\
	 fsrc1		%f0, %f1;
#define UNEVEN_VISCHUNK(dest, f0, f1, left)		\
	UNEVEN_VISCHUNK_LAST(dest, f0, f1, left)	\
	ba,a,pt		%xcc, 93f;
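
/* Once the main loop exits, some realigned data is still live in the
 * fp registers.  FINISH_VISCHUNK drains one aligned double per use,
 * while the UNEVEN_VISCHUNK flavours give up as soon as less than a
 * full double remains and fall back to the 93:/95: tail loops below.
 */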
	.register	%g2,#scratch
	.register	%g3,#scratch

	.text
	.align		64

	.globl		FUNC_NAME
	.type		FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	mov		%o0, %g5
	cmp		%o2, 0
	be,pn		%xcc, 85f
	 or		%o0, %o1, %o3
	cmp		%o2, 16
	blu,a,pn	%xcc, 80f
	 or		%o3, %o2, %o3

	cmp		%o2, (5 * 64)
	blu,pt		%xcc, 70f
	 andcc		%o3, 0x7, %g0
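
	/* Below, %o0/%o1/%o2 are dst/src/len as usual.  %g5 keeps the
	 * original dst so every exit can return it, %o3 is scratch
	 * (and the dst-src displacement on the offset-store paths),
	 * and %o4 appears to double as that displacement early on and
	 * as the 64-byte block counter in the main loop.
	 */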
	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  */
	VISEntry
	/* Is 'dst' already aligned on a 64-byte boundary?  */
	andcc		%o0, 0x3f, %g2
	be,pt		%xcc, 2f
	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
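	/* Worked example (illustrative values only): if dst & 0x3f is
	 * 0x0b, then 0x0b - 0x40 = -0x35, negated 0x35 = 53 bytes of
	 * head copy; the low bits (%g1 = 5) go byte-at-a-time and the
	 * 0x30 kept in %g2 moves in 8-byte faligndata steps.
	 */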
	sub		%o0, %o1, %o4
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2

1:	subcc		%g1, 0x1, %g1
	EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
	EX_ST(STORE(stb, %o3, %o1 + %o4))
	bgu,pt		%xcc, 1b
	 add		%o1, 0x1, %o1
	add		%o1, %o4, %o0

2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1
	EX_LD(LOAD(ldd, %o1, %f4))
1:	EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST(STORE(std, %f0, %o0))
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f0
	EX_ST(STORE(std, %f0, %o0))
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0
	/* Destination is 64-byte aligned.  */
3:	membar		#LoadStore | #StoreStore | #StoreLoad

	andncc		%o4, (0x40 - 1), %o4
	andn		%o1, (0x40 - 1), %o1

	EX_LD(LOAD_BLK(%o1, %f0))
	add		%o1, 0x40, %o1
	EX_LD(LOAD_BLK(%o1, %f16))
	add		%o1, 0x40, %o1
	sub		%o4, 0x80, %o4
	EX_LD(LOAD_BLK(%o1, %f32))
	add		%o1, 0x40, %o1
	/* There are 8 instances of the unrolled loop,
	 * one for each possible alignment of the
	 * source buffer.  Each loop instance is 452
	 * bytes.
	 */
	add		%o3, %lo(1f - 1b), %o3
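
	/* Computed goto: %o3 ends up holding the PC-relative address
	 * of the loop instance matching the source alignment; the
	 * fixed 452-byte instance size is what lets (src & 7) be
	 * scaled into a direct jump offset.
	 */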
1:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f0, %f2, %f48
1:	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_JUMP(o0, f48, 40f) membar #Sync
2:	FREG_FROB(f32,f34,f36,f38,f40,f42,f44,f46,f0)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_JUMP(o0, f48, 48f) membar #Sync
3:	FREG_FROB(f0, f2, f4, f6, f8, f10,f12,f14,f16)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f16,f18,f20,f22,f24,f26,f28,f30,f32)
	STORE_JUMP(o0, f48, 56f) membar #Sync
1:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f2, %f4, %f48
1:	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_JUMP(o0, f48, 41f) membar #Sync
2:	FREG_FROB(f34,f36,f38,f40,f42,f44,f46,f0, f2)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_JUMP(o0, f48, 49f) membar #Sync
3:	FREG_FROB(f2, f4, f6, f8, f10,f12,f14,f16,f18)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f18,f20,f22,f24,f26,f28,f30,f32,f34)
	STORE_JUMP(o0, f48, 57f) membar #Sync
1:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f4, %f6, %f48
1:	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_JUMP(o0, f48, 42f) membar #Sync
2:	FREG_FROB(f36,f38,f40,f42,f44,f46,f0, f2, f4)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_JUMP(o0, f48, 50f) membar #Sync
3:	FREG_FROB(f4, f6, f8, f10,f12,f14,f16,f18,f20)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f20,f22,f24,f26,f28,f30,f32,f34,f36)
	STORE_JUMP(o0, f48, 58f) membar #Sync
1:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f6, %f8, %f48
1:	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_JUMP(o0, f48, 43f) membar #Sync
2:	FREG_FROB(f38,f40,f42,f44,f46,f0, f2, f4, f6)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_JUMP(o0, f48, 51f) membar #Sync
3:	FREG_FROB(f6, f8, f10,f12,f14,f16,f18,f20,f22)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f22,f24,f26,f28,f30,f32,f34,f36,f38)
	STORE_JUMP(o0, f48, 59f) membar #Sync
1:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f8, %f10, %f48
1:	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_JUMP(o0, f48, 44f) membar #Sync
2:	FREG_FROB(f40,f42,f44,f46,f0, f2, f4, f6, f8)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_JUMP(o0, f48, 52f) membar #Sync
3:	FREG_FROB(f8, f10,f12,f14,f16,f18,f20,f22,f24)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f24,f26,f28,f30,f32,f34,f36,f38,f40)
	STORE_JUMP(o0, f48, 60f) membar #Sync
1:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f10, %f12, %f48
1:	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_JUMP(o0, f48, 45f) membar #Sync
2:	FREG_FROB(f42,f44,f46,f0, f2, f4, f6, f8, f10)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_JUMP(o0, f48, 53f) membar #Sync
3:	FREG_FROB(f10,f12,f14,f16,f18,f20,f22,f24,f26)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f26,f28,f30,f32,f34,f36,f38,f40,f42)
	STORE_JUMP(o0, f48, 61f) membar #Sync
1:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f12, %f14, %f48
1:	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_JUMP(o0, f48, 46f) membar #Sync
2:	FREG_FROB(f44,f46,f0, f2, f4, f6, f8, f10,f12)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_JUMP(o0, f48, 54f) membar #Sync
3:	FREG_FROB(f12,f14,f16,f18,f20,f22,f24,f26,f28)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f28,f30,f32,f34,f36,f38,f40,f42,f44)
	STORE_JUMP(o0, f48, 62f) membar #Sync
1:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	LOOP_CHUNK1(o1, o0, o4, 1f)
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	LOOP_CHUNK2(o1, o0, o4, 2f)
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	LOOP_CHUNK3(o1, o0, o4, 3f)
	ba,pt		%xcc, 1b+4
	 faligndata	%f14, %f16, %f48
1:	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_JUMP(o0, f48, 47f) membar #Sync
2:	FREG_FROB(f46,f0, f2, f4, f6, f8, f10,f12,f14)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_JUMP(o0, f48, 55f) membar #Sync
3:	FREG_FROB(f14,f16,f18,f20,f22,f24,f26,f28,f30)
	STORE_SYNC(o0, f48) membar #Sync
	FREG_FROB(f30,f32,f34,f36,f38,f40,f42,f44,f46)
	STORE_JUMP(o0, f48, 63f) membar #Sync
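
/* Loop exit chunks.  40f-47f drain a loop that stopped with live
 * data in %f0-%f14, 48f-55f one that stopped in %f16-%f30, and
 * 56f-63f one that stopped in %f32-%f46; each flushes whatever
 * realigned doubles survive and then drops into the byte tail.
 */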
40:	FINISH_VISCHUNK(o0, f0, f2, g3)
41:	FINISH_VISCHUNK(o0, f2, f4, g3)
42:	FINISH_VISCHUNK(o0, f4, f6, g3)
43:	FINISH_VISCHUNK(o0, f6, f8, g3)
44:	FINISH_VISCHUNK(o0, f8, f10, g3)
45:	FINISH_VISCHUNK(o0, f10, f12, g3)
46:	FINISH_VISCHUNK(o0, f12, f14, g3)
47:	UNEVEN_VISCHUNK(o0, f14, f0, g3)
48:	FINISH_VISCHUNK(o0, f16, f18, g3)
49:	FINISH_VISCHUNK(o0, f18, f20, g3)
50:	FINISH_VISCHUNK(o0, f20, f22, g3)
51:	FINISH_VISCHUNK(o0, f22, f24, g3)
52:	FINISH_VISCHUNK(o0, f24, f26, g3)
53:	FINISH_VISCHUNK(o0, f26, f28, g3)
54:	FINISH_VISCHUNK(o0, f28, f30, g3)
55:	UNEVEN_VISCHUNK(o0, f30, f0, g3)
56:	FINISH_VISCHUNK(o0, f32, f34, g3)
57:	FINISH_VISCHUNK(o0, f34, f36, g3)
58:	FINISH_VISCHUNK(o0, f36, f38, g3)
59:	FINISH_VISCHUNK(o0, f38, f40, g3)
60:	FINISH_VISCHUNK(o0, f40, f42, g3)
61:	FINISH_VISCHUNK(o0, f42, f44, g3)
62:	FINISH_VISCHUNK(o0, f44, f46, g3)
63:	UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
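
/* Common tails: 93f keeps realigning and storing one double at a
 * time while at least 8 bytes remain in %g3, 95f finishes the last
 * %o2 stray bytes, and both fall into the 2f exit sequence.
 */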
93:	EX_LD(LOAD(ldd, %o1, %f2))
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f0, %f2, %f8
	EX_ST(STORE(std, %f8, %o0))
	bl,pn		%xcc, 95f
	 add		%o0, 8, %o0
	EX_LD(LOAD(ldd, %o1, %f0))
	add		%o1, 8, %o1
	subcc		%g3, 8, %g3
	faligndata	%f2, %f0, %f8
	EX_ST(STORE(std, %f8, %o0))
	bge,pt		%xcc, 93b
	 add		%o0, 8, %o0

95:	brz,pt		%o2, 2f
	 mov		%g1, %o1
1:	EX_LD(LOAD(ldub, %o1, %o3))
	add		%o1, 1, %o1
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %o3, %o0))
	bne,pt		%icc, 1b
	 add		%o0, 1, %o0
2:	membar		#StoreLoad | #StoreStore
	VISExit
	retl
	 mov		EX_RETVAL(%g5), %o0
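
/* Every exit returns the original dst saved in %g5.  The EX_RETVAL()
 * hook is there so including files can substitute their own
 * return-value convention (the user-copy wrappers presumably map a
 * fully successful copy to 0 instead).
 */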
70:	/* 16 < len <= (5 * 64) */
	bne,pn		%xcc, 75f
	 sub		%o0, %o1, %o3

72:	andn		%o2, 0xf, %o4
	and		%o2, 0xf, %o2
1:	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
	subcc		%o4, 0x10, %o4
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3))
	bgu,pt		%xcc, 1b
	 add		%o1, 0x8, %o1
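
	/* The dst-src displacement trick: with %o3 = dst - src, the
	 * store address is just %o1 + %o3, so a single pointer
	 * increment per step advances both streams.
	 */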
73:	andcc		%o2, 0x8, %g0
	be,pt		%xcc, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5))
	sub		%o2, 0x8, %o2
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%xcc, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5))
	sub		%o2, 0x4, %o2
	EX_ST(STORE(stw, %o5, %o1 + %o3))
	add		%o1, 0x4, %o1

1:	cmp		%o2, 0
	be,pt		%xcc, 85f
	 nop
	ba,pt		%xcc, 90f
	 nop
75:	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	EX_LD(LOAD(ldub, %o1, %o5))
	subcc		%g1, 1, %g1
	EX_ST(STORE(stb, %o5, %o1 + %o3))
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1
	EX_LD(LOAD(ldx, %o1, %g2))
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
	EX_ST(STORE(stx, %o5, %o0))
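
	/* Source and destination disagree modulo 8 here, so each
	 * output doubleword is assembled from two adjacent aligned
	 * ldx values (%g2/%g3); the merged result travels to the
	 * store in %o5.  The arithmetic between the loads and the
	 * store is the usual sllx/srlx shift-and-or merge.
	 */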
80:	/* 0 < len <= 16 */
	andcc		%o3, 0x3, %g0
	bne,pn		%xcc, 90f
	 sub		%o0, %o1, %o3

1:	EX_LD(LOAD(lduw, %o1, %g1))
	subcc		%o2, 4, %o2
	EX_ST(STORE(stw, %g1, %o1 + %o3))
	bgu,pt		%xcc, 1b
	 add		%o1, 4, %o1
85:	retl
	 mov		EX_RETVAL(%g5), %o0
90:	EX_LD(LOAD(ldub, %o1, %g1))
	subcc		%o2, 1, %o2
	EX_ST(STORE(stb, %g1, %o1 + %o3))
	bgu,pt		%xcc, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%g5), %o0

	.size		FUNC_NAME, .-FUNC_NAME