 * BK Id: SCCS/s.misc.S 1.78 01/07/03 22:00:10 paulus
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 * add_reloc_offset(x) returns x + reloc_offset().
_GLOBAL(add_reloc_offset)
 * sub_reloc_offset(x) returns x - reloc_offset().
_GLOBAL(sub_reloc_offset)
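/*
 * Worked example (illustrative only): if the kernel was linked to run
 * at KERNELBASE (0xc0000000) but is still executing at physical address
 * 0, reloc_offset() evaluates to -KERNELBASE (mod 2^32), so
 * add_reloc_offset(&var) yields the address where var actually lives
 * right now, and sub_reloc_offset() converts such an address back to
 * its link-time value.
 */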
 * reloc_got2 runs through the .got2 section adding an offset
	lis r7,__got2_start@ha
	addi r7,r7,__got2_start@l
	addi r8,r8,__got2_end@l
 * called with r3 = data offset and r4 = CPU number
_GLOBAL(identify_cpu)
	addis r8,r3,cpu_specs@ha
	addi r8,r8,cpu_specs@l
	lwz r5,CPU_SPEC_PVR_MASK(r8)
	lwz r6,CPU_SPEC_PVR_VALUE(r8)
	addi r8,r8,CPU_SPEC_ENTRY_SIZE
	addis r6,r3,cur_cpu_spec@ha
	addi r6,r6,cur_cpu_spec@l
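/*
 * Roughly equivalent C for the PVR-matching loop above (a sketch only;
 * it ignores the r3 relocation offset the assembly applies to every
 * address, and pvr_mask/pvr_value just name the fields behind the
 * CPU_SPEC_PVR_* offsets).  The table ends with a catch-all entry whose
 * mask is 0, so the loop always terminates:
 *
 *	struct cpu_spec *s = cpu_specs;
 *	unsigned int pvr = mfspr(PVR);
 *	while ((pvr & s->pvr_mask) != s->pvr_value)
 *		s++;
 *	cur_cpu_spec[cpu] = s;
 */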
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nop's over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
_GLOBAL(do_cpu_ftr_fixups)
#ifndef CONFIG_PPC_ISERIES
	/* Get CPU 0 features */
	addis r6,r3,cur_cpu_spec@ha
	addi r6,r6,cur_cpu_spec@l
	lwz r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	addis r6,r3,__start___ftr_fixup@ha
	addi r6,r6,__start___ftr_fixup@l
	addis r7,r3,__stop___ftr_fixup@ha
	addi r7,r7,__stop___ftr_fixup@l
	lwz r8,-16(r6) /* mask */
	lwz r9,-12(r6) /* value */
	lwz r8,-8(r6) /* section begin */
	lwz r9,-4(r6) /* section end */
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	lis r0,0x60000000@h /* nop */
	andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	dcbst 0,r8 /* suboptimal, but simpler */
	sync /* additional sync needed on g4 */
#else /* CONFIG_PPC_ISERIES */
#endif /* CONFIG_PPC_ISERIES */
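/*
 * The fixup pass above, as a C sketch (illustration only; the record
 * layout mirrors the four words read at offsets -16..-4 above, and
 * 0x60000000 is the instruction encoding of nop):
 *
 *	struct fixup { unsigned long mask, value, *start, *end; } *f;
 *
 *	for (f = __start___ftr_fixup; f < __stop___ftr_fixup; f++) {
 *		if ((features & f->mask) == f->value)
 *			continue;
 *		for (unsigned long *p = f->start; p < f->end; p++)
 *			*p = 0x60000000;
 *	}
 *
 * followed by the dcbst/sync/icbi/sync/isync dance over the patched
 * range so the new instructions are visible to the I-cache.
 */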
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 * Setup function is called with:
 * r5 = ptr to CPU spec (relocated)
_GLOBAL(call_setup_cpu)
	addis r5,r3,cur_cpu_spec@ha
	addi r5,r5,cur_cpu_spec@l
	lwz r6,CPU_SPEC_SETUP(r5)
#ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_misc.S */
/* void __save_flags_ptr(unsigned long *flags) */
_GLOBAL(__save_flags_ptr)
 * Need these nops here for taking over save/restore to
_GLOBAL(__save_flags_ptr_end)
/* void __restore_flags(unsigned long flags) */
_GLOBAL(__restore_flags)
 * Just set/clear the MSR_EE bit through restore/flags but do not
 * change anything else. This is needed by the RT system and makes
	/* Copy all except the MSR_EE bit from r4 (current MSR value)
	   to r3. This is the sort of thing the rlwimi instruction is
	   designed for. -- paulus. */
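/*
 * For reference, the insert described above would look something like
 *	rlwimi	r3,r4,0,17,15
 * i.e. rotate r4 by 0 and insert it into r3 under a mask that wraps
 * from bit 17 around to bit 15, covering every bit except bit 16 --
 * and bit 16 (IBM numbering) is MSR_EE (0x8000).  (A worked reading,
 * not necessarily the exact instruction in the elided code.)
 */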
	/* Check if things are set up the way we want _already_. */
_GLOBAL(__restore_flags_end)
	mfmsr r0 /* Get current interrupt state */
	rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
	rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
	SYNC /* Some chip revs have problems here... */
	mtmsr r0 /* Update machine state */
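/*
 * Worked reading of the two rlwinm encodings above (IBM bit numbering,
 * bit 0 = MSB; MSR_EE is 0x8000, i.e. bit 16 of the MSR):
 *   rlwinm r3,r0,16+1,32-1,31  rotates EE down into the least-significant
 *                              bit and masks everything else, so r3 ends
 *                              up 0 or 1 -- the old interrupt-enable state.
 *   rlwinm r0,r0,0,17,15       keeps every bit except bit 16 (the mask
 *                              wraps from 17 around to 15), clearing EE.
 */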
 * Need these nops here for taking over save/restore to
	mfmsr r3 /* Get current state */
	ori r3,r3,MSR_EE /* Turn on 'EE' bit */
	SYNC /* Some chip revs have problems here... */
	mtmsr r3 /* Update machine state */
 * Need these nops here for taking over save/restore to
#endif /* CONFIG_PPC_ISERIES */
 * complement mask on the msr then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
_GLOBAL(_nmask_and_or_msr)
	mfmsr r0 /* Get current msr */
	andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
	or r0,r0,r4 /* Or on the bits in r4 (second parm) */
	sync /* Some chip revs have problems here... */
	mtmsr r0 /* Update machine state */
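/*
 * In rough C (sketch only, written with the usual mfmsr()/mtmsr()
 * helpers):
 *
 *	mtmsr((mfmsr() & ~nmask) | value_to_or);
 *
 * e.g. _nmask_and_or_msr(MSR_EE, 0) disables external interrupts and
 * _nmask_and_or_msr(0, MSR_EE) enables them again.
 */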
#if defined(CONFIG_40x) && defined(CONFIG_PIN_TLB)
/* This needs to be coordinated with the other pinning functions, since
 * we don't keep the number of pinned entries in a memory location, to
 * reduce cache pollution during these operations.
	tlbwe r3, r3, TLB_TAG /* just ensure V is clear */
	addi r3, r3, 1 /* so r3 works fine for that */
	cmpwi 0, r3, 61 /* reserve last two entries */
#elif defined(CONFIG_440)
	tlbwe r3,r3,PPC440_TLB_PAGEID
#if defined(CONFIG_SMP)
	rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
	lis r9,hash_table_lock@h
	ori r9,r9,hash_table_lock@l
	/* No 405 Erratum 77 fix needed here, because 4xx can't do SMP */
#endif /* CONFIG_SMP */
	stw r0,0(r9) /* clear hash_table_lock */
#endif /* CONFIG_SMP */
#endif /* defined(CONFIG_40x) && defined(CONFIG_PIN_TLB) */
 * Flush MMU TLB for a particular address
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe r3, r3, TLB_TAG
#elif defined(CONFIG_440)
	mfspr r4,SPRN_MMUCR /* Get MMUCR */
	lis r5,PPC440_MMUCR_STS@h
	ori r5,r5,PPC440_MMUCR_TID@l /* Create mask */
	andc r4,r4,r5 /* Clear out TID/STS bits */
	mfspr r5,SPRN_PID /* Get PID */
	or r4,r4,r5 /* Set TID bits */
	mfmsr r6 /* Get MSR */
	andi. r6,r6,MSR_IS@l /* TS=1? */
	beq 11f /* If not, leave STS=0 */
	oris r4,r4,PPC440_MMUCR_STS@h /* Set STS=1 */
11: mtspr SPRN_MMUCR, r4 /* Put MMUCR */
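/*
 * The MMUCR setup above, restated as a sketch (not compiled code):
 *
 *	mmucr = mfspr(SPRN_MMUCR) & ~(PPC440_MMUCR_TID | PPC440_MMUCR_STS);
 *	mmucr |= mfspr(SPRN_PID);
 *	if (mfmsr() & MSR_IS)
 *		mmucr |= PPC440_MMUCR_STS;
 *	mtspr(SPRN_MMUCR, mmucr);
 *
 * so the TLB lookup for the address being invalidated matches the
 * current PID and translation space.
 */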
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear. Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry. */
	tlbwe r3, r3, PPC440_TLB_PAGEID
#if defined(CONFIG_SMP)
	rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
	lis r9,hash_table_lock@h
	ori r9,r9,hash_table_lock@l
#endif /* CONFIG_SMP */
	stw r0,0(r9) /* clear hash_table_lock */
#endif /* CONFIG_40x */
 * Flush instruction cache.
 * This is a no-op on the 601.
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	rlwinm r3,r3,16,16,31
	beqlr /* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
#endif /* CONFIG_8xx/4xx */
#ifndef CONFIG_PPC_ISERIES /* iSeries version is in iSeries_misc.S */
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 * flush_icache_range(unsigned long start, unsigned long stop)
_GLOBAL(flush_icache_range)
	rlwinm r5,r5,16,16,31
	beqlr /* for 601, do nothing */
	li r5,L1_CACHE_LINE_SIZE-1
	srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
	addi r3,r3,L1_CACHE_LINE_SIZE
	sync /* wait for dcbst's to get to ram */
	addi r6,r6,L1_CACHE_LINE_SIZE
	sync /* additional sync needed on g4 */
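/*
 * Shape of the routine above in C-with-inline-asm form (a sketch under
 * the same assumptions, not a replacement for the assembly):
 *
 *	void flush_icache_range_sketch(unsigned long start, unsigned long stop)
 *	{
 *		unsigned long p;
 *
 *		start &= ~(L1_CACHE_LINE_SIZE - 1);
 *		for (p = start; p < stop; p += L1_CACHE_LINE_SIZE)
 *			asm volatile("dcbst 0,%0" : : "r" (p) : "memory");
 *		asm volatile("sync");
 *		for (p = start; p < stop; p += L1_CACHE_LINE_SIZE)
 *			asm volatile("icbi 0,%0" : : "r" (p) : "memory");
 *		asm volatile("sync; isync");
 *	}
 */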
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 * clean_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(clean_dcache_range)
	li r5,L1_CACHE_LINE_SIZE-1
	srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
	addi r3,r3,L1_CACHE_LINE_SIZE
	sync /* wait for dcbst's to get to ram */
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 * flush_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(flush_dcache_range)
	li r5,L1_CACHE_LINE_SIZE-1
	srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
	addi r3,r3,L1_CACHE_LINE_SIZE
	sync /* wait for dcbst's to get to ram */
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
_GLOBAL(invalidate_dcache_range)
	li r5,L1_CACHE_LINE_SIZE-1
	srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
	addi r3,r3,L1_CACHE_LINE_SIZE
	sync /* wait for dcbi's to get to ram */
#ifdef CONFIG_NOT_COHERENT_CACHE
/* This is a bad one.... It is used by the 'consistent_sync' functions when
 * there isn't any handle on the virtual address needed by the usual
 * cache flush instructions. On the MPC8xx, we can use the cache line
 * flush command; on others, all we can do is read enough data to completely
 * reload the cache, flushing old data out.
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 440 has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 440.
 * When we get a cputable cache size entry we can do the right thing.
#define CACHE_NWAYS 64
#define CACHE_NLINES 16
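/*
 * Sanity check on the constants above: CACHE_NWAYS * CACHE_NLINES is
 * 64 * 16 = 1024 lines, and 1024 lines of 32 bytes is 32KB -- the 440
 * worst case called out above -- so touching that many consecutive
 * lines displaces the entire D-cache.
 */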
_GLOBAL(flush_dcache_all)
	li r4, (CACHE_NWAYS * CACHE_NLINES)
1: lwz r3, 0(r5) /* Load one word from every line */
	addi r5, r5, L1_CACHE_LINE_SIZE
#endif /* CONFIG_NOT_COHERENT_CACHE */
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 * void __flush_dcache_icache(void *page)
_GLOBAL(__flush_dcache_icache)
	rlwinm r5,r5,16,16,31
	beqlr /* for 601, do nothing */
	rlwinm r3,r3,0,0,19 /* Get page base address */
	li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
0: dcbst 0,r3 /* Write line to ram */
	addi r3,r3,L1_CACHE_LINE_SIZE
	addi r6,r6,L1_CACHE_LINE_SIZE
 * Clear a page using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced). This only works on cacheable memory.
	li r0,4096/L1_CACHE_LINE_SIZE
	addi r3,r3,L1_CACHE_LINE_SIZE
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
#define COPY_16_BYTES \
#if MAX_COPY_PREFETCH > 1
	li r0,MAX_COPY_PREFETCH
	addi r11,r11,L1_CACHE_LINE_SIZE
#else /* MAX_COPY_PREFETCH == 1 */
	li r11,L1_CACHE_LINE_SIZE+4
#endif /* MAX_COPY_PREFETCH */
#endif /* CONFIG_8xx */
	li r0,4096/L1_CACHE_LINE_SIZE
#if L1_CACHE_LINE_SIZE >= 32
#if L1_CACHE_LINE_SIZE >= 64
#if L1_CACHE_LINE_SIZE >= 128
#endif /* CONFIG_PPC_ISERIES */
 * Atomic [test&set] exchange
 * unsigned long xchg_u32(void *ptr, unsigned long val)
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
	mr r5,r3 /* Save pointer */
10: lwarx r3,0,r5 /* Fetch old value & reserve */
	stwcx. r4,0,r5 /* Update with new value */
	bne- 10b /* Retry if "reservation" (i.e. lock) lost */
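/*
 * The reservation loop above, as gcc inline assembly (a sketch; the
 * constraints mirror what the code does, not a drop-in replacement):
 *
 *	unsigned long xchg_u32_sketch(volatile unsigned long *p, unsigned long val)
 *	{
 *		unsigned long old;
 *
 *		asm volatile(
 *		"1:	lwarx	%0,0,%2\n"
 *		"	stwcx.	%1,0,%2\n"
 *		"	bne-	1b"
 *		: "=&r" (old)
 *		: "r" (val), "r" (p)
 *		: "cc", "memory");
 *		return old;
 *	}
 */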
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
_GLOBAL(atomic_clear_mask)
_GLOBAL(atomic_set_mask)
 * I/O string operations
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 * The *_ns versions don't do byte-swapping.
 * Extended precision shifts.
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * R3/R4 has 64 bit value
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
	srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
	addi r7,r5,32 # could be xori, or addi with -32
	slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
	sraw r7,r3,r7 # t2 = MSW >> (count-32)
	or r4,r4,r6 # LSW |= t1
	slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
	sraw r3,r3,r5 # MSW = MSW >> count
	or r4,r4,r7 # LSW |= t2
	slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
	addi r7,r5,32 # could be xori, or addi with -32
	srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
	slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
	or r3,r3,r6 # MSW |= t1
	slw r4,r4,r5 # LSW = LSW << count
	or r3,r3,r7 # MSW |= t2
	srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
	addi r7,r5,32 # could be xori, or addi with -32
	slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
	srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
	or r4,r4,r6 # LSW |= t1
	srw r3,r3,r5 # MSW = MSW >> count
	or r4,r4,r7 # LSW |= t2
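/*
 * What the three helpers compute, as reference C (gcc emits calls to
 * them for 64-bit shifts on 32-bit PowerPC; count is 0..63):
 *
 *	long long __ashrdi3_ref(long long v, int count) { return v >> count; }
 *	long long __ashldi3_ref(long long v, int count) { return v << count; }
 *	unsigned long long __lshrdi3_ref(unsigned long long v, int count) { return v >> count; }
 *
 * The branch-free trick: for count < 32 the "t2" term is masked to 0,
 * leaving LSW = LSW>>count | MSW<<(32-count); for count >= 32 the plain
 * srw/slw by count produce 0 and t2 supplies MSW shifted by (count-32)
 * into the LSW.
 */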
	mr r3,r1 /* Close enough */
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 * We restore and save the fpscr so the task gets the same result
 * and exceptions as if the cpu had performed the load or store.
	lfd 0,-4(r5) /* load up fpscr value */
	mffs 0 /* save new fpscr value */
	lfd 0,-4(r5) /* load up fpscr value */
	mffs 0 /* save new fpscr value */
 * Create a kernel thread
 * kernel_thread(fn, arg, flags)
_GLOBAL(kernel_thread)
	mr r6,r3 /* function */
	ori r3,r5,CLONE_VM /* flags */
	cmpi 0,r3,0 /* parent or child? */
	bnelr /* return if parent */
	li r0,0 /* make top-level stack frame */
	mtlr r6 /* fn addr in lr */
	mr r3,r4 /* load arg and call fn */
	li r0,__NR_exit /* exit after child exits */
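/*
 * Control flow of kernel_thread() above, in outline (pseudocode; the
 * clone() here stands for the raw __NR_clone trap issued with sc, not
 * the C library wrapper):
 *
 *	pid = clone(flags | CLONE_VM);
 *	if (pid != 0)
 *		return pid;		parent (or error)
 *	fn(arg);			child runs the payload
 *	_exit(0);			then exits when it returns
 *
 * The usual reason this lives in assembly is that the child cannot rely
 * on the parent's C stack frame after the clone.
 */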
 * This routine is just here to keep GCC happy - sigh...
#define SYSCALL(name) \
	li r0,__NR_##name; \
	stw r3,errno@l(r4); \
#define __NR__exit __NR_exit
SYSCALL(delete_module)
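/*
 * Behaviourally, each SYSCALL(name) stub is the usual libc-style
 * wrapper (pseudocode; failure is detected via the CR0 summary-overflow
 * bit the kernel sets on error):
 *
 *	ret = <sc with r0 = __NR_name>;
 *	if (failed) {
 *		errno = ret;		(the kernel returns a positive errno)
 *		ret = -1;
 *	}
 *	return ret;
 */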
/* Why isn't this a) automatic, b) written in 'C'? */
_GLOBAL(sys_call_table)
	.long sys_ni_syscall /* 0 - old "setup()" system call */
	.long sys_open /* 5 */
	.long sys_unlink /* 10 */
	.long sys_chmod /* 15 */
	.long sys_ni_syscall /* old break syscall holder */
	.long sys_getpid /* 20 */
	.long sys_stime /* 25 */
	.long sys_utime /* 30 */
	.long sys_ni_syscall /* old stty syscall holder */
	.long sys_ni_syscall /* old gtty syscall holder */
	.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
	.long sys_rmdir /* 40 */
	.long sys_ni_syscall /* old prof syscall holder */
	.long sys_brk /* 45 */
	.long sys_getegid /* 50 */
	.long sys_umount /* recycled never used phys() */
	.long sys_ni_syscall /* old lock syscall holder */
	.long sys_fcntl /* 55 */
	.long sys_ni_syscall /* old mpx syscall holder */
	.long sys_ni_syscall /* old ulimit syscall holder */
	.long sys_umask /* 60 */
	.long sys_getpgrp /* 65 */
	.long sys_setreuid /* 70 */
	.long sys_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit /* 75 */
	.long sys_old_getrlimit
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups /* 80 */
	.long sys_readlink /* 85 */
	.long sys_mmap /* 90 */
	.long sys_fchown /* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall /* old profil syscall holder */
	.long sys_fstatfs /* 100 */
	.long sys_socketcall
	.long sys_getitimer /* 105 */
	.long sys_ni_syscall /* 110 old iopl syscall */
	.long sys_ni_syscall /* old 'idle' syscall */
	.long sys_ni_syscall /* old vm86 syscall */
	.long sys_swapoff /* 115 */
	.long sys_clone /* 120 */
	.long sys_setdomainname
	.long sys_ni_syscall /* old modify_ldt syscall */
	.long sys_mprotect /* 125 */
	.long sys_sigprocmask
	.long sys_create_module
	.long sys_init_module
	.long sys_delete_module
	.long sys_get_kernel_syms /* 130 */
	.long sys_sysfs /* 135 */
	.long sys_personality
	.long sys_ni_syscall /* for afs_syscall */
	.long sys_llseek /* 140 */
	.long sys_readv /* 145 */
	.long sys_mlock /* 150 */
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam /* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_getresuid /* 165 */
	.long sys_query_module
	.long sys_nfsservctl
	.long sys_getresgid /* 170 */
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask
	.long sys_rt_sigpending /* 175 */
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pwrite /* 180 */
	.long sys_sigaltstack /* 185 */
	.long sys_ni_syscall /* streams1 */
	.long sys_ni_syscall /* streams2 */
	.long sys_getrlimit /* 190 */
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64 /* 195 */
	.long sys_pciconfig_read
	.long sys_pciconfig_write
	.long sys_pciconfig_iobase /* 200 */
	.long sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
	.long sys_getdents64
	.long sys_pivot_root
	.long sys_madvise /* 205 */
	.long sys_lsetxattr /* 210 */
	.long sys_listxattr /* 215 */
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr
	.long sys_lremovexattr
	.long sys_fremovexattr /* 220 */
	.long sys_ni_syscall /* reserved for sys_futex */
	.long sys_ni_syscall /* reserved for sys_sched_setaffinity */
	.long sys_ni_syscall /* reserved for sys_sched_getaffinity */
	.long sys_ni_syscall /* reserved for sys_security */
	.long sys_ni_syscall /* 225 reserved for Tux */
	.long sys_ni_syscall /* reserved for sys_sendfile64 */
	.long sys_ni_syscall /* reserved for sys_io_setup */
	.long sys_ni_syscall /* reserved for sys_io_destroy */
	.long sys_ni_syscall /* reserved for sys_io_getevents */
	.long sys_ni_syscall /* 230 reserved for sys_io_submit */
	.long sys_ni_syscall /* reserved for sys_io_cancel */
	.rept NR_syscalls-(.-sys_call_table)/4
	.long sys_ni_syscall