#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H

#include <linux/config.h>	/* get configuration macros */
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/segment.h>

#define prepare_to_switch()	do { } while(0)

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task; if it is, it does nothing.  This also
 * clears the TS flag if the task we switch to most recently used the
 * math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior if a task returned with
 * unexpected register values.
 *
 * The system call path stores these registers itself, and none of
 * them are used by it after the function implementing the system
 * call has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1.  This saves a few instructions as we no longer
 * have to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last) { \
  register void *_prev __asm__ ("a0") = (prev); \
  register void *_next __asm__ ("a1") = (next); \
  register void *_last __asm__ ("d1"); \
  __asm__ __volatile__("jbsr " SYMBOL_NAME_STR(resume) \
		       : "=d" (_last) : "a" (_prev), "a" (_next) \
		       : "d0", /* "d1", */ "d2", "d3", "d4", "d5", "a0", "a1"); \
  (last) = _last; \
}

/* interrupt control.. */
#if 0
#define __sti() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
#else
#include <asm/hardirq.h>
#define __sti() ({							\
	if (MACH_IS_Q40 || !local_irq_count(smp_processor_id()))	\
		asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
})
#endif
#define __cli() asm volatile ("oriw  #0x0700,%%sr": : : "memory")
#define __save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define __restore_flags(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

/* For spinlocks etc */
#define local_irq_save(x)	({ __save_flags(x); __cli(); })
#define local_irq_set(x)	({ __save_flags(x); __sti(); })
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		do { save_flags(x); cli(); } while(0)
#define save_and_set(x)		do { save_flags(x); sti(); } while(0)

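/*
 * Illustrative sketch only, under #if 0: the usual pattern for a
 * short critical section built from the macros above.
 * example_counter and example_increment are hypothetical names, not
 * part of this header.
 */
#if 0
static unsigned long example_counter;

static void example_increment(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* save SR, then raise the IPL to 7 */
	example_counter++;		/* interrupts cannot intervene here */
	local_irq_restore(flags);	/* restore SR and the old IPL */
}
#endif
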
/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define set_mb(var, value)	do { xchg(&var, value); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()

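/*
 * Illustrative sketch only, under #if 0: publishing data behind a
 * flag with wmb().  On m68k these barriers compile down to barrier(),
 * but the pattern still keeps the compiler from reordering the two
 * stores.  example_data and example_ready are hypothetical names.
 */
#if 0
static int example_data, example_ready;

static void example_publish(int v)
{
	example_data = v;	/* write the payload first */
	wmb();			/* keep the flag store below the payload */
	example_ready = 1;	/* readers test this flag */
}
#endif
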
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

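/*
 * Illustrative sketch only, under #if 0: a minimal test-and-set spin
 * loop built on tas().  The names are hypothetical, and real code
 * should use the spinlock API rather than open-coding this.
 */
#if 0
static volatile int example_lock;

static void example_acquire(void)
{
	while (tas(&example_lock))	/* atomically store 1, get old value */
		/* spin */;		/* retry until the old value was 0 */
}

static void example_release(void)
{
	example_lock = 0;
	wmb();				/* compiler barrier; enough on m68k */
}
#endif
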
/* Dummy type wide enough that "m" (*__xg(ptr)) makes gcc treat the
   whole object behind ptr as read and written by the asm. */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long tmp, flags;

	save_flags(flags);
	cli();		/* no RMW instructions, so exclude interrupts instead */

	switch (size) {
	case 1:
		__asm__ __volatile__ ("moveb %2,%0\n\tmoveb %1,%2"
			: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__ ("movew %2,%0\n\tmovew %1,%2"
			: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__ ("movel %2,%0\n\tmovel %1,%2"
			: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	restore_flags(flags);
	return tmp;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__ ("moveb %2,%0\n1:\n\tcasb %0,%1,%2\n\tjne 1b"
			: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__ ("movew %2,%0\n1:\n\tcasw %0,%1,%2\n\tjne 1b"
			: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__ ("movel %2,%0\n1:\n\tcasl %0,%1,%2\n\tjne 1b"
			: "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif

#endif /* _M68K_SYSTEM_H */