#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */
#include <linux/config.h>
#include <linux/cache.h>

#include <asm/ptrace.h>
#include <asm/kregs.h>
#include <asm/types.h>
#define IA64_NUM_DBG_REGS	8
/*
 * Limits for PMC and PMD are set to less than maximum architected values
 * but should be sufficient for a while
 */
#define IA64_NUM_PMC_REGS	32
#define IA64_NUM_PMD_REGS	32

#define DEFAULT_MAP_BASE	0x2000000000000000
#define DEFAULT_TASK_SIZE	0xa000000000000000
/*
 * TASK_SIZE really is a misnomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		(current->thread.task_size)
/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)
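
/*
 * Illustrative sketch (not part of this header's API): with the default
 * values above, a freshly exec'd process sees the following layout.  The
 * region number is the top three bits of a 64-bit virtual address.
 *
 *	current->thread.map_base  == 0x2000000000000000  (region 1: mmap area)
 *	current->thread.task_size == 0xa000000000000000  (start of region 5,
 *							  the first kernel region)
 *
 * so a check such as "addr < TASK_SIZE" confines user pointers to
 * regions 0-4.
 */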
#define EISA_bus 0
#define EISA_bus__is_a_macro /* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */
#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_KRBS_SYNCED	(__IA64_UL(1) << 5)	/* krbs synced with process vm? */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */
#define IA64_THREAD_XSTACK	(__IA64_UL(1) << 8)	/* stack executable by default? */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)
/*
 * This shift should be large enough to be able to represent
 * 1000000/itc_freq with good accuracy while being small enough to fit
 * 1000000<<IA64_USEC_PER_CYC_SHIFT in 64 bits.
 */
#define IA64_USEC_PER_CYC_SHIFT	41
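
/*
 * Worked example (illustrative): for an ITC running at 800 MHz,
 *
 *	usec_per_cyc = (1000000UL << IA64_USEC_PER_CYC_SHIFT) / 800000000
 *		     = 2748779069
 *
 * and a cycle count then converts to microseconds with one multiply and
 * one shift:
 *
 *	usec = (cycles * usec_per_cyc) >> IA64_USEC_PER_CYC_SHIFT;
 *
 * The shift of 41 keeps 1000000 << 41 (~2.2e18) below 2^64 (~1.8e19),
 * as the comment above requires.
 */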
#ifndef __ASSEMBLY__

#include <linux/threads.h>
#include <linux/cache.h>

#include <asm/fpu.h>
#include <asm/offsets.h>
#include <asm/page.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	/* ... */
	__u64 reserved4 : 19;
};
/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	/* irq_stat must be 64-bit aligned */
	union {
		struct {
			__u32 irq_count;
			__u32 bh_count;
		} f;
		__u64 irq_and_bh_counts;
	} irq_stat;
	__u32 softirq_pending;
	__u32 phys_stacked_size_p8;	/* size of physical stacked registers + 8 */
	__u64 itm_delta;	/* # of clock cycles between clock ticks */
	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
	__u64 pgtable_cache_sz;
	/* CPUID-derived information: */
	__u64 itc_freq;		/* frequency of ITC counter */
	__u64 proc_freq;	/* frequency of processor */
	__u64 cyc_per_usec;	/* itc_freq/1000000 */
	__u64 usec_per_cyc;	/* 2^IA64_USEC_PER_CYC_SHIFT*1000000/itc_freq */
	__u64 unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	__u64 unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	__u32 ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */
# ifdef CONFIG_PERFMON
	/* ... */
# endif
#ifdef CONFIG_SMP
	__u64 loops_per_jiffy;
	__u64 prof_multiplier;
	union {
		/*
		 * This is written to by *other* CPUs,
		 * so isolate it in its own cacheline.
		 */
		__u64 operation;
		char pad[SMP_CACHE_BYTES] ____cacheline_aligned;
	} ipi;
#endif
#ifdef CONFIG_NUMA
	void *node_directory;
	int numa_node_id;
	struct cpuinfo_ia64 *cpu_data[NR_CPUS];
#endif
	/* Platform specific word.  MUST BE LAST IN STRUCT */
	__u64 platform_specific;
} __attribute__ ((aligned (PAGE_SIZE)));
/*
 * The "local" data pointer.  It points to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 */
#define local_cpu_data		((struct cpuinfo_ia64 *) PERCPU_ADDR)
/*
 * On NUMA systems, cpu_data for each cpu is allocated during cpu_init() & is allocated on
 * the node that contains the cpu.  This minimizes off-node memory references.  cpu_data
 * for each cpu contains an array of pointers to the cpu_data structures of each of the
 * other cpus.
 *
 * On non-NUMA systems, cpu_data is a static array allocated at compile time.  References
 * to the cpu_data of another cpu are done by direct references to the appropriate entry of
 * the array.
 */
#ifdef CONFIG_NUMA
# define cpu_data(cpu)		local_cpu_data->cpu_data[cpu]
# define numa_node_id()		(local_cpu_data->numa_node_id)
#else
extern struct cpuinfo_ia64 _cpu_data[NR_CPUS];
# define cpu_data(cpu)		(&_cpu_data[cpu])
#endif
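
/*
 * Illustrative sketch (not from this header): either way, per-CPU state is
 * reached the same way, e.g.
 *
 *	unsigned long freq = local_cpu_data->itc_freq;	// this CPU
 *	struct cpuinfo_ia64 *ci = cpu_data(3);		// CPU 3, NUMA or not
 *
 * On NUMA, the second form costs one extra dereference through the local
 * cpu_data[] pointer array instead of indexing a global array.
 */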
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);
#define SET_UNALIGN_CTL(task,value)							\
({											\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)		\
			| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;										\
})

#define GET_UNALIGN_CTL(task,addr)							\
({											\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT, \
		 (int *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)							\
({											\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)	\
			| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
	0;										\
})

#define GET_FPEMU_CTL(task,addr)							\
({											\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT, \
		 (int *) (addr));							\
})
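
/*
 * Illustrative sketch (constant assumed from <linux/prctl.h>): these macros
 * back the prctl() unaligned-access and fp-emulation controls.  A caller that
 * wants SIGBUS on unaligned accesses would do something like
 *
 *	SET_UNALIGN_CTL(current, PR_UNALIGN_SIGBUS);
 *
 * which shifts the value into bits 3-4 of thread.flags (the UAC field);
 * GET_UNALIGN_CTL() shifts it back out and stores the result to user space.
 */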
struct thread_struct {
	__u64 ksp;			/* kernel stack pointer */
	unsigned long flags;		/* various flags */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 task_size;		/* limit for task size */
	struct siginfo *siginfo;	/* current siginfo struct for ptrace() */

#ifdef CONFIG_IA32_SUPPORT
	__u64 eflag;			/* IA32 EFLAGS reg */
	__u64 fsr;			/* IA32 floating pt status reg */
	__u64 fcr;			/* IA32 floating pt control reg */
	__u64 fir;			/* IA32 fp except. instr. reg */
	__u64 fdr;			/* IA32 fp except. data reg */
	__u64 csd;			/* IA32 code selector descriptor */
	__u64 ssd;			/* IA32 stack selector descriptor */
	__u64 old_k1;			/* old value of ar.k1 */
	__u64 old_iob;			/* old IOBase value */
# define INIT_THREAD_IA32	0, 0, 0x17800000037fULL, 0, 0, 0, 0, 0, 0,
#else
# define INIT_THREAD_IA32
#endif /* CONFIG_IA32_SUPPORT */
#ifdef CONFIG_PERFMON
	__u64 pmc[IA64_NUM_PMC_REGS];
	__u64 pmd[IA64_NUM_PMD_REGS];
	unsigned long pfm_ovfl_block_reset;	/* non-zero if we need to block or reset regs on ovfl */
	void *pfm_context;			/* pointer to detailed PMU context */
	atomic_t pfm_notifiers_check;		/* when >0, will cleanup ctx_notify_task in tasklist */
	atomic_t pfm_owners_check;		/* when >0, will cleanup ctx_owner in tasklist */
	void *pfm_smpl_buf_list;		/* list of sampling buffers to vfree */
# define INIT_THREAD_PM		{0, }, {0, }, 0, NULL, {0}, {0}, NULL,
#else
# define INIT_THREAD_PM
#endif
	__u64 dbr[IA64_NUM_DBG_REGS];
	__u64 ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};
#define INIT_THREAD {					\
	0,				/* ksp */	\
	0,				/* flags */	\
	DEFAULT_MAP_BASE,		/* map_base */	\
	DEFAULT_TASK_SIZE,		/* task_size */	\
	0,				/* siginfo */	\
	INIT_THREAD_IA32				\
	INIT_THREAD_PM					\
	{0, },				/* dbr */	\
	{0, },				/* ibr */	\
	{{{{0}}}, }			/* fph */	\
}
#define start_thread(regs,new_ip,new_sp) do {							\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL | IA64_PSR_SP))	\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_bspstore = IA64_RBS_BOT;							\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->r8 = current->mm->dumpable;	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (!__builtin_expect (current->mm->dumpable, 1)) {					\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 *										\
		 * XXX fix me: everything below can go away once we stop preserving scratch	\
		 * regs on a system call.							\
		 */										\
		regs->r1 = 0; regs->r2 = 0; regs->r3 = 0;					\
		regs->r13 = 0; regs->r14 = 0; regs->r15 = 0;					\
		regs->r9 = 0; regs->r11 = 0;							\
		regs->r16 = 0; regs->r17 = 0; regs->r18 = 0; regs->r19 = 0;			\
		regs->r20 = 0; regs->r21 = 0; regs->r22 = 0; regs->r23 = 0;			\
		regs->r24 = 0; regs->r25 = 0; regs->r26 = 0; regs->r27 = 0;			\
		regs->r28 = 0; regs->r29 = 0; regs->r30 = 0; regs->r31 = 0;			\
		regs->b0 = 0; regs->b7 = 0;							\
		regs->f6.u.bits[0] = 0; regs->f6.u.bits[1] = 0;					\
		regs->f7.u.bits[0] = 0; regs->f7.u.bits[1] = 0;					\
		regs->f8.u.bits[0] = 0; regs->f8.u.bits[1] = 0;					\
		regs->f9.u.bits[0] = 0; regs->f9.u.bits[1] = 0;					\
	}											\
} while (0)
/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread.  This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#ifdef CONFIG_PERFMON
extern void release_thread (struct task_struct *task);
#else
# define release_thread(dead_task)
#endif
/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE 1: Only a kernel-only process (i.e., the swapper or direct
 * descendants who haven't done an "execve()") should use this: it
 * will work within a system call from a "real" process, but the
 * process memory space will not be freed until both the parent and
 * the child have exited.
 *
 * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
 * into trouble in init/main.c when the child thread returns to
 * do_basic_setup() and the timing is such that free_initmem() has
 * been called already.
 */
extern int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
/* Copy and release all segment info associated with a VM */
#define copy_segments(tsk, mm)		do { } while (0)
#define release_segments(mm)		do { } while (0)

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = ia64_task_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)	((tsk)->thread.ksp)
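
/*
 * Illustrative sketch (not from this header): a debugger-style helper could
 * report where a blocked task stopped, e.g.
 *
 *	printk("task %d: ip=0x%lx sp=0x%lx\n",
 *	       tsk->pid, KSTK_EIP(tsk), KSTK_ESP(tsk));
 *
 * Note that KSTK_EIP() adds the psr.ri slot number to the (16-byte aligned)
 * bundle address, so the result identifies the exact instruction slot.
 */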
static inline unsigned long
ia64_get_kr (unsigned long regnum)
{
	unsigned long r = 0;

	switch (regnum) {
	      case 0: asm volatile ("mov %0=ar.k0" : "=r"(r)); break;
	      case 1: asm volatile ("mov %0=ar.k1" : "=r"(r)); break;
	      case 2: asm volatile ("mov %0=ar.k2" : "=r"(r)); break;
	      case 3: asm volatile ("mov %0=ar.k3" : "=r"(r)); break;
	      case 4: asm volatile ("mov %0=ar.k4" : "=r"(r)); break;
	      case 5: asm volatile ("mov %0=ar.k5" : "=r"(r)); break;
	      case 6: asm volatile ("mov %0=ar.k6" : "=r"(r)); break;
	      case 7: asm volatile ("mov %0=ar.k7" : "=r"(r)); break;
	}
	return r;
}
static inline void
ia64_set_kr (unsigned long regnum, unsigned long r)
{
	switch (regnum) {
	      case 0: asm volatile ("mov ar.k0=%0" :: "r"(r)); break;
	      case 1: asm volatile ("mov ar.k1=%0" :: "r"(r)); break;
	      case 2: asm volatile ("mov ar.k2=%0" :: "r"(r)); break;
	      case 3: asm volatile ("mov ar.k3=%0" :: "r"(r)); break;
	      case 4: asm volatile ("mov ar.k4=%0" :: "r"(r)); break;
	      case 5: asm volatile ("mov ar.k5=%0" :: "r"(r)); break;
	      case 6: asm volatile ("mov ar.k6=%0" :: "r"(r)); break;
	      case 7: asm volatile ("mov ar.k7=%0" :: "r"(r)); break;
	}
}
static inline struct task_struct *
ia64_get_fpu_owner (void)
{
	return (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER);
}

static inline void
ia64_set_fpu_owner (struct task_struct *t)
{
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) t);
}
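
/*
 * Illustrative sketch (simplified; the real logic lives in the context-switch
 * and fault paths): the kernel register tracks which task last used the high
 * floating-point partition, which enables lazy save/restore of f32-f127:
 *
 *	if (ia64_get_fpu_owner() != current) {
 *		// f32-f127 hold someone else's state; load ours
 *		...
 *		ia64_set_fpu_owner(current);
 *	}
 */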
extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#ifdef CONFIG_IA32_SUPPORT
extern void ia32_save_state (struct task_struct *task);
extern void ia32_load_state (struct task_struct *task);
#endif
#define ia64_fph_enable()	asm volatile (";; rsm psr.dfh;; srlz.d;;" ::: "memory");
#define ia64_fph_disable()	asm volatile (";; ssm psr.dfh;; srlz.d;;" ::: "memory");

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}
static inline void
ia64_fc (void *addr)
{
	asm volatile ("fc %0" :: "r"(addr) : "memory");
}

static inline void
ia64_sync_i (void)
{
	asm volatile (";; sync.i" ::: "memory");
}

static inline void
ia64_srlz_i (void)
{
	asm volatile (";; srlz.i ;;" ::: "memory");
}

static inline void
ia64_srlz_d (void)
{
	asm volatile (";; srlz.d" ::: "memory");
}
static inline __u64
ia64_get_rr (__u64 reg_bits)
{
	__u64 r;

	asm volatile ("mov %0=rr[%1]" : "=r"(r) : "r"(reg_bits) : "memory");
	return r;
}

static inline void
ia64_set_rr (__u64 reg_bits, __u64 rr_val)
{
	asm volatile ("mov rr[%0]=%1" :: "r"(reg_bits), "r"(rr_val) : "memory");
}
static inline __u64
ia64_get_dcr (void)
{
	__u64 r;

	asm volatile ("mov %0=cr.dcr" : "=r"(r));
	return r;
}

static inline void
ia64_set_dcr (__u64 val)
{
	asm volatile ("mov cr.dcr=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_lid (void)
{
	__u64 r;

	asm volatile ("mov %0=cr.lid" : "=r"(r));
	return r;
}

static inline void
ia64_invala (void)
{
	asm volatile ("invala" ::: "memory");
}
/*
 * Save the processor status flags in FLAGS and then clear the interrupt collection and
 * interrupt enable bits.  Don't trigger any mandatory RSE references while this bit is
 * off!
 */
static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;

	asm volatile ("mov %0=psr;; rsm psr.i | psr.ic;; srlz.i;;" : "=r"(psr) :: "memory");
	return psr;
}

/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	asm volatile (";; mov psr.l=%0;; srlz.d" :: "r" (psr) : "memory");
}
/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	if (target_mask & 0x1)
		asm volatile ("itr.i itr[%0]=%1"
			      :: "r"(tr_num), "r"(pte) : "memory");
	if (target_mask & 0x2)
		asm volatile (";;itr.d dtr[%0]=%1"
			      :: "r"(tr_num), "r"(pte) : "memory");
}
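
/*
 * Illustrative sketch (arguments assumed, not from this header): pinning a
 * 64MB kernel mapping into data translation register 2 might look like
 *
 *	psr = ia64_clear_ic();			// TR inserts need psr.ic off
 *	ia64_itr(0x2, 2, vaddr, pte, 26);	// 0x2 = dtr only, 2^26 = 64MB
 *	ia64_set_psr(psr);
 *	ia64_srlz_i();
 *
 * target_mask bit 0 selects the instruction TRs, bit 1 the data TRs.
 */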
/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	asm volatile ("mov cr.itir=%0" :: "r"(log_page_size << 2) : "memory");
	asm volatile ("mov cr.ifa=%0;;" :: "r"(vmaddr) : "memory");
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		asm volatile ("itc.i %0;;" :: "r"(pte) : "memory");
	if (target_mask & 0x2)
		asm volatile (";;itc.d %0;;" :: "r"(pte) : "memory");
}
/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		asm volatile ("ptr.i %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
	if (target_mask & 0x2)
		asm volatile ("ptr.d %0,%1" :: "r"(vmaddr), "r"(log_size << 2));
}
/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	asm volatile ("mov cr.iva=%0;; srlz.i;;" :: "r"(ivt_addr) : "memory");
}

/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	asm volatile ("mov cr.pta=%0;; srlz.i;;" :: "r"(pta) : "memory");
}
static inline __u64
ia64_get_cpuid (__u64 regnum)
{
	__u64 r;

	asm ("mov %0=cpuid[%r1]" : "=r"(r) : "rO"(regnum));
	return r;
}

static inline void
ia64_eoi (void)
{
	asm ("mov cr.eoi=r0;; srlz.d;;" ::: "memory");
}
static inline void
ia64_set_lrr0 (unsigned long val)
{
	asm volatile ("mov cr.lrr0=%0;; srlz.d" :: "r"(val) : "memory");
}

#define cpu_relax()	do { } while (0)

static inline void
ia64_set_lrr1 (unsigned long val)
{
	asm volatile ("mov cr.lrr1=%0;; srlz.d" :: "r"(val) : "memory");
}
static inline void
ia64_set_pmv (__u64 val)
{
	asm volatile ("mov cr.pmv=%0" :: "r"(val) : "memory");
}

static inline __u64
ia64_get_pmc (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=pmc[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmc (__u64 regnum, __u64 value)
{
	asm volatile ("mov pmc[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline __u64
ia64_get_pmd (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=pmd[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline void
ia64_set_pmd (__u64 regnum, __u64 value)
{
	asm volatile ("mov pmd[%0]=%1" :: "r"(regnum), "r"(value));
}
/*
 * Given the address to which a spill occurred, return the unat bit
 * number that corresponds to this address.
 */
static inline __u64
ia64_unat_pos (void *spill_addr)
{
	return ((__u64) spill_addr >> 3) & 0x3f;
}

/*
 * Set the NaT bit of an integer register which was spilled at address
 * SPILL_ADDR.  UNAT is the mask to be updated.
 */
static inline void
ia64_set_unat (__u64 *unat, void *spill_addr, unsigned long nat)
{
	__u64 bit = ia64_unat_pos(spill_addr);
	__u64 mask = 1UL << bit;

	*unat = (*unat & ~mask) | (nat << bit);
}
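
/*
 * Worked example (illustrative): a spill to address 0xe0000000000011f8 uses
 * unat bit (0x11f8 >> 3) & 0x3f = 0x3f, i.e. doubleword 63 of the enclosing
 * 512-byte-aligned block.  Setting the NaT bit for that register:
 *
 *	__u64 unat = 0;
 *	ia64_set_unat(&unat, (void *) 0xe0000000000011f8, 1);
 *	// unat == 1UL << 63
 */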
/*
 * Return saved PC of a blocked thread.
 * Note that the only way T can block is through a call to schedule() -> switch_to().
 */
static inline unsigned long
thread_saved_pc (struct thread_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	/* XXX ouch: Linus, please pass the task pointer to thread_saved_pc() instead! */
	struct task_struct *p = (void *) ((unsigned long) t - IA64_TASK_THREAD_OFFSET);

	unw_init_from_blocked_task(&info, p);
	if (unw_unwind(&info) < 0)
		return 0;
	unw_get_ip(&info, &ip);
	return ip;
}
/*
 * Get the current instruction/program counter value.
 */
#define current_text_addr() \
	({ void *_pc; asm volatile ("mov %0=ip" : "=r" (_pc)); _pc; })

#define THREAD_SIZE	IA64_STK_OFFSET
/* NOTE: The task struct and the stacks are allocated together.  */
#define alloc_task_struct() \
	((struct task_struct *) __get_free_pages(GFP_KERNEL, IA64_TASK_STRUCT_LOG_NUM_PAGES))
#define free_task_struct(p)	free_pages((unsigned long) (p), IA64_TASK_STRUCT_LOG_NUM_PAGES)
#define get_task_struct(tsk)	atomic_inc(&virt_to_page(tsk)->count)

#define init_task	(init_task_union.task)
#define init_stack	(init_task_union.stack)
/*
 * Set the correctable machine check vector register
 */
static inline void
ia64_set_cmcv (__u64 val)
{
	asm volatile ("mov cr.cmcv=%0" :: "r"(val) : "memory");
}

/*
 * Read the correctable machine check vector register
 */
static inline __u64
ia64_get_cmcv (void)
{
	__u64 val;

	asm volatile ("mov %0=cr.cmcv" : "=r"(val) :: "memory");
	return val;
}

static inline __u64
ia64_get_ivr (void)
{
	__u64 r;

	asm volatile ("srlz.d;; mov %0=cr.ivr;; srlz.d;;" : "=r"(r));
	return r;
}
static inline void
ia64_set_tpr (__u64 val)
{
	asm volatile ("mov cr.tpr=%0" :: "r"(val));
}

static inline __u64
ia64_get_tpr (void)
{
	__u64 r;

	asm volatile ("mov %0=cr.tpr" : "=r"(r));
	return r;
}
static inline void
ia64_set_irr0 (__u64 val)
{
	asm volatile ("mov cr.irr0=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr0 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr0" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr1 (__u64 val)
{
	asm volatile ("mov cr.irr1=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr1 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr1" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr2 (__u64 val)
{
	asm volatile ("mov cr.irr2=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr2 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr2" : "=r"(val));
	return val;
}

static inline void
ia64_set_irr3 (__u64 val)
{
	asm volatile ("mov cr.irr3=%0;;" :: "r"(val) : "memory");
	ia64_srlz_d();
}

static inline __u64
ia64_get_irr3 (void)
{
	__u64 val;

	/* this is volatile because irr may change unbeknownst to gcc... */
	asm volatile ("mov %0=cr.irr3" : "=r"(val));
	return val;
}
static inline __u64
ia64_get_gp (void)
{
	__u64 val;

	asm ("mov %0=gp" : "=r"(val));
	return val;
}

static inline void
ia64_set_ibr (__u64 regnum, __u64 value)
{
	asm volatile ("mov ibr[%0]=%1" :: "r"(regnum), "r"(value));
}

static inline void
ia64_set_dbr (__u64 regnum, __u64 value)
{
	asm volatile ("mov dbr[%0]=%1" :: "r"(regnum), "r"(value));
#ifdef CONFIG_ITANIUM
	asm volatile (";; srlz.d");
#endif
}
static inline __u64
ia64_get_ibr (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=ibr[%1]" : "=r"(retval) : "r"(regnum));
	return retval;
}

static inline __u64
ia64_get_dbr (__u64 regnum)
{
	__u64 retval;

	asm volatile ("mov %0=dbr[%1]" : "=r"(retval) : "r"(regnum));
#ifdef CONFIG_ITANIUM
	asm volatile (";; srlz.d");
#endif
	return retval;
}
/* XXX remove the handcoded version once we have a sufficiently clever compiler... */
#ifdef SMART_COMPILER
# define ia64_rotr(w,n)				\
  ({						\
	__u64 _w = (w), _n = (n);		\
						\
	(_w >> _n) | (_w << (64 - _n));		\
  })
#else
# define ia64_rotr(w,n)							\
  ({									\
	__u64 result;							\
	asm ("shrp %0=%1,%1,%2" : "=r"(result) : "r"(w), "i"(n));	\
	result;								\
  })
#endif /* !SMART_COMPILER */

#define ia64_rotl(w,n)	ia64_rotr((w), (64) - (n))
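
/*
 * Worked example (illustrative): rotating right by 8 moves the low byte to
 * the top, and ia64_rotl() is just a rotate right by the complement:
 *
 *	ia64_rotr(0x0123456789abcdefUL, 8)  == 0xef0123456789abcdUL
 *	ia64_rotl(0x0123456789abcdefUL, 8)  == ia64_rotr(..., 56)
 *					    == 0x23456789abcdef01UL
 *
 * Note that in the handcoded version n must be a compile-time constant:
 * shrp takes an immediate shift count (the "i" constraint).
 */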
static inline __u64
ia64_thash (__u64 addr)
{
	__u64 result;

	asm ("thash %0=%1" : "=r"(result) : "r" (addr));
	return result;
}

static inline __u64
ia64_tpa (__u64 addr)
{
	__u64 result;

	asm ("tpa %0=%1" : "=r"(result) : "r"(addr));
	return result;
}
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
#define PREFETCH_STRIDE 256

static inline void
prefetch (const void *x)
{
	__asm__ __volatile__ ("lfetch [%0]" : : "r"(x));
}

static inline void
prefetchw (const void *x)
{
	__asm__ __volatile__ ("lfetch.excl [%0]" : : "r"(x));
}

#define spin_lock_prefetch(x)	prefetchw(x)
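
/*
 * Illustrative sketch (not from this header): a list walker can hide load
 * latency by fetching the next node while processing the current one:
 *
 *	for (p = head; p != NULL; p = p->next) {
 *		prefetch(p->next);	// lfetch is a non-faulting hint
 *		do_something(p);
 *	}
 *
 * prefetchw() uses lfetch.excl to acquire the line in exclusive state when a
 * write (e.g. taking a spinlock) is expected to follow.
 */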
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_IA64_PROCESSOR_H */