 * We need the APIC definitions automatically as part of 'smp.h'
8 #include <linux/config.h>
9 #include <linux/threads.h>
10 #include <linux/ptrace.h>
13 #ifdef CONFIG_X86_LOCAL_APIC
15 #include <asm/fixmap.h>
16 #include <asm/bitops.h>
17 #include <asm/mpspec.h>
18 #ifdef CONFIG_X86_IO_APIC
19 #include <asm/io_apic.h>
 * Private routines/data
/* Trampoline/bootstrap memory setup for secondary CPUs. */
extern void smp_alloc_memory(void);
/* Bitmaps of CPUs: physically present vs. brought online.
 * NOTE(review): presumably indexed by CPU/APIC number — confirm against
 * the writers in smpboot, which are outside this excerpt. */
extern unsigned long phys_cpu_present_map;
extern unsigned long cpu_online_map;
/* Cross-CPU TLB-flush request word; volatile because it is written by
 * other CPUs (see smp_invalidate_rcv below). */
extern volatile unsigned long smp_invalidate_needed;

/* Cross-CPU messaging and TLB shootdown entry points. */
extern void smp_flush_tlb(void);
extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
extern void smp_send_reschedule(int cpu);
extern void smp_invalidate_rcv(void);		/* Process an NMI */
/* Optional MTRR synchronization callback, set elsewhere when MTRRs are in use. */
extern void (*mtrr_hook) (void);
/* Drop the low identity mappings once they are no longer needed for CPU bringup. */
extern void zap_low_mappings (void);
/*
 * On x86 all CPUs are mapped 1:1 to the APIC space.
 * This simplifies scheduling and IPI sending and
 * compresses data structures.
 *
 * NOTE(review): the bodies of the two mappers below fall outside this
 * excerpt; given the 1:1 mapping stated above they presumably return
 * 'cpu' unchanged — confirm against the full header.
 */
extern inline int cpu_logical_map(int cpu)
extern inline int cpu_number_map(int cpu)
/*
 * Some lowlevel functions might want to know about
 * the real APIC ID <-> CPU # mapping.
 */
extern volatile int x86_apicid_to_cpu[NR_CPUS];	/* APIC ID -> kernel CPU number */
extern volatile int x86_cpu_to_apicid[NR_CPUS];	/* kernel CPU number -> APIC ID */
/*
 * General functions that each host system must provide.
 */
extern void smp_boot_cpus(void);
extern void smp_store_cpu_info(int id);	/* Store per CPU info (like the initial udelay numbers) */
/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup. We map APIC_BASE very early in page_setup(),
 * so this is correct in the x86 case.
 */
/* Current CPU number, read from the per-CPU data area (PDA). */
#define smp_processor_id() read_pda(cpunumber)

/* CPU number taken from the task struct located via the current stack. */
#define stack_smp_processor_id() (stack_current()->processor)
/* Physical CPU id, read directly from the local APIC's ID register.
 * NOTE(review): the function's braces fall outside this excerpt. */
extern __inline int hard_smp_processor_id(void)
	/* we don't want to mark this access volatile - bad code generation */
	return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
#endif /* !ASSEMBLY */

#define NO_PROC_ID 0xFF		/* No processor magic marker */

/*
 * This magic constant controls our willingness to transfer
 * a process across CPUs. Such a transfer incurs misses on the L1
 * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
 * gut feeling is this will vary by board in value. For a board
 * with separate L2 cache it probably depends also on the RSS, and
 * for a board with shared L2 cache it ought to decay fast as other
 * (NOTE(review): the tail of this comment falls outside the visible
 * excerpt.)
 */
#define PROC_CHANGE_PENALTY 15		/* Schedule penalty */
/* IPI destination settings: logical destination mode, target CPU mask 1. */
#define INT_DELIVERY_MODE 1	/* logical delivery */
#define TARGET_CPUS 1

/* NOTE(review): this redefinition of stack_smp_processor_id() to 0 is
 * presumably the uniprocessor (#else of CONFIG_SMP) fallback — the
 * guarding preprocessor lines fall outside this excerpt; confirm. */
#define stack_smp_processor_id() 0