projects
/
powerpc.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Merge ../linux-2.6/
[powerpc.git]
/
arch
/
x86_64
/
kernel
/
smp.c
diff --git
a/arch/x86_64/kernel/smp.c
b/arch/x86_64/kernel/smp.c
index
4a6628b
..
5a1c0a3
100644
(file)
--- a/
arch/x86_64/kernel/smp.c
+++ b/
arch/x86_64/kernel/smp.c
@@
-135,10
+135,10
@@
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
cpu = smp_processor_id();
/*
cpu = smp_processor_id();
/*
- * orig_rax contains the interrupt vector - 256.
+ * orig_rax contains the negated interrupt vector.
* Use that to determine where the sender put the data.
*/
* Use that to determine where the sender put the data.
*/
- sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
+ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
f = &per_cpu(flush_state, sender);
if (!cpu_isset(cpu, f->flush_cpumask))
f = &per_cpu(flush_state, sender);
if (!cpu_isset(cpu, f->flush_cpumask))
@@
-224,6
+224,7
@@
void flush_tlb_current_task(void)
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm (struct mm_struct * mm)
{
void flush_tlb_mm (struct mm_struct * mm)
{
@@
-244,6
+245,7
@@
void flush_tlb_mm (struct mm_struct * mm)
preempt_enable();
}
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
@@
-266,6
+268,7
@@
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
preempt_enable();
}
preempt_enable();
}
+EXPORT_SYMBOL(flush_tlb_page);
static void do_flush_tlb_all(void* info)
{
static void do_flush_tlb_all(void* info)
{
@@
-443,6
+446,7
@@
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
spin_unlock(&call_lock);
return 0;
}
spin_unlock(&call_lock);
return 0;
}
+EXPORT_SYMBOL(smp_call_function);
void smp_stop_cpu(void)
{
void smp_stop_cpu(void)
{
@@
-460,7
+464,7
@@
static void smp_really_stop_cpu(void *dummy)
{
smp_stop_cpu();
for (;;)
{
smp_stop_cpu();
for (;;)
- asm("hlt");
+ halt();
}
void smp_send_stop(void)
}
void smp_send_stop(void)
@@
-470,7
+474,7
@@
void smp_send_stop(void)
return;
/* Don't deadlock on the call lock in panic */
if (!spin_trylock(&call_lock)) {
return;
/* Don't deadlock on the call lock in panic */
if (!spin_trylock(&call_lock)) {
- /* ignore locking because we have paniced anyways */
+ /* ignore locking because we have panicked anyways */
nolock = 1;
}
__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
nolock = 1;
}
__smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
@@
-520,13
+524,13
@@
asmlinkage void smp_call_function_interrupt(void)
int safe_smp_processor_id(void)
{
int safe_smp_processor_id(void)
{
- int apicid, i;
+ unsigned apicid, i;
if (disable_apic)
return 0;
apicid = hard_smp_processor_id();
if (disable_apic)
return 0;
apicid = hard_smp_processor_id();
- if (x86_cpu_to_apicid[apicid] == apicid)
+ if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
return apicid;
for (i = 0; i < NR_CPUS; ++i) {
return apicid;
for (i = 0; i < NR_CPUS; ++i) {