*/
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/profile.h>
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
-#define NUM_MSR_MAPS (sizeof(msrpm_ranges) / sizeof(*msrpm_ranges))
+#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
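+/*
+ * Each MSR in the permission map takes two bits (one for the read and
+ * one for the write intercept), so a 2048-byte range covers
+ * 2048 * 8 / 2 = 8192 MSRs.
+ */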
wrmsrl(MSR_VM_HSAVE_PA, 0);
rdmsrl(MSR_EFER, efer);
wrmsrl(MSR_EFER, efer & ~MSR_EFER_SVME_MASK);
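+	/*
+	 * EFER.SVME is now clear, so SVM is disabled on this cpu; tear
+	 * down the per-cpu data.
+	 */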
- per_cpu(svm_data, raw_smp_processor_id()) = 0;
+ per_cpu(svm_data, raw_smp_processor_id()) = NULL;
__free_page(svm_data->save_area);
kfree(svm_data);
}
control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
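+			/*
+			 * An intercepted SMI causes a #VMEXIT instead of
+			 * being delivered inside the guest.
+			 */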
+ (1ULL << INTERCEPT_SMI) |
/*
* selective cr0 intercept bug?
* 0: 0f 22 d8 mov %eax,%cr3
save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
save->cs.limit = 0xffff;
- save->cs.base = 0xffff0000;
+ /*
+ * cs.base should really be 0xffff0000, but vmx can't handle that, so
+ * be consistent with it.
+ *
+ * Replace when we have real mode working for vmx.
+ */
+ save->cs.base = 0xf0000;
save->gdtr.limit = 0xffff;
save->idtr.limit = 0xffff;
 * The cr0 value on cpu init should be 0x60000010 (CD and NW set); we
 * enable the cpu cache by default. The proper way is to enable the
 * cache in the BIOS.
 */
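+	/*
+	 * CR0_WP_MASK is forced on so that supervisor-mode writes honor
+	 * write protection, which kvm's shadow page tables rely on
+	 * (presumed rationale).
+	 */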
- save->cr0 = 0x00000010 | CR0_PG_MASK;
+ save->cr0 = 0x00000010 | CR0_PG_MASK | CR0_WP_MASK;
save->cr4 = CR4_PAE_MASK;
/* rdx = ?? */
}
kfree(vcpu->svm);
}
-static struct kvm_vcpu *svm_vcpu_load(struct kvm_vcpu *vcpu)
+static void svm_vcpu_load(struct kvm_vcpu *vcpu)
{
get_cpu();
- return vcpu;
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
put_cpu();
}
+static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
+{
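+	/* nothing to do: svm does not cache guest state on the cpu */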
+}
+
static void svm_cache_regs(struct kvm_vcpu *vcpu)
{
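+	/*
+	 * These registers live in the vmcb save area rather than in the
+	 * generic register file; mirror them so common code sees current
+	 * values.
+	 */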
vcpu->regs[VCPU_REGS_RAX] = vcpu->svm->vmcb->save.rax;
case VCPU_SREG_LDTR: return &save->ldtr;
}
BUG();
- return 0;
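+	/* BUG() does not return; this return merely placates the compiler. */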
+ return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
}
#endif
vcpu->svm->cr0 = cr0;
- vcpu->svm->vmcb->save.cr0 = cr0 | CR0_PG_MASK;
+ vcpu->svm->vmcb->save.cr0 = cr0 | CR0_PG_MASK | CR0_WP_MASK;
vcpu->cr0 = cr0;
}
return 0;
*addr_override = 0;
- *seg = 0;
+ *seg = NULL;
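+	/*
+	 * Walk the instruction's prefix bytes, looking for overrides
+	 * (segment, address size) that affect the I/O address calculation.
+	 */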
for (i = 0; i < ins_length; i++)
switch (inst[i]) {
case 0xf0:
addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
if (!addr_mask) {
- printk(KERN_DEBUG "%s: get io address failed\n", __FUNCTION__);
+ printk(KERN_DEBUG "%s: get io address failed\n",
+ __FUNCTION__);
return 1;
}
if (kvm_run->io.rep) {
- kvm_run->io.count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
+ kvm_run->io.count
+ = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
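+		/* EFLAGS.DF set means the string operation moves down in memory. */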
kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
& X86_EFLAGS_DF) != 0;
}
- } else {
+ } else
kvm_run->io.value = vcpu->svm->vmcb->save.rax;
- }
return 0;
}
-
static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
return 1;
return 0;
}
+static int vmmcall_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
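+	/* VMMCALL is a three-byte opcode (0f 01 d9); skip past it. */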
+ vcpu->svm->vmcb->save.rip += 3;
+ return kvm_hypercall(vcpu, kvm_run);
+}
+
static int invalid_op_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
inject_ud(vcpu);
static int emulate_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- if (emulate_instruction(vcpu, 0, 0, 0) != EMULATE_DONE)
+ if (emulate_instruction(vcpu, NULL, 0, 0) != EMULATE_DONE)
printk(KERN_ERR "%s: failed\n", __FUNCTION__);
return 1;
}
case MSR_K6_STAR:
vcpu->svm->vmcb->save.star = data;
break;
-#ifdef CONFIG_X86_64_
+#ifdef CONFIG_X86_64
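+	/* LSTAR (the 64-bit syscall entry point) exists only on x86_64. */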
case MSR_LSTAR:
vcpu->svm->vmcb->save.lstar = data;
break;
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
[SVM_EXIT_VMRUN] = invalid_op_interception,
- [SVM_EXIT_VMMCALL] = invalid_op_interception,
+ [SVM_EXIT_VMMCALL] = vmmcall_interception,
[SVM_EXIT_VMLOAD] = invalid_op_interception,
[SVM_EXIT_VMSAVE] = invalid_op_interception,
[SVM_EXIT_STGI] = invalid_op_interception,
__FUNCTION__, vcpu->svm->vmcb->control.exit_int_info,
exit_code);
- if (exit_code >= sizeof(svm_exit_handlers) / sizeof(*svm_exit_handlers)
+ if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| svm_exit_handlers[exit_code] == 0) {
kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
printk(KERN_ERR "%s: 0x%x @ 0x%llx cr0 0x%lx rflags 0x%llx\n",
return 0;
}
+static void
+svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
+{
+	/*
+	 * Patch in the VMMCALL instruction (0f 01 d9), followed by a RET
+	 * (c3) back to the caller:
+	 */
+ hypercall[0] = 0x0f;
+ hypercall[1] = 0x01;
+ hypercall[2] = 0xd9;
+ hypercall[3] = 0xc3;
+}
+
static struct kvm_arch_ops svm_arch_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.vcpu_load = svm_vcpu_load,
.vcpu_put = svm_vcpu_put,
+ .vcpu_decache = svm_vcpu_decache,
.set_guest_debug = svm_guest_debug,
.get_msr = svm_get_msr,
.run = svm_vcpu_run,
.skip_emulated_instruction = skip_emulated_instruction,
.vcpu_setup = svm_vcpu_setup,
+ .patch_hypercall = svm_patch_hypercall,
};
static int __init svm_init(void)