#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
+#include <linux/magic.h>
#include <asm/processor.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <asm/desc.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
+#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mount.h>
static struct dentry *debugfs_dir;
-#define KVMFS_MAGIC 0x19700426
struct vfsmount *kvmfs_mnt;
#define MAX_IO_MSRS 256
#endif
+static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
+ unsigned long arg);
+
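+/*
+ * Allocate an anonymous inode on the kvmfs pseudo filesystem, owned by the
+ * current user and carrying the given file operations.
+ */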
+static struct inode *kvmfs_inode(struct file_operations *fops)
+{
+ int error = -ENOMEM;
+ struct inode *inode = new_inode(kvmfs_mnt->mnt_sb);
+
+ if (!inode)
+ goto eexit_1;
+
+ inode->i_fop = fops;
+
+ /*
+ * Mark the inode dirty from the very beginning,
+ * that way it will never be moved to the dirty
+ * list because mark_inode_dirty() will think
+ * that it already _is_ on the dirty list.
+ */
+ inode->i_state = I_DIRTY;
+ inode->i_mode = S_IRUSR | S_IWUSR;
+ inode->i_uid = current->fsuid;
+ inode->i_gid = current->fsgid;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ return inode;
+
+eexit_1:
+ return ERR_PTR(error);
+}
+
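+/*
+ * Wrap an inode in a new read-write struct file on the kvmfs mount.  The
+ * opaque private_data (a struct kvm or struct kvm_vcpu) is what the ioctl
+ * handlers later recover from filp->private_data.
+ */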
+static struct file *kvmfs_file(struct inode *inode, void *private_data)
+{
+ struct file *file = get_empty_filp();
+
+ if (!file)
+ return ERR_PTR(-ENFILE);
+
+ file->f_path.mnt = mntget(kvmfs_mnt);
+ file->f_path.dentry = d_alloc_anon(inode);
+ if (!file->f_path.dentry)
+ return ERR_PTR(-ENOMEM);
+ file->f_mapping = inode->i_mapping;
+
+ file->f_pos = 0;
+ file->f_flags = O_RDWR;
+ file->f_op = inode->i_fop;
+ file->f_mode = FMODE_READ | FMODE_WRITE;
+ file->f_version = 0;
+ file->private_data = private_data;
+ return file;
+}
+
unsigned long segment_base(u16 selector)
{
struct descriptor_table gdt;
unsigned now;
unsigned offset;
hva_t guest_buf;
+ gfn_t gfn;
paddr = gva_to_hpa(vcpu, addr);
if (is_error_hpa(paddr))
break;
+ gfn = vcpu->mmu.gva_to_gpa(vcpu, addr) >> PAGE_SHIFT;
+ mark_page_dirty(vcpu->kvm, gfn);
guest_buf = (hva_t)kmap_atomic(
pfn_to_page(paddr >> PAGE_SHIFT), KM_USER0);
offset = addr & ~PAGE_MASK;
}
EXPORT_SYMBOL_GPL(kvm_write_guest);
-static int vcpu_slot(struct kvm_vcpu *vcpu)
+/*
+ * Switches to specified vcpu, until a matching vcpu_put()
+ */
+static void vcpu_load(struct kvm_vcpu *vcpu)
{
- return vcpu - vcpu->kvm->vcpus;
+ mutex_lock(&vcpu->mutex);
+ kvm_arch_ops->vcpu_load(vcpu);
}
/*
- * Switches to specified vcpu, until a matching vcpu_put()
+ * Switches to specified vcpu, until a matching vcpu_put(). Will return NULL
+ * if the slot is not populated.
*/
-static struct kvm_vcpu *vcpu_load(struct kvm *kvm, int vcpu_slot)
+static struct kvm_vcpu *vcpu_load_slot(struct kvm *kvm, int slot)
{
- struct kvm_vcpu *vcpu = &kvm->vcpus[vcpu_slot];
+ struct kvm_vcpu *vcpu = &kvm->vcpus[slot];
mutex_lock(&vcpu->mutex);
- if (unlikely(!vcpu->vmcs)) {
+ if (!vcpu->vmcs) {
mutex_unlock(&vcpu->mutex);
return NULL;
}
- return kvm_arch_ops->vcpu_load(vcpu);
+ kvm_arch_ops->vcpu_load(vcpu);
+ return vcpu;
}
static void vcpu_put(struct kvm_vcpu *vcpu)
mutex_unlock(&vcpu->mutex);
}
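+
+/*
+ * Illustrative sketch of the intended pairing, as used by the
+ * kvm_vcpu_ioctl_*() handlers below: vcpu_load() takes vcpu->mutex and
+ * loads the arch state, vcpu_put() releases it again.
+ *
+ *	vcpu_load(vcpu);
+ *	... operate on the vcpu ...
+ *	vcpu_put(vcpu);
+ */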
-static int kvm_dev_open(struct inode *inode, struct file *filp)
+static struct kvm *kvm_create_vm(void)
{
struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
int i;
if (!kvm)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&kvm->lock);
INIT_LIST_HEAD(&kvm->active_mmu_pages);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
}
- filp->private_data = kvm;
+ return kvm;
+}
+
+static int kvm_dev_open(struct inode *inode, struct file *filp)
+{
return 0;
}
static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
{
- if (!vcpu_load(vcpu->kvm, vcpu_slot(vcpu)))
+ if (!vcpu->vmcs)
return;
+ vcpu_load(vcpu);
kvm_mmu_destroy(vcpu);
vcpu_put(vcpu);
kvm_arch_ops->vcpu_free(vcpu);
static int kvm_dev_release(struct inode *inode, struct file *filp)
{
- struct kvm *kvm = filp->private_data;
+ return 0;
+}
+static void kvm_destroy_vm(struct kvm *kvm)
+{
spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
kfree(kvm);
+}
+
+static int kvm_vm_release(struct inode *inode, struct file *filp)
+{
+ struct kvm *kvm = filp->private_data;
+
+ kvm_destroy_vm(kvm);
return 0;
}
}
EXPORT_SYMBOL_GPL(fx_init);
-/*
- * Creates some virtual cpus. Good luck creating more than one.
- */
-static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
+static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
{
- int r;
- struct kvm_vcpu *vcpu;
-
- r = -EINVAL;
- if (!valid_vcpu(n))
- goto out;
-
- vcpu = &kvm->vcpus[n];
-
- mutex_lock(&vcpu->mutex);
-
- if (vcpu->vmcs) {
- mutex_unlock(&vcpu->mutex);
- return -EEXIST;
- }
-
- vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
- FX_IMAGE_ALIGN);
- vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
-
- r = kvm_arch_ops->vcpu_create(vcpu);
- if (r < 0)
- goto out_free_vcpus;
-
- r = kvm_mmu_create(vcpu);
- if (r < 0)
- goto out_free_vcpus;
-
- kvm_arch_ops->vcpu_load(vcpu);
- r = kvm_mmu_setup(vcpu);
- if (r >= 0)
- r = kvm_arch_ops->vcpu_setup(vcpu);
- vcpu_put(vcpu);
-
- if (r < 0)
- goto out_free_vcpus;
-
- return 0;
-
-out_free_vcpus:
- kvm_free_vcpu(vcpu);
- mutex_unlock(&vcpu->mutex);
-out:
- return r;
+ spin_lock(&vcpu->kvm->lock);
+ kvm_mmu_slot_remove_write_access(vcpu, slot);
+ spin_unlock(&vcpu->kvm->lock);
}
/*
*
* Discontiguous memory is allowed, mostly for framebuffers.
*/
-static int kvm_dev_ioctl_set_memory_region(struct kvm *kvm,
- struct kvm_memory_region *mem)
+static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+ struct kvm_memory_region *mem)
{
int r;
gfn_t base_gfn;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
struct kvm_vcpu *vcpu;
- vcpu = vcpu_load(kvm, i);
+ vcpu = vcpu_load_slot(kvm, i);
if (!vcpu)
continue;
+ if (new.flags & KVM_MEM_LOG_DIRTY_PAGES)
+ do_remove_write_access(vcpu, mem->slot);
kvm_mmu_reset_context(vcpu);
vcpu_put(vcpu);
}
return r;
}
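+
+/*
+ * Illustrative only: userspace now issues this ioctl on the VM fd rather
+ * than on /dev/kvm.  A minimal sketch, assuming the existing
+ * struct kvm_memory_region layout from <linux/kvm.h>:
+ *
+ *	struct kvm_memory_region mem = {
+ *		.slot            = 0,
+ *		.flags           = KVM_MEM_LOG_DIRTY_PAGES,
+ *		.guest_phys_addr = 0,
+ *		.memory_size     = 16 * 1024 * 1024,
+ *	};
+ *	ioctl(vm_fd, KVM_SET_MEMORY_REGION, &mem);
+ */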
-static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
-{
- spin_lock(&vcpu->kvm->lock);
- kvm_mmu_slot_remove_write_access(vcpu, slot);
- spin_unlock(&vcpu->kvm->lock);
-}
-
/*
* Get (and clear) the dirty memory log for a memory slot.
*/
-static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
- struct kvm_dirty_log *log)
+static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
{
struct kvm_memory_slot *memslot;
int r, i;
if (!memslot->dirty_bitmap)
goto out;
- n = ALIGN(memslot->npages, 8) / 8;
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
- for (i = 0; !any && i < n; ++i)
+ for (i = 0; !any && i < n/sizeof(long); ++i)
any = memslot->dirty_bitmap[i];
r = -EFAULT;
if (any) {
cleared = 0;
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
+ struct kvm_vcpu *vcpu;
+ vcpu = vcpu_load_slot(kvm, i);
if (!vcpu)
continue;
if (!cleared) {
return 0;
page = gfn_to_page(m, gpa >> PAGE_SHIFT);
kvm_mmu_pre_write(vcpu, gpa, bytes);
+ mark_page_dirty(vcpu->kvm, gpa >> PAGE_SHIFT);
virt = kmap_atomic(page, KM_USER0);
memcpy(virt + offset_in_page(gpa), &val, bytes);
kunmap_atomic(virt, KM_USER0);
if (is_error_hpa(para_state_hpa))
goto err_gp;
+ mark_page_dirty(vcpu->kvm, para_state_gpa >> PAGE_SHIFT);
para_state_page = pfn_to_page(para_state_hpa >> PAGE_SHIFT);
para_state = kmap_atomic(para_state_page, KM_USER0);
vcpu->para_state_gpa = para_state_gpa;
vcpu->hypercall_gpa = hypercall_gpa;
+ mark_page_dirty(vcpu->kvm, hypercall_gpa >> PAGE_SHIFT);
hypercall = kmap_atomic(pfn_to_page(hypercall_hpa >> PAGE_SHIFT),
KM_USER1) + (hypercall_hpa & ~PAGE_MASK);
kvm_arch_ops->patch_hypercall(vcpu, hypercall);
{
vcpu_put(vcpu);
cond_resched();
- /* Cannot fail - no vcpu unplug yet. */
- vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
+ vcpu_load(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_resched);
}
EXPORT_SYMBOL_GPL(save_msrs);
-static int kvm_dev_ioctl_run(struct kvm *kvm, struct kvm_run *kvm_run)
+static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
- struct kvm_vcpu *vcpu;
int r;
- if (!valid_vcpu(kvm_run->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, kvm_run->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
/* re-sync apic's tpr */
vcpu->cr8 = kvm_run->cr8;
return r;
}
-static int kvm_dev_ioctl_get_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
+ struct kvm_regs *regs)
{
- struct kvm_vcpu *vcpu;
-
- if (!valid_vcpu(regs->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, regs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
kvm_arch_ops->cache_regs(vcpu);
return 0;
}
-static int kvm_dev_ioctl_set_regs(struct kvm *kvm, struct kvm_regs *regs)
+static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
+ struct kvm_regs *regs)
{
- struct kvm_vcpu *vcpu;
-
- if (!valid_vcpu(regs->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, regs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
vcpu->regs[VCPU_REGS_RAX] = regs->rax;
vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
return kvm_arch_ops->get_segment(vcpu, var, seg);
}
-static int kvm_dev_ioctl_get_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
- struct kvm_vcpu *vcpu;
struct descriptor_table dt;
- if (!valid_vcpu(sregs->vcpu))
- return -EINVAL;
- vcpu = vcpu_load(kvm, sregs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
return kvm_arch_ops->set_segment(vcpu, var, seg);
}
-static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
+static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+ struct kvm_sregs *sregs)
{
- struct kvm_vcpu *vcpu;
int mmu_reset_needed = 0;
int i;
struct descriptor_table dt;
- if (!valid_vcpu(sregs->vcpu))
- return -EINVAL;
- vcpu = vcpu_load(kvm, sregs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
*
* @return number of msrs set successfully.
*/
-static int __msr_io(struct kvm *kvm, struct kvm_msrs *msrs,
+static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
struct kvm_msr_entry *entries,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
- struct kvm_vcpu *vcpu;
int i;
- if (!valid_vcpu(msrs->vcpu))
- return -EINVAL;
-
- vcpu = vcpu_load(kvm, msrs->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
*
* @return number of msrs set successfully.
*/
-static int msr_io(struct kvm *kvm, struct kvm_msrs __user *user_msrs,
+static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data),
int writeback)
if (copy_from_user(entries, user_msrs->entries, size))
goto out_free;
- r = n = __msr_io(kvm, &msrs, entries, do_msr);
+ r = n = __msr_io(vcpu, &msrs, entries, do_msr);
if (r < 0)
goto out_free;
/*
* Translate a guest virtual address to a guest physical address.
*/
-static int kvm_dev_ioctl_translate(struct kvm *kvm, struct kvm_translation *tr)
+static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+ struct kvm_translation *tr)
{
unsigned long vaddr = tr->linear_address;
- struct kvm_vcpu *vcpu;
gpa_t gpa;
- vcpu = vcpu_load(kvm, tr->vcpu);
- if (!vcpu)
- return -ENOENT;
- spin_lock(&kvm->lock);
+ vcpu_load(vcpu);
+ spin_lock(&vcpu->kvm->lock);
gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
tr->usermode = 0;
- spin_unlock(&kvm->lock);
+ spin_unlock(&vcpu->kvm->lock);
vcpu_put(vcpu);
return 0;
}
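+
+/*
+ * Illustrative only: KVM_TRANSLATE is now a per-vcpu ioctl.  A sketch using
+ * the struct kvm_translation fields touched above (gva, gpa and vcpu_fd are
+ * placeholders):
+ *
+ *	struct kvm_translation tr;
+ *
+ *	memset(&tr, 0, sizeof tr);
+ *	tr.linear_address = gva;
+ *	ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
+ *	if (tr.valid)
+ *		gpa = tr.physical_address;
+ */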
-static int kvm_dev_ioctl_interrupt(struct kvm *kvm, struct kvm_interrupt *irq)
+static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+ struct kvm_interrupt *irq)
{
- struct kvm_vcpu *vcpu;
-
- if (!valid_vcpu(irq->vcpu))
- return -EINVAL;
if (irq->irq < 0 || irq->irq >= 256)
return -EINVAL;
- vcpu = vcpu_load(kvm, irq->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
set_bit(irq->irq, vcpu->irq_pending);
set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
return 0;
}
-static int kvm_dev_ioctl_debug_guest(struct kvm *kvm,
- struct kvm_debug_guest *dbg)
+static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+ struct kvm_debug_guest *dbg)
{
- struct kvm_vcpu *vcpu;
int r;
- if (!valid_vcpu(dbg->vcpu))
- return -EINVAL;
- vcpu = vcpu_load(kvm, dbg->vcpu);
- if (!vcpu)
- return -ENOENT;
+ vcpu_load(vcpu);
r = kvm_arch_ops->set_guest_debug(vcpu, dbg);
return r;
}
-static long kvm_dev_ioctl(struct file *filp,
- unsigned int ioctl, unsigned long arg)
+static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
- struct kvm *kvm = filp->private_data;
+ struct kvm_vcpu *vcpu = filp->private_data;
+
+ fput(vcpu->kvm->filp);
+ return 0;
+}
+
+static struct file_operations kvm_vcpu_fops = {
+ .release = kvm_vcpu_release,
+ .unlocked_ioctl = kvm_vcpu_ioctl,
+ .compat_ioctl = kvm_vcpu_ioctl,
+};
+
+/*
+ * Allocates an inode for the vcpu.
+ */
+static int create_vcpu_fd(struct kvm_vcpu *vcpu)
+{
+ int fd, r;
+ struct inode *inode;
+ struct file *file;
+
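+	/*
+	 * Each vcpu fd pins the VM's file, so the VM cannot go away while
+	 * a vcpu fd is open; the reference is dropped in kvm_vcpu_release().
+	 */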
+ atomic_inc(&vcpu->kvm->filp->f_count);
+ inode = kvmfs_inode(&kvm_vcpu_fops);
+ if (IS_ERR(inode)) {
+ r = PTR_ERR(inode);
+ goto out1;
+ }
+
+ file = kvmfs_file(inode, vcpu);
+ if (IS_ERR(file)) {
+ r = PTR_ERR(file);
+ goto out2;
+ }
+
+ r = get_unused_fd();
+ if (r < 0)
+ goto out3;
+ fd = r;
+ fd_install(fd, file);
+
+ return fd;
+
+out3:
+ fput(file);
+out2:
+ iput(inode);
+out1:
+ fput(vcpu->kvm->filp);
+ return r;
+}
+
+/*
+ * Creates some virtual cpus. Good luck creating more than one.
+ */
+static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
+{
+ int r;
+ struct kvm_vcpu *vcpu;
+
+ r = -EINVAL;
+ if (!valid_vcpu(n))
+ goto out;
+
+ vcpu = &kvm->vcpus[n];
+
+ mutex_lock(&vcpu->mutex);
+
+ if (vcpu->vmcs) {
+ mutex_unlock(&vcpu->mutex);
+ return -EEXIST;
+ }
+
+ vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
+ FX_IMAGE_ALIGN);
+ vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
+
+ r = kvm_arch_ops->vcpu_create(vcpu);
+ if (r < 0)
+ goto out_free_vcpus;
+
+ r = kvm_mmu_create(vcpu);
+ if (r < 0)
+ goto out_free_vcpus;
+
+ kvm_arch_ops->vcpu_load(vcpu);
+ r = kvm_mmu_setup(vcpu);
+ if (r >= 0)
+ r = kvm_arch_ops->vcpu_setup(vcpu);
+ vcpu_put(vcpu);
+
+ if (r < 0)
+ goto out_free_vcpus;
+
+ r = create_vcpu_fd(vcpu);
+ if (r < 0)
+ goto out_free_vcpus;
+
+ return r;
+
+out_free_vcpus:
+ kvm_free_vcpu(vcpu);
+ mutex_unlock(&vcpu->mutex);
+out:
+ return r;
+}
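+
+/*
+ * Illustrative only: KVM_CREATE_VCPU is now issued on the VM fd (the
+ * descriptor returned by KVM_CREATE_VM) and returns a new vcpu fd, which
+ * carries KVM_RUN, KVM_GET_REGS and friends:
+ *
+ *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
+ *	struct kvm_run run;
+ *
+ *	memset(&run, 0, sizeof run);
+ *	ioctl(vcpu_fd, KVM_RUN, &run);
+ */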
+
+static long kvm_vcpu_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -EINVAL;
switch (ioctl) {
- case KVM_GET_API_VERSION:
- r = KVM_API_VERSION;
- break;
- case KVM_CREATE_VCPU:
- r = kvm_dev_ioctl_create_vcpu(kvm, arg);
- if (r)
- goto out;
- break;
case KVM_RUN: {
struct kvm_run kvm_run;
r = -EFAULT;
if (copy_from_user(&kvm_run, argp, sizeof kvm_run))
goto out;
- r = kvm_dev_ioctl_run(kvm, &kvm_run);
+ r = kvm_vcpu_ioctl_run(vcpu, &kvm_run);
if (r < 0 && r != -EINTR)
goto out;
if (copy_to_user(argp, &kvm_run, sizeof kvm_run)) {
case KVM_GET_REGS: {
struct kvm_regs kvm_regs;
- r = -EFAULT;
- if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
- goto out;
- r = kvm_dev_ioctl_get_regs(kvm, &kvm_regs);
+ memset(&kvm_regs, 0, sizeof kvm_regs);
+ r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
if (r)
goto out;
r = -EFAULT;
r = -EFAULT;
if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
goto out;
- r = kvm_dev_ioctl_set_regs(kvm, &kvm_regs);
+ r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
if (r)
goto out;
r = 0;
case KVM_GET_SREGS: {
struct kvm_sregs kvm_sregs;
- r = -EFAULT;
- if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
- goto out;
- r = kvm_dev_ioctl_get_sregs(kvm, &kvm_sregs);
+ memset(&kvm_sregs, 0, sizeof kvm_sregs);
+ r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
if (r)
goto out;
r = -EFAULT;
r = -EFAULT;
if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
goto out;
- r = kvm_dev_ioctl_set_sregs(kvm, &kvm_sregs);
+ r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
if (r)
goto out;
r = 0;
r = -EFAULT;
if (copy_from_user(&tr, argp, sizeof tr))
goto out;
- r = kvm_dev_ioctl_translate(kvm, &tr);
+ r = kvm_vcpu_ioctl_translate(vcpu, &tr);
if (r)
goto out;
r = -EFAULT;
r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof irq))
goto out;
- r = kvm_dev_ioctl_interrupt(kvm, &irq);
+ r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
if (r)
goto out;
r = 0;
r = -EFAULT;
if (copy_from_user(&dbg, argp, sizeof dbg))
goto out;
- r = kvm_dev_ioctl_debug_guest(kvm, &dbg);
+ r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
if (r)
goto out;
r = 0;
break;
}
+ case KVM_GET_MSRS:
+ r = msr_io(vcpu, argp, get_msr, 1);
+ break;
+ case KVM_SET_MSRS:
+ r = msr_io(vcpu, argp, do_set_msr, 0);
+ break;
+ default:
+ ;
+ }
+out:
+ return r;
+}
+
+static long kvm_vm_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ struct kvm *kvm = filp->private_data;
+ void __user *argp = (void __user *)arg;
+ int r = -EINVAL;
+
+ switch (ioctl) {
+ case KVM_CREATE_VCPU:
+ r = kvm_vm_ioctl_create_vcpu(kvm, arg);
+ if (r < 0)
+ goto out;
+ break;
case KVM_SET_MEMORY_REGION: {
struct kvm_memory_region kvm_mem;
r = -EFAULT;
if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
goto out;
- r = kvm_dev_ioctl_set_memory_region(kvm, &kvm_mem);
+ r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
if (r)
goto out;
break;
r = -EFAULT;
if (copy_from_user(&log, argp, sizeof log))
goto out;
- r = kvm_dev_ioctl_get_dirty_log(kvm, &log);
+ r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
if (r)
goto out;
break;
}
- case KVM_GET_MSRS:
- r = msr_io(kvm, argp, get_msr, 1);
+ default:
+ ;
+ }
+out:
+ return r;
+}
+
+static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
+ unsigned long address,
+ int *type)
+{
+ struct kvm *kvm = vma->vm_file->private_data;
+ unsigned long pgoff;
+ struct kvm_memory_slot *slot;
+ struct page *page;
+
+ *type = VM_FAULT_MINOR;
+ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+ slot = gfn_to_memslot(kvm, pgoff);
+ if (!slot)
+ return NOPAGE_SIGBUS;
+ page = gfn_to_page(slot, pgoff);
+ if (!page)
+ return NOPAGE_SIGBUS;
+ get_page(page);
+ return page;
+}
+
+static struct vm_operations_struct kvm_vm_vm_ops = {
+ .nopage = kvm_vm_nopage,
+};
+
+static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &kvm_vm_vm_ops;
+ return 0;
+}
+
+static struct file_operations kvm_vm_fops = {
+ .release = kvm_vm_release,
+ .unlocked_ioctl = kvm_vm_ioctl,
+ .compat_ioctl = kvm_vm_ioctl,
+ .mmap = kvm_vm_mmap,
+};
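+
+/*
+ * Guest physical memory is still mapped into userspace with mmap(), only
+ * now on the VM fd instead of /dev/kvm; the file offset selects the guest
+ * frame number.  Illustrative sketch (size and gfn are placeholders):
+ *
+ *	void *ram = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *			 vm_fd, gfn << PAGE_SHIFT);
+ */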
+
+static int kvm_dev_ioctl_create_vm(void)
+{
+ int fd, r;
+ struct inode *inode;
+ struct file *file;
+ struct kvm *kvm;
+
+ inode = kvmfs_inode(&kvm_vm_fops);
+ if (IS_ERR(inode)) {
+ r = PTR_ERR(inode);
+ goto out1;
+ }
+
+ kvm = kvm_create_vm();
+ if (IS_ERR(kvm)) {
+ r = PTR_ERR(kvm);
+ goto out2;
+ }
+
+ file = kvmfs_file(inode, kvm);
+ if (IS_ERR(file)) {
+ r = PTR_ERR(file);
+ goto out3;
+ }
+ kvm->filp = file;
+
+ r = get_unused_fd();
+ if (r < 0)
+ goto out4;
+ fd = r;
+ fd_install(fd, file);
+
+ return fd;
+
+out4:
+ fput(file);
+out3:
+ kvm_destroy_vm(kvm);
+out2:
+ iput(inode);
+out1:
+ return r;
+}
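+
+/*
+ * Illustrative only: the character device now hands out VM file descriptors
+ * instead of holding VM state itself.  A minimal userspace sketch of the
+ * new bootstrap sequence:
+ *
+ *	int kvm_fd = open("/dev/kvm", O_RDWR);
+ *	int vm_fd;
+ *
+ *	if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
+ *		return -1;
+ *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
+ */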
+
+static long kvm_dev_ioctl(struct file *filp,
+ unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ int r = -EINVAL;
+
+ switch (ioctl) {
+ case KVM_GET_API_VERSION:
+ r = KVM_API_VERSION;
break;
- case KVM_SET_MSRS:
- r = msr_io(kvm, argp, do_set_msr, 0);
+ case KVM_CREATE_VM:
+ r = kvm_dev_ioctl_create_vm();
break;
case KVM_GET_MSR_INDEX_LIST: {
struct kvm_msr_list __user *user_msr_list = argp;
return r;
}
-static struct page *kvm_dev_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
-{
- struct kvm *kvm = vma->vm_file->private_data;
- unsigned long pgoff;
- struct kvm_memory_slot *slot;
- struct page *page;
-
- *type = VM_FAULT_MINOR;
- pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- slot = gfn_to_memslot(kvm, pgoff);
- if (!slot)
- return NOPAGE_SIGBUS;
- page = gfn_to_page(slot, pgoff);
- if (!page)
- return NOPAGE_SIGBUS;
- get_page(page);
- return page;
-}
-
-static struct vm_operations_struct kvm_dev_vm_ops = {
- .nopage = kvm_dev_nopage,
-};
-
-static int kvm_dev_mmap(struct file *file, struct vm_area_struct *vma)
-{
- vma->vm_ops = &kvm_dev_vm_ops;
- return 0;
-}
-
static struct file_operations kvm_chardev_ops = {
.open = kvm_dev_open,
.release = kvm_dev_release,
.unlocked_ioctl = kvm_dev_ioctl,
.compat_ioctl = kvm_dev_ioctl,
- .mmap = kvm_dev_mmap,
};
static struct miscdevice kvm_dev = {
static int kvmfs_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data, struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_MAGIC, mnt);
+ return get_sb_pseudo(fs_type, "kvm:", NULL, KVMFS_SUPER_MAGIC, mnt);
}
static struct file_system_type kvm_fs_type = {
bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
memset(__va(bad_page_address), 0, PAGE_SIZE);
- return r;
+ return 0;
out:
kvm_exit_debug();