* the COPYING file in the top-level directory.
*
*/
+
+#include "vmx.h"
+#include "kvm.h"
+
#include <linux/types.h>
#include <linux/string.h>
-#include <asm/page.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
-#include <asm/cmpxchg.h>
-#include "vmx.h"
-#include "kvm.h"
+#include <asm/page.h>
+#include <asm/cmpxchg.h>
#undef MMU_DEBUG
BUG_ON(!(*spte & PT_WRITABLE_MASK));
rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
rmap_remove(vcpu, spte);
- kvm_arch_ops->tlb_flush(vcpu);
set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
}
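
For reference, a minimal sketch of how the write-protect loop body reads once this hunk
is applied (reconstructed only from the lines visible above; the enclosing rmap walk is
elided). The point of the reordering is that write access is removed from the shadow pte
before any TLB is flushed, and the flush now reaches every vcpu of the VM rather than
only the current one:

	BUG_ON(!(*spte & PT_WRITABLE_MASK));
	rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
	rmap_remove(vcpu, spte);
	/* drop write access in the shadow pte first ... */
	set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
	/* ... then invalidate stale translations on all vcpus, not just this one */
	kvm_flush_remote_tlbs(vcpu->kvm);
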
rmap_remove(vcpu, &pt[i]);
pt[i] = 0;
}
- kvm_arch_ops->tlb_flush(vcpu);
+ kvm_flush_remote_tlbs(vcpu->kvm);
return;
}
ent &= PT64_BASE_ADDR_MASK;
mmu_page_remove_parent_pte(vcpu, page_header(ent), &pt[i]);
}
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
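
The same idea applies when a shadow page's children are unlinked: the sptes are zapped
inside the loop and a single remote flush covers the whole page of entries, rather than
one per-vcpu flush per spte. A rough sketch of the leaf-level case after this hunk; the
present-bit check and the PT64_ENT_PER_PAGE bound are assumptions, since they sit
outside the lines shown here:

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (pt[i] & PT_PRESENT_MASK)
				rmap_remove(vcpu, &pt[i]);
			pt[i] = 0;
		}
		/* one flush for the whole page of sptes, visible to all vcpus */
		kvm_flush_remote_tlbs(vcpu->kvm);
		return;
	}
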
static void kvm_mmu_put_page(struct kvm_vcpu *vcpu,
int i;
struct kvm_mmu_page *page;
+ if (!VALID_PAGE(vcpu->mmu.root_hpa))
+ return;
#ifdef CONFIG_X86_64
if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->mmu.root_hpa;
- ASSERT(VALID_PAGE(root));
page = page_header(root);
--page->root_count;
vcpu->mmu.root_hpa = INVALID_PAGE;
hpa_t root = vcpu->mmu.pae_root[i];
if (root) {
- ASSERT(VALID_PAGE(root));
root &= PT64_BASE_ADDR_MASK;
page = page_header(root);
--page->root_count;
context->free = nonpaging_free;
context->root_level = 0;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa);
+ context->root_hpa = INVALID_PAGE;
return 0;
}
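
The common theme in this and the following init_context hunks: context setup no longer
allocates shadow roots or loads them into CR3; it only marks root_hpa invalid, deferring
that work to kvm_mmu_load() further down. A sketch of the resulting function, assuming
the handler assignments above the visible hunk are unchanged:

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	/* no mmu_alloc_roots()/set_cr3() here any more; kvm_mmu_load() does it */
	context->root_hpa = INVALID_PAGE;
	return 0;
}
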
{
pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
mmu_free_roots(vcpu);
- if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
- kvm_mmu_free_some_pages(vcpu);
- mmu_alloc_roots(vcpu);
- kvm_mmu_flush_tlb(vcpu);
- kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
}
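
The new-cr3 path shrinks in the same way: it only frees the old roots, and the new roots
plus the matching set_cr3() and TLB flush happen in kvm_mmu_load() before the vcpu next
enters the guest, which is also why the explicit free-some-pages check and flush could be
dropped here. A sketch of the resulting function, with the signature assumed from context:

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	/* drop the old roots; kvm_mmu_load() rebuilds them and reloads CR3 lazily */
	mmu_free_roots(vcpu);
}
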
static void inject_page_fault(struct kvm_vcpu *vcpu,
context->free = paging_free;
context->root_level = level;
context->shadow_root_level = level;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
- (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+ context->root_hpa = INVALID_PAGE;
return 0;
}
context->free = paging_free;
context->root_level = PT32_ROOT_LEVEL;
context->shadow_root_level = PT32E_ROOT_LEVEL;
- mmu_alloc_roots(vcpu);
- ASSERT(VALID_PAGE(context->root_hpa));
- kvm_arch_ops->set_cr3(vcpu, context->root_hpa |
- (vcpu->cr3 & (CR3_PCD_MASK | CR3_WPT_MASK)));
+ context->root_hpa = INVALID_PAGE;
return 0;
}
ASSERT(vcpu);
ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
- mmu_topup_memory_caches(vcpu);
if (!is_paging(vcpu))
return nonpaging_init_context(vcpu);
else if (is_long_mode(vcpu))
}
int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
+{
+ destroy_kvm_mmu(vcpu);
+ return init_kvm_mmu(vcpu);
+}
+
+int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
int r;
- destroy_kvm_mmu(vcpu);
- r = init_kvm_mmu(vcpu);
- if (r < 0)
- goto out;
+ spin_lock(&vcpu->kvm->lock);
r = mmu_topup_memory_caches(vcpu);
+ if (r)
+ goto out;
+ mmu_alloc_roots(vcpu);
+ kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+ kvm_mmu_flush_tlb(vcpu);
out:
+ spin_unlock(&vcpu->kvm->lock);
return r;
}
+EXPORT_SYMBOL_GPL(kvm_mmu_load);
+
+void kvm_mmu_unload(struct kvm_vcpu *vcpu)
+{
+ mmu_free_roots(vcpu);
+}
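
How a caller would be expected to pair the new entry points (an illustrative sketch, not
part of this patch; kvm_mmu_reload() is a hypothetical wrapper): roots are rebuilt only
when they have been torn down, so guest entries with valid roots skip the work, while
kvm_mmu_unload() drops the roots on vcpu teardown or context reset:

/* Hypothetical helper for the vcpu run path; not part of this diff. */
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (VALID_PAGE(vcpu->mmu.root_hpa))
		return 0;		/* roots already built and loaded into CR3 */

	return kvm_mmu_load(vcpu);	/* topup caches, alloc roots, set_cr3, flush */
}
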
static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *page,
}
}
*spte = 0;
+ kvm_flush_remote_tlbs(vcpu->kvm);
}
static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
}
mmu_free_memory_caches(vcpu);
- kvm_arch_ops->tlb_flush(vcpu);
+ kvm_flush_remote_tlbs(vcpu->kvm);
init_kvm_mmu(vcpu);
}