static int dbg = 1;
#endif
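+/* Without MMU_DEBUG, ASSERT() compiles away to a no-op. */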
+#ifndef MMU_DEBUG
+#define ASSERT(x) do { } while (0)
+#else
#define ASSERT(x) \
if (!(x)) { \
printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
__FILE__, __LINE__, #x); \
}
+#endif
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
struct kvm_rmap_desc *more;
};
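+/* Slab caches shared by all vcpus; created in kvm_mmu_module_init(). */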
+static struct kmem_cache *pte_chain_cache;
+static struct kmem_cache *rmap_desc_cache;
+
static int is_write_protection(struct kvm_vcpu *vcpu)
{
return vcpu->cr0 & CR0_WP_MASK;
}
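+/*
+ * Fill @cache with objects preallocated from @base_cache so that later
+ * MMU code can take them without having to allocate while holding locks.
+ */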
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
- size_t objsize, int min)
+ struct kmem_cache *base_cache, int min,
+ gfp_t gfp_flags)
{
void *obj;
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
- obj = kzalloc(objsize, GFP_NOWAIT);
+ obj = kmem_cache_zalloc(base_cache, gfp_flags);
if (!obj)
return -ENOMEM;
cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
{
int r;
r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
- sizeof(struct kvm_pte_chain), 4);
+ pte_chain_cache, 4, gfp_flags);
if (r)
goto out;
r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
- sizeof(struct kvm_rmap_desc), 1);
+ rmap_desc_cache, 1, gfp_flags);
out:
return r;
}
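+/*
+ * Try an atomic (GFP_NOWAIT) top-up first; if that fails, drop kvm->lock
+ * and the vcpu so the GFP_KERNEL retry below is free to sleep.
+ */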
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+{
+ int r;
+
+ r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
+ if (r < 0) {
+ spin_unlock(&vcpu->kvm->lock);
+ kvm_arch_ops->vcpu_put(vcpu);
+ r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
+ kvm_arch_ops->vcpu_load(vcpu);
+ spin_lock(&vcpu->kvm->lock);
+ }
+ return r;
+}
+
static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
}
+#ifdef MMU_DEBUG
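+/* Debug-only helper: returns 1 if the shadow page contains no nonzero entries. */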
static int is_empty_shadow_page(hpa_t page_hpa)
{
	u64 *pos;
	u64 *end;

	/* scan every entry; any nonzero value means the page is still in use */
	for (pos = __va(page_hpa), end = pos + PAGE_SIZE / sizeof(u64);
	     pos != end; pos++)
		if (*pos != 0)
			return 0;
	return 1;
}
+#endif
static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
{
static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
- ++kvm_stat.tlb_flush;
+ ++vcpu->stat.tlb_flush;
kvm_arch_ops->tlb_flush(vcpu);
}
init_kvm_mmu(vcpu);
}
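+/* Tear down the module-level caches; also used by the init error path below. */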
+void kvm_mmu_module_exit(void)
+{
+ if (pte_chain_cache)
+ kmem_cache_destroy(pte_chain_cache);
+ if (rmap_desc_cache)
+ kmem_cache_destroy(rmap_desc_cache);
+}
+
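+/*
+ * Create the slab caches backing pte chain and rmap descriptor allocations;
+ * on failure, kvm_mmu_module_exit() frees whatever was already created.
+ */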
+int kvm_mmu_module_init(void)
+{
+ pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+ sizeof(struct kvm_pte_chain),
+ 0, 0, NULL, NULL);
+ if (!pte_chain_cache)
+ goto nomem;
+ rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+ sizeof(struct kvm_rmap_desc),
+ 0, 0, NULL, NULL);
+ if (!rmap_desc_cache)
+ goto nomem;
+
+ return 0;
+
+nomem:
+ kvm_mmu_module_exit();
+ return -ENOMEM;
+}
+
#ifdef AUDIT
static const char *audit_msg;
for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
u64 ent = pt[i];
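+		/* '!' binds tighter than '&': parenthesize so we test the present bit */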
- if (!ent & PT_PRESENT_MASK)
+ if (!(ent & PT_PRESENT_MASK))
continue;
va = canonicalize(va);