[powerpc.git] drivers/kvm/mmu.c
index 9ff7480..e8e2281 100644
@@ -52,11 +52,15 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
 static int dbg = 1;
 #endif
 
+#ifndef MMU_DEBUG
+#define ASSERT(x) do { } while (0)
+#else
 #define ASSERT(x)                                                      \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
+#endif
 
 #define PT64_PT_BITS 9
 #define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
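A note on the debug-build branch kept above: expanding ASSERT() to a bare `if` statement is fragile inside a caller's if/else (the dangling-else problem). The conventional hardened form wraps the body in do/while(0); a minimal sketch of that idiom, not part of this patch:

	#define ASSERT(x)                                                      \
	do {                                                                   \
		if (!(x))                                                      \
			printk(KERN_WARNING "assertion failed %s:%d: %s\n",    \
			       __FILE__, __LINE__, #x);                        \
	} while (0)
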
@@ -159,6 +163,9 @@ struct kvm_rmap_desc {
        struct kvm_rmap_desc *more;
 };
 
+static struct kmem_cache *pte_chain_cache;
+static struct kmem_cache *rmap_desc_cache;
+
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
        return vcpu->cr0 & CR0_WP_MASK;
@@ -196,14 +203,15 @@ static int is_rmap_pte(u64 pte)
 }
 
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
-                                 size_t objsize, int min)
+                                 struct kmem_cache *base_cache, int min,
+                                 gfp_t gfp_flags)
 {
        void *obj;
 
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
-               obj = kzalloc(objsize, GFP_NOWAIT);
+               obj = kmem_cache_zalloc(base_cache, gfp_flags);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
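For context on how the topped-up objects are consumed: mmu.c pops them off the per-vcpu array at fault-handling time, where sleeping is not allowed. Roughly, based on the surrounding file (this helper is not part of the hunk shown):

	static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
					    size_t size)
	{
		void *p;

		BUG_ON(!mc->nobjs);		/* topup must have run first */
		p = mc->objects[--mc->nobjs];	/* pop a preallocated object */
		memset(p, 0, size);
		return p;
	}
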
@@ -217,20 +225,35 @@ static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
                kfree(mc->objects[--mc->nobjs]);
 }
 
-static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
 {
        int r;
 
        r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
-                                  sizeof(struct kvm_pte_chain), 4);
+                                  pte_chain_cache, 4, gfp_flags);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
-                                  sizeof(struct kvm_rmap_desc), 1);
+                                  rmap_desc_cache, 1, gfp_flags);
 out:
        return r;
 }
 
+static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
+{
+       int r;
+
+       r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
+       if (r < 0) {
+               spin_unlock(&vcpu->kvm->lock);
+               kvm_arch_ops->vcpu_put(vcpu);
+               r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
+               kvm_arch_ops->vcpu_load(vcpu);
+               spin_lock(&vcpu->kvm->lock);
+       }
+       return r;
+}
+
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
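The two-stage topup above is the core idea of this change: first try an atomic (GFP_NOWAIT) fill while kvm->lock is held, and only if that fails drop the lock and vcpu so a sleeping (GFP_KERNEL) allocation can wait for memory reclaim. The general shape, as a standalone sketch with assumed names (cache, lock):

	void *obj = kmem_cache_zalloc(cache, GFP_NOWAIT);  /* no sleeping: safe under spinlock */
	if (!obj) {
		spin_unlock(&lock);                        /* drop the lock so we may sleep */
		obj = kmem_cache_zalloc(cache, GFP_KERNEL); /* may sleep and trigger reclaim */
		spin_lock(&lock);
		/* re-validate anything the lock protected before continuing */
	}
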
@@ -415,6 +438,7 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
        }
 }
 
+#ifdef MMU_DEBUG
 static int is_empty_shadow_page(hpa_t page_hpa)
 {
        u64 *pos;
@@ -429,6 +453,7 @@ static int is_empty_shadow_page(hpa_t page_hpa)
                }
        return 1;
 }
+#endif
 
 static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
 {
@@ -911,7 +936,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 
 static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
-       ++kvm_stat.tlb_flush;
+       ++vcpu->stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);
 }
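The tlb_flush counter moves here from the old global kvm_stat into per-vcpu accounting. The assumed layout on the vcpu side (the struct itself lives in kvm.h, outside this diff):

	struct kvm_vcpu {
		/* ... existing fields ... */
		struct kvm_stat stat;	/* per-vcpu event counters, e.g. tlb_flush */
	};
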
 
@@ -1333,6 +1358,34 @@ void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
        init_kvm_mmu(vcpu);
 }
 
+void kvm_mmu_module_exit(void)
+{
+       if (pte_chain_cache)
+               kmem_cache_destroy(pte_chain_cache);
+       if (rmap_desc_cache)
+               kmem_cache_destroy(rmap_desc_cache);
+}
+
+int kvm_mmu_module_init(void)
+{
+       pte_chain_cache = kmem_cache_create("kvm_pte_chain",
+                                           sizeof(struct kvm_pte_chain),
+                                           0, 0, NULL, NULL);
+       if (!pte_chain_cache)
+               goto nomem;
+       rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
+                                           sizeof(struct kvm_rmap_desc),
+                                           0, 0, NULL, NULL);
+       if (!rmap_desc_cache)
+               goto nomem;
+
+       return 0;
+
+nomem:
+       kvm_mmu_module_exit();
+       return -ENOMEM;
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
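Two era-specific details in kvm_mmu_module_init()/kvm_mmu_module_exit(): kmem_cache_create() here uses the old six-argument form (name, size, align, flags, ctor, dtor), and kmem_cache_destroy() of this vintage does not accept NULL, hence the if-guards on the exit path. A hypothetical caller on the module load path might look like this (names assumed for illustration):

	static int __init kvm_example_init(void)
	{
		int r = kvm_mmu_module_init();	/* create both slab caches */
		if (r)
			return r;	/* -ENOMEM: partial caches already destroyed */
		return 0;
	}
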
@@ -1355,7 +1408,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];
 
-               if (!ent & PT_PRESENT_MASK)
+               if (!(ent & PT_PRESENT_MASK))
                        continue;
 
                va = canonicalize(va);
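
The one-line fix above corrects an operator-precedence bug: `!` binds tighter than `&`, so the old test parsed as `(!ent) & PT_PRESENT_MASK`. With the present bit at bit 0 (PT_PRESENT_MASK is 1ULL << 0 in this code base), that skipped only all-zero entries rather than all non-present ones:

	/* old: evaluates as (!ent) & 1 -- true only when ent == 0 */
	if (!ent & PT_PRESENT_MASK)
		continue;
	/* fixed: tests the present bit itself */
	if (!(ent & PT_PRESENT_MASK))
		continue;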