Revert "ACPI: Only use IPI on known broken machines (AMD, Dothan/Banias Pentium M)"
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index be79377..e85b4c7 100644
@@ -131,7 +131,7 @@ static int dbg = 1;
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
 
 
-#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & PAGE_MASK)
+#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
 #define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
 
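
Background on this hunk: PAGE_MASK is built from unsigned long, so on a 32-bit host it is only 0xfffff000 and gets zero-extended when ANDed into a u64, silently clearing physical-address bits 32-51; building the mask as ~(u64)(PAGE_SIZE-1) keeps all 52 address bits. A minimal standalone demonstration (plain C, not kernel code; assuming 4K pages and a 32-bit unsigned long):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* what PAGE_MASK looks like when unsigned long is 32 bits */
		uint32_t mask32 = ~(uint32_t)(4096 - 1);	/* 0xfffff000 */
		uint64_t addr = 0x123456789000ULL;		/* frame above 4GB */

		/* old mask: zero-extended, high bits of the address are lost */
		printf("%llx\n", (unsigned long long)(addr & mask32));
		/* new mask: computed in 64 bits, the address survives intact */
		printf("%llx\n", (unsigned long long)(addr & ~(uint64_t)(4096 - 1)));
		return 0;
	}

This prints 56789000 for the old form and 123456789000 for the new one.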
@@ -298,18 +298,18 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       if (!page->private) {
+       if (!page_private(page)) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
-               page->private = (unsigned long)spte;
-       } else if (!(page->private & 1)) {
+               set_page_private(page, (unsigned long)spte);
+       } else if (!(page_private(page) & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
-               desc->shadow_ptes[0] = (u64 *)page->private;
+               desc->shadow_ptes[0] = (u64 *)page_private(page);
                desc->shadow_ptes[1] = spte;
-               page->private = (unsigned long)desc | 1;
+               set_page_private(page, (unsigned long)desc | 1);
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
-               desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+               desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
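
Throughout these rmap hunks, page->private (now read through page_private() and written through set_page_private()) is used as a tagged pointer: a page mapped by exactly one spte stores that spte pointer directly with bit 0 clear, while a page with multiple mappings stores a kvm_rmap_desc chain with bit 0 set. This is safe because both pointer types are at least word-aligned, leaving bit 0 free. A minimal sketch of the encoding, with the types reduced to stubs (not the kernel's declarations):

	typedef unsigned long long u64;
	struct kvm_rmap_desc;			/* chain of shadow-pte slots */

	static inline unsigned long rmap_single(u64 *spte)
	{
		return (unsigned long)spte;	/* bit 0 clear: one spte */
	}

	static inline unsigned long rmap_chain(struct kvm_rmap_desc *desc)
	{
		return (unsigned long)desc | 1;	/* bit 0 set: desc list */
	}

	static inline int rmap_is_chain(unsigned long v)
	{
		return v & 1;
	}

	static inline struct kvm_rmap_desc *rmap_desc(unsigned long v)
	{
		return (struct kvm_rmap_desc *)(v & ~1ul);
	}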
@@ -337,12 +337,12 @@ static void rmap_desc_remove_entry(struct kvm_vcpu *vcpu,
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
-               page->private = (unsigned long)desc->shadow_ptes[0];
+               set_page_private(page, (unsigned long)desc->shadow_ptes[0]);
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
-                       page->private = (unsigned long)desc->more | 1;
+                       set_page_private(page, (unsigned long)desc->more | 1);
        mmu_free_rmap_desc(vcpu, desc);
 }
 
@@ -356,20 +356,20 @@ static void rmap_remove(struct kvm_vcpu *vcpu, u64 *spte)
        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       if (!page->private) {
+       if (!page_private(page)) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
-       } else if (!(page->private & 1)) {
+       } else if (!(page_private(page) & 1)) {
                rmap_printk("rmap_remove:  %p %llx 1->0\n", spte, *spte);
-               if ((u64 *)page->private != spte) {
+               if ((u64 *)page_private(page) != spte) {
                        printk(KERN_ERR "rmap_remove:  %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
-               page->private = 0;
+               set_page_private(page, 0);
        } else {
                rmap_printk("rmap_remove:  %p %llx many->many\n", spte, *spte);
-               desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+               desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
@@ -398,16 +398,16 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
        BUG_ON(!slot);
        page = gfn_to_page(slot, gfn);
 
-       while (page->private) {
-               if (!(page->private & 1))
-                       spte = (u64 *)page->private;
+       while (page_private(page)) {
+               if (!(page_private(page) & 1))
+                       spte = (u64 *)page_private(page);
                else {
-                       desc = (struct kvm_rmap_desc *)(page->private & ~1ul);
+                       desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
                        spte = desc->shadow_ptes[0];
                }
                BUG_ON(!spte);
-               BUG_ON((*spte & PT64_BASE_ADDR_MASK) !=
-                      page_to_pfn(page) << PAGE_SHIFT);
+               BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
+                      != page_to_pfn(page));
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
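
The reworked BUG_ON in this hunk is the same 32-bit-host concern in another guise: page_to_pfn() returns unsigned long, so page_to_pfn(page) << PAGE_SHIFT overflows on a 32-bit host for frames above 4GB, and the old comparison could fire (or stay silent) wrongly. Shifting the spte's address down into pfn space keeps both sides within word range. A small illustration (assuming a 32-bit unsigned long and 4K pages):

	#include <stdio.h>

	int main(void)
	{
		unsigned long pfn = 0x123456;		/* a frame above 4GB */
		unsigned long long spte_addr = 0x123456000ULL;

		/* old form: the 32-bit shift truncates to 0x23456000 */
		printf("%lx\n", pfn << 12);
		/* new form: compare pfns instead, no overflow possible */
		printf("%d\n", (spte_addr >> 12) == pfn);	/* prints 1 */
		return 0;
	}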
@@ -1093,22 +1093,40 @@ out:
        return r;
 }
 
+static void mmu_pre_write_zap_pte(struct kvm_vcpu *vcpu,
+                                 struct kvm_mmu_page *page,
+                                 u64 *spte)
+{
+       u64 pte;
+       struct kvm_mmu_page *child;
+
+       pte = *spte;
+       if (is_present_pte(pte)) {
+               if (page->role.level == PT_PAGE_TABLE_LEVEL)
+                       rmap_remove(vcpu, spte);
+               else {
+                       child = page_header(pte & PT64_BASE_ADDR_MASK);
+                       mmu_page_remove_parent_pte(vcpu, child, spte);
+               }
+       }
+       *spte = 0;
+}
+
 void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
-       struct kvm_mmu_page *child;
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned index;
        u64 *spte;
-       u64 pte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
        int level;
        int flooded = 0;
+       int npte;
 
        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        if (gfn == vcpu->last_pt_write_gfn) {
@@ -1144,22 +1162,26 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                }
                page_offset = offset;
                level = page->role.level;
+               npte = 1;
                if (page->role.glevels == PT32_ROOT_LEVEL) {
-                       page_offset <<= 1;          /* 32->64 */
+                       page_offset <<= 1;      /* 32->64 */
+                       /*
+                        * A 32-bit pde maps 4MB while the shadow pdes map
+                        * only 2MB.  So we need to double the offset again
+                        * and zap two pdes instead of one.
+                        */
+                       if (level == PT32_ROOT_LEVEL) {
+                               page_offset <<= 1;
+                               npte = 2;
+                       }
                        page_offset &= ~PAGE_MASK;
                }
                spte = __va(page->page_hpa);
                spte += page_offset / sizeof(*spte);
-               pte = *spte;
-               if (is_present_pte(pte)) {
-                       if (level == PT_PAGE_TABLE_LEVEL)
-                               rmap_remove(vcpu, spte);
-                       else {
-                               child = page_header(pte & PT64_BASE_ADDR_MASK);
-                               mmu_page_remove_parent_pte(vcpu, child, spte);
-                       }
+               while (npte--) {
+                       mmu_pre_write_zap_pte(vcpu, page, spte);
+                       ++spte;
                }
-               *spte = 0;
        }
 }
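
To make the doubling in the nonpae path concrete: suppose the guest writes its pde at byte offset 4 of a 32-bit page directory, i.e. entry 1, covering guest addresses 4MB-8MB. The first shift converts the 4-byte guest index into an 8-byte shadow index; the second accounts for each 4MB guest pde being shadowed by two 2MB pdes (a hypothetical walk-through, not kernel code):

	unsigned page_offset = 4;	/* guest pde #1 (4-byte entries)   */
	page_offset <<= 1;		/* 8:  4-byte -> 8-byte entries    */
	page_offset <<= 1;		/* 16: one guest pde -> two shadow */
	/* spte += 16 / 8: zap shadow pdes 2 and 3, i.e. npte = 2, */
	/* which together cover the same 4MB the guest pde mapped. */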
 
@@ -1218,7 +1240,7 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
                INIT_LIST_HEAD(&page_header->link);
                if ((page = alloc_page(GFP_KERNEL)) == NULL)
                        goto error_1;
-               page->private = (unsigned long)page_header;
+               set_page_private(page, (unsigned long)page_header);
                page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT;
                memset(__va(page_header->page_hpa), 0, PAGE_SIZE);
                list_add(&page_header->link, &vcpu->free_pages);
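
The set_page_private() call here is the other half of the page_header() lookup used by mmu_pre_write_zap_pte() above: each shadow page's struct page carries a back-pointer to its kvm_mmu_page header, so a shadow table's host physical address can be turned back into its header. A sketch of that lookup, assuming the pointer is recovered exactly as it is stored in this hunk:

	static struct kvm_mmu_page *page_header(hpa_t shadow_page)
	{
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}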