/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
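/*
 * For orientation (not a definition in this file): mmu.c instantiates this
 * template once per guest pte size, roughly along the lines of the sketch
 * below; see mmu.c for the exact lines.
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */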
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
#define FNAME(name) paging##64_##name
#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT64_DIR_BASE_ADDR_MASK
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#define CMPXCHG cmpxchg
#else
#define CMPXCHG cmpxchg64
#define PT_MAX_FULL_LEVELS 2
#endif
#elif PTTYPE == 32
#define pt_element_t u32
#define guest_walker guest_walker32
#define FNAME(name) paging##32_##name
#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
#define PT_DIR_BASE_ADDR_MASK PT32_DIR_BASE_ADDR_MASK
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
#define CMPXCHG cmpxchg
#else
#error Invalid PTTYPE value
#endif
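/*
 * A note on the macros above: FNAME() gives each instantiation its own
 * symbol names, so e.g. FNAME(page_fault) expands to paging64_page_fault in
 * the 64-bit build of this file and to paging32_page_fault in the 32-bit
 * one.  SHADOW_PT_INDEX deliberately maps to PT64_INDEX in both cases: the
 * shadow page tables maintained by kvm always use 64-bit ptes, regardless
 * of the guest pte size.
 */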
#define gpte_to_gfn FNAME(gpte_to_gfn)
#define gpte_to_gfn_pde FNAME(gpte_to_gfn_pde)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t pte;
	pt_element_t inherited_ar;
	gfn_t gfn;
	u32 error_code;
};
static gfn_t gpte_to_gfn(pt_element_t gpte)
{
	return (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t gpte_to_gfn_pde(pt_element_t gpte)
{
	return (gpte & PT_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
}
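/*
 * cmpxchg_gpte() updates a guest pte in place atomically, mirroring what the
 * hardware walker does when it sets the accessed and dirty bits.  It returns
 * true if the pte changed under us (the cmpxchg lost), in which case the
 * caller restarts the walk.
 */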
static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);
	table = kmap_atomic(page, KM_USER0);

	ret = CMPXCHG(&table[index], orig_pte, new_pte);

	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index;
	gpa_t pte_gpa;

	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
walk:
	walker->level = vcpu->mmu.root_level;
	pte = vcpu->cr3;
#if PTTYPE == 64
	if (!is_long_mode(vcpu)) {
		pte = vcpu->pdptrs[(addr >> 30) & 3];
		if (!is_present_pte(pte))
			goto not_present;
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);

	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
			 walker->level - 1, table_gfn);

		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));

		if (!is_present_pte(pte))
			goto not_present;

		if (write_fault && !is_writeble_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				goto access_error;

		if (user_fault && !(pte & PT_USER_MASK))
			goto access_error;

#if PTTYPE == 64
		if (fetch_fault && is_nx(vcpu) && (pte & PT64_NX_MASK))
			goto access_error;
#endif

		if (!(pte & PT_ACCESSED_MASK)) {
			mark_page_dirty(vcpu->kvm, table_gfn);
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			pte |= PT_ACCESSED_MASK;
		}

		if (walker->level == PT_PAGE_TABLE_LEVEL) {
			walker->gfn = gpte_to_gfn(pte);
			break;
		}

		if (walker->level == PT_DIRECTORY_LEVEL
		    && (pte & PT_PAGE_SIZE_MASK)
		    && (PTTYPE == 64 || is_pse(vcpu))) {
			walker->gfn = gpte_to_gfn_pde(pte);
			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
			if (PTTYPE == 32 && is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);
			break;
		}

		walker->inherited_ar &= pte;
		--walker->level;
	}

	if (write_fault && !is_dirty_pte(pte)) {
		bool ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		pte |= PT_DIRTY_MASK;
		kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
	}

	walker->pte = pte;
	pgprintk("%s: pte %llx\n", __FUNCTION__, (u64)pte);
	return 1;

not_present:
	walker->error_code = 0;
	goto err;

access_error:
	walker->error_code = PFERR_PRESENT_MASK;

err:
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault)
		walker->error_code |= PFERR_FETCH_MASK;
	return 0;
}
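/*
 * set_pte() turns a guest pte that walk_addr() found into a shadow pte: it
 * merges the access rights inherited from the guest walk, points the shadow
 * pte at the host page backing the guest frame, and write-protects the
 * mapping when the frame is itself a shadowed guest page table (so that
 * guest pte writes can be intercepted).
 */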
static void FNAME(set_pte)(struct kvm_vcpu *vcpu, pt_element_t gpte,
			   u64 *shadow_pte, u64 access_bits,
			   int user_fault, int write_fault,
			   int *ptwrite, struct guest_walker *walker,
			   gfn_t gfn)
{
	int dirty = gpte & PT_DIRTY_MASK;
	u64 spte;
	int was_rmapped = is_rmap_pte(*shadow_pte);
	struct page *page;

	pgprintk("%s: spte %llx gpte %llx access %llx write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, (u64)gpte, access_bits,
		 write_fault, user_fault, gfn);

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	spte |= gpte & PT64_NX_MASK;
	if (!dirty)
		access_bits &= ~PT_WRITABLE_MASK;

	page = gfn_to_page(vcpu->kvm, gfn);

	spte |= PT_PRESENT_MASK;
	if (access_bits & PT_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_page(page)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		kvm_release_page_clean(page);
		return;
	}

	spte |= page_to_phys(page);

	if ((access_bits & PT_WRITABLE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			access_bits &= ~PT_WRITABLE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (access_bits & PT_WRITABLE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	}
	else
		kvm_release_page_clean(page);
	if (!ptwrite || !*ptwrite)
		vcpu->last_pte_updated = shadow_pte;
}
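/*
 * update_pte() is the pte-write hook for this paging mode: when the guest
 * writes a pte inside a shadowed guest page table, the kvm_mmu_pte_write()
 * path calls this to keep the corresponding shadow pte in sync instead of
 * waiting for the next fault.
 */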
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
			      u64 *spte, const void *pte, int bytes,
			      int offset_in_pte)
{
	pt_element_t gpte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!offset_in_pte && !is_present_pte(gpte))
			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
		return;
	}
	if (bytes < sizeof(pt_element_t))
		return;
	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
	FNAME(set_pte)(vcpu, gpte, spte, PT_USER_MASK | PT_WRITABLE_MASK, 0,
		       0, NULL, NULL, gpte_to_gfn(gpte));
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
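/*
 * fetch() walks the shadow page tables in parallel with the guest walk
 * recorded in *walker, allocating missing shadow page tables with
 * kvm_mmu_get_page() on the way down, and finally installs a leaf shadow
 * pte via set_pte().  Guest huge pages are backed by a "metaphysical"
 * shadow page table that maps the huge page with small ptes.
 */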
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *walker,
			 int user_fault, int write_fault, int *ptwrite)
{
	hpa_t shadow_addr;
	int level;
	u64 *shadow_ent;

	if (!is_present_pte(walker->pte))
		return NULL;

	shadow_addr = vcpu->mmu.root_hpa;
	level = vcpu->mmu.shadow_root_level;
	if (level == PT32E_ROOT_LEVEL) {
		shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
		shadow_addr &= PT64_BASE_ADDR_MASK;
		--level;
	}

	for (; ; level--) {
		u32 index = SHADOW_PT_INDEX(addr, level);
		struct kvm_mmu_page *shadow_page;
		u64 shadow_pte;
		int metaphysical;
		gfn_t table_gfn;
		unsigned hugepage_access = 0;

		shadow_ent = ((u64 *)__va(shadow_addr)) + index;
		if (is_shadow_present_pte(*shadow_ent)) {
			if (level == PT_PAGE_TABLE_LEVEL)
				break;
			shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
			continue;
		}

		if (level == PT_PAGE_TABLE_LEVEL)
			break;

		if (level - 1 == PT_PAGE_TABLE_LEVEL
		    && walker->level == PT_DIRECTORY_LEVEL) {
			metaphysical = 1;
			hugepage_access = walker->pte;
			hugepage_access &= PT_USER_MASK | PT_WRITABLE_MASK;
			if (!is_dirty_pte(walker->pte))
				hugepage_access &= ~PT_WRITABLE_MASK;
			hugepage_access >>= PT_WRITABLE_SHIFT;
			if (walker->pte & PT64_NX_MASK)
				hugepage_access |= (1 << 2);
			table_gfn = gpte_to_gfn(walker->pte);
		} else {
			metaphysical = 0;
			table_gfn = walker->table_gfn[level - 2];
		}
		shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
					       metaphysical, hugepage_access,
					       shadow_ent);
		shadow_addr = __pa(shadow_page->spt);
		shadow_pte = shadow_addr | PT_PRESENT_MASK | PT_ACCESSED_MASK
			| PT_WRITABLE_MASK | PT_USER_MASK;
		*shadow_ent = shadow_pte;
	}

	FNAME(set_pte)(vcpu, walker->pte, shadow_ent,
		       walker->inherited_ar, user_fault, write_fault,
		       ptwrite, walker, walker->gfn);

	return shadow_ent;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			       u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *shadow_pte;
	int write_pt = 0;
	int r;

	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
	kvm_mmu_audit(vcpu, "pre page fault");

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the shadow pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __FUNCTION__);
		inject_page_fault(vcpu, addr, walker.error_code);
		vcpu->last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
				  &write_pt);
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
		 shadow_pte, *shadow_pte, write_pt);

	if (!write_pt)
		vcpu->last_pt_write_count = 0; /* reset fork detector */

	/*
	 * mmio: emulate if accessible, otherwise it's a guest fault.
	 */
	if (is_io_pte(*shadow_pte))
		return 1;

	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, "post page fault (fixed)");

	return write_pt;
}
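/*
 * gva_to_gpa() reuses the guest walker to translate a guest virtual address
 * to a guest physical address without fault semantics; it is used on the
 * instruction emulation path and returns UNMAPPED_GVA when the guest has no
 * mapping for the address.
 */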
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	}

	return gpa;
}
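/*
 * prefetch_page() pre-populates a newly created shadow page table: entries
 * whose guest pte is present are marked "trap" so the next access takes the
 * full fault path, while entries that are not present in the guest can be
 * marked "notrap" and have the fault reflected straight to the guest.
 */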
static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, offset = 0;
	pt_element_t *gpt;
	struct page *page;

	if (sp->role.metaphysical
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
	page = gfn_to_page(vcpu->kvm, sp->gfn);
	gpt = kmap_atomic(page, KM_USER0);
	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		if (is_present_pte(gpt[offset + i]))
			sp->spt[i] = shadow_trap_nonpresent_pte;
		else
			sp->spt[i] = shadow_notrap_nonpresent_pte;
	kunmap_atomic(gpt, KM_USER0);
	kvm_release_page_clean(page);
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef SHADOW_PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_DIR_BASE_ADDR_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_pde
#undef CMPXCHG