/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_set_way_flush(vcpu);

	return true;
}
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);

	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}
static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
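/*
 * Worked example of the 32 bit rules above (illustration only): with
 * *dbg_reg == 0xffffffff00000000, an AArch32 write of 0x1234 yields
 * 0xffffffff00001234 (top bits preserved), while an AArch32 read of
 * the same register returns just 0x00000000 (bottom bits only).
 */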
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	vcpu_sys_reg(vcpu, AMAIR_EL1) = read_sysreg(amair_el1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
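/*
 * Worked example of the mapping above (assuming MPIDR_LEVEL_SHIFT(1)
 * is 8 and MPIDR_LEVEL_SHIFT(2) is 16): vcpu_id 20 gives
 * Aff0 = 20 & 0xf = 4 and Aff1 = (20 >> 4) & 0xff = 1, so MPIDR_EL1
 * becomes (1ULL << 31) | (1 << 8) | 4, i.e. affinity 0.1.4.
 */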
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}
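/*
 * Illustrative note on the reset value computed above: the host's
 * read-only PMCR bits (such as PMCR.N) are kept as-is, the writable
 * bits are filled from the 0xdecafbad junk pattern so nothing can
 * rely on their reset value, and PMCR.E is cleared so the guest PMU
 * starts disabled.
 */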
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
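/*
 * Worked example of the PMEVCNTRn_EL0 decoding above: PMEVCNTR10_EL0
 * is encoded with CRm = 0b1001 and Op2 = 0b010, so
 * idx = ((CRm & 3) << 3) | (Op2 & 7) = (1 << 3) | 2 = 10.
 */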
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
						    & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write) {
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	} else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);

	return true;
}
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
{
	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
		if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
			pr_err_once("kvm [%i]: SVE unsupported for guests, suppressing\n",
				    task_pid_nr(current));

		val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
	}

	return val;
}
/* cpufeature ID register access trap handlers */

static bool __access_id_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r,
			    bool raz)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(r, raz);
	return true;
}

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, false);
}

static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	return __access_id_reg(vcpu, p, r, true);
}
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
static u64 sys_reg_to_index(const struct sys_reg_desc *reg);

/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	const u64 val = read_id_reg(rd, raz);

	return reg_to_user(uaddr, &val, id);
}

static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
			bool raz)
{
	const u64 id = sys_reg_to_index(rd);
	int err;
	u64 val;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (val != read_id_reg(rd, raz))
		return -EINVAL;

	return 0;
}

static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, false);
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, false);
}

static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __get_id_reg(rd, uaddr, true);
}

static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  const struct kvm_one_reg *reg, void __user *uaddr)
{
	return __set_id_reg(rd, uaddr, true);
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	SYS_DESC(SYS_##name),			\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	.access = access_raz_id_reg,			\
	.get_user = get_raz_id_reg,			\
	.set_user = set_raz_id_reg,			\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	SYS_DESC(SYS_##name),			\
	.access = access_raz_id_reg,		\
	.get_user = get_raz_id_reg,		\
	.set_user = set_raz_id_reg,		\
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all, debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSlock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },

	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
	{ SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
	{ SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
	DBG_BCR_BVR_WCR_WVR_EL1(2),
	DBG_BCR_BVR_WCR_WVR_EL1(3),
	DBG_BCR_BVR_WCR_WVR_EL1(4),
	DBG_BCR_BVR_WCR_WVR_EL1(5),
	DBG_BCR_BVR_WCR_WVR_EL1(6),
	DBG_BCR_BVR_WCR_WVR_EL1(7),
	DBG_BCR_BVR_WCR_WVR_EL1(8),
	DBG_BCR_BVR_WCR_WVR_EL1(9),
	DBG_BCR_BVR_WCR_WVR_EL1(10),
	DBG_BCR_BVR_WCR_WVR_EL1(11),
	DBG_BCR_BVR_WCR_WVR_EL1(12),
	DBG_BCR_BVR_WCR_WVR_EL1(13),
	DBG_BCR_BVR_WCR_WVR_EL1(14),
	DBG_BCR_BVR_WCR_WVR_EL1(15),

	{ SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
	{ SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
	{ SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

	{ SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
	{ SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
	// DBGDTR[TR]X_EL0 share the same encoding
	{ SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

	{ SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

	{ SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
	/*
	 * ID regs: all ID_SANITISED() entries here must have corresponding
	 * entries in arm64_ftr_regs[].
	 */

	/* AArch64 mappings of the AArch32 ID registers */
	/* CRm=1 */
	ID_SANITISED(ID_PFR0_EL1),
	ID_SANITISED(ID_PFR1_EL1),
	ID_SANITISED(ID_DFR0_EL1),
	ID_HIDDEN(ID_AFR0_EL1),
	ID_SANITISED(ID_MMFR0_EL1),
	ID_SANITISED(ID_MMFR1_EL1),
	ID_SANITISED(ID_MMFR2_EL1),
	ID_SANITISED(ID_MMFR3_EL1),

	/* CRm=2 */
	ID_SANITISED(ID_ISAR0_EL1),
	ID_SANITISED(ID_ISAR1_EL1),
	ID_SANITISED(ID_ISAR2_EL1),
	ID_SANITISED(ID_ISAR3_EL1),
	ID_SANITISED(ID_ISAR4_EL1),
	ID_SANITISED(ID_ISAR5_EL1),
	ID_SANITISED(ID_MMFR4_EL1),
	ID_UNALLOCATED(2,7),

	/* CRm=3 */
	ID_SANITISED(MVFR0_EL1),
	ID_SANITISED(MVFR1_EL1),
	ID_SANITISED(MVFR2_EL1),
	ID_UNALLOCATED(3,3),
	ID_UNALLOCATED(3,4),
	ID_UNALLOCATED(3,5),
	ID_UNALLOCATED(3,6),
	ID_UNALLOCATED(3,7),

	/* AArch64 ID registers */
	/* CRm=4 */
	ID_SANITISED(ID_AA64PFR0_EL1),
	ID_SANITISED(ID_AA64PFR1_EL1),
	ID_UNALLOCATED(4,2),
	ID_UNALLOCATED(4,3),
	ID_UNALLOCATED(4,4),
	ID_UNALLOCATED(4,5),
	ID_UNALLOCATED(4,6),
	ID_UNALLOCATED(4,7),

	/* CRm=5 */
	ID_SANITISED(ID_AA64DFR0_EL1),
	ID_SANITISED(ID_AA64DFR1_EL1),
	ID_UNALLOCATED(5,2),
	ID_UNALLOCATED(5,3),
	ID_HIDDEN(ID_AA64AFR0_EL1),
	ID_HIDDEN(ID_AA64AFR1_EL1),
	ID_UNALLOCATED(5,6),
	ID_UNALLOCATED(5,7),

	/* CRm=6 */
	ID_SANITISED(ID_AA64ISAR0_EL1),
	ID_SANITISED(ID_AA64ISAR1_EL1),
	ID_UNALLOCATED(6,2),
	ID_UNALLOCATED(6,3),
	ID_UNALLOCATED(6,4),
	ID_UNALLOCATED(6,5),
	ID_UNALLOCATED(6,6),
	ID_UNALLOCATED(6,7),

	/* CRm=7 */
	ID_SANITISED(ID_AA64MMFR0_EL1),
	ID_SANITISED(ID_AA64MMFR1_EL1),
	ID_SANITISED(ID_AA64MMFR2_EL1),
	ID_UNALLOCATED(7,3),
	ID_UNALLOCATED(7,4),
	ID_UNALLOCATED(7,5),
	ID_UNALLOCATED(7,6),
	ID_UNALLOCATED(7,7),
	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
	{ SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

	{ SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
	{ SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
	{ SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	{ SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },

	{ SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
	{ SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
	{ SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
	{ SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

	{ SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	{ SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },

	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },
	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x70 },
};
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}
/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}
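/*
 * Worked example of the mapping above: an AArch32 write of 0xdead to
 * DBGBXVR0 with DBGBVR0_EL1 == 0x00000000ffff0000 leaves
 * 0x0000deadffff0000 in the 64-bit register, and an AArch32 read of
 * DBGBXVR0 then returns 0x0000dead.
 */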
#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, 	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, 	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, 	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },

	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	})

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
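/*
 * Worked example of the packing above: SCTLR_EL1 is Op0=3, Op1=0,
 * CRn=1, CRm=0, Op2=0, so its match value is
 * (3 << 14) | (1 << 7) = 0xc080; bsearch() compares this against the
 * identically-packed encodings of the sorted table entries.
 */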
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}
static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}
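/*
 * Worked example of the ISS decoding above: for "mrs x2, sctlr_el1"
 * (Op0=3, Op1=0, CRn=1, CRm=0, Op2=0, Rt=2, direction bit set for a
 * read), the trapped ISS value is (3 << 20) | (1 << 10) | (2 << 5) | 1
 * = 0x300441, which decodes back into exactly the fields used for the
 * table lookup.
 */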
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg_by_id(id, &params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}
/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}
static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
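/*
 * Worked example of the CSSELR decoding above: val = 3 selects level
 * (3 >> 1) = 1 (i.e. Ctype2, the second cache level) with the InD bit
 * set, so it is only valid when that level reports an instruction
 * cache (ctype 1) or separate instruction and data caches (ctype 3).
 */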
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(i1, &uind, &total);
		else
			err = walk_one_sys_reg(i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}
void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}
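/*
 * Worked example of the CLIDR cleanup above: with Ctype1 = 0b011
 * (separate I and D caches), Ctype2 = 0b100 (unified) and
 * Ctype3 = 0b000, the loop stops at i = 2 and cache_levels is masked
 * down to its low six bits, keeping exactly the two real levels.
 */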
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}