Home Home > GIT Browse > stable
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJiri Slaby <jslaby@suse.cz>2019-08-16 22:01:45 +0200
committerJiri Slaby <jslaby@suse.cz>2019-08-16 22:25:11 +0200
commita4c9c325f712690f56c8e4fb4267ab66aa8fb800 (patch)
tree7a04d9593bbeadfbf5fd68e4ca5f191425520313
parentc9b2f3ab2f8aff21829a7a3414c09df39491575e (diff)
KVM: arm/arm64: Sync ICH_VMCR_EL2 back when about to block
-rw-r--r--patches.kernel.org/5.2.9-138-KVM-arm-arm64-Sync-ICH_VMCR_EL2-back-when-about.patch186
-rw-r--r--series.conf1
2 files changed, 187 insertions, 0 deletions
diff --git a/patches.kernel.org/5.2.9-138-KVM-arm-arm64-Sync-ICH_VMCR_EL2-back-when-about.patch b/patches.kernel.org/5.2.9-138-KVM-arm-arm64-Sync-ICH_VMCR_EL2-back-when-about.patch
new file mode 100644
index 0000000000..ef1333701f
--- /dev/null
+++ b/patches.kernel.org/5.2.9-138-KVM-arm-arm64-Sync-ICH_VMCR_EL2-back-when-about.patch
@@ -0,0 +1,186 @@
+From: Marc Zyngier <maz@kernel.org>
+Date: Fri, 2 Aug 2019 10:28:32 +0100
+Subject: [PATCH] KVM: arm/arm64: Sync ICH_VMCR_EL2 back when about to block
+References: bnc#1012628
+Patch-mainline: 5.2.9
+Git-commit: 5eeaf10eec394b28fad2c58f1f5c3a5da0e87d1c
+
+commit 5eeaf10eec394b28fad2c58f1f5c3a5da0e87d1c upstream.
+
+Since commit 328e56647944 ("KVM: arm/arm64: vgic: Defer
+touching GICH_VMCR to vcpu_load/put"), we leave ICH_VMCR_EL2 (or
+its GICv2 equivalent) loaded as long as we can, only syncing it
+back when we're scheduled out.
+
+There is a small snag with that though: kvm_vgic_vcpu_pending_irq(),
+which is indirectly called from kvm_vcpu_check_block(), needs to
+evaluate the guest's view of ICC_PMR_EL1. At the point where we
+call kvm_vcpu_check_block(), the vcpu is still loaded, and whatever
+changes to PMR are not visible in memory until we do a vcpu_put().
+
+Things go really south if the guest does the following:
+
+ mov x0, #0 // or any small value masking interrupts
+ msr ICC_PMR_EL1, x0
+
+ [vcpu preempted, then rescheduled, VMCR sampled]
+
+ mov x0, #ff // allow all interrupts
+ msr ICC_PMR_EL1, x0
+	wfi		// traps to EL2, so sampling of VMCR
+
+ [interrupt arrives just after WFI]
+
+Here, the hypervisor's view of PMR is zero, while the guest has enabled
+its interrupts. kvm_vgic_vcpu_pending_irq() will then say that no
+interrupts are pending (despite an interrupt being received) and we'll
+block for no reason. If the guest doesn't have a periodic interrupt
+firing once it has blocked, it will stay there forever.
+
+To avoid this unfortunate situation, let's resync VMCR from
+kvm_arch_vcpu_blocking(), ensuring that a following kvm_vcpu_check_block()
+will observe the latest value of PMR.
+
+This has been found by booting an arm64 Linux guest with the pseudo NMI
+feature, and thus using interrupt priorities to mask interrupts instead
+of the usual PSTATE masking.
+
+Cc: stable@vger.kernel.org # 4.12
+Fixes: 328e56647944 ("KVM: arm/arm64: vgic: Defer touching GICH_VMCR to vcpu_load/put")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/kvm/arm_vgic.h | 1 +
+ virt/kvm/arm/arm.c | 11 +++++++++++
+ virt/kvm/arm/vgic/vgic-v2.c | 9 ++++++++-
+ virt/kvm/arm/vgic/vgic-v3.c | 7 ++++++-
+ virt/kvm/arm/vgic/vgic.c | 11 +++++++++++
+ virt/kvm/arm/vgic/vgic.h | 2 ++
+ 6 files changed, 39 insertions(+), 2 deletions(-)
+
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 46bbc949c20a..7a30524a80ee 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+
+ void kvm_vgic_load(struct kvm_vcpu *vcpu);
+ void kvm_vgic_put(struct kvm_vcpu *vcpu);
++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
+ #define vgic_initialized(k) ((k)->arch.vgic.initialized)
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index bd5c55916d0d..9b778c51af1b 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -323,6 +323,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+
+ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+ {
++ /*
++ * If we're about to block (most likely because we've just hit a
++ * WFI), we need to sync back the state of the GIC CPU interface
+	 * so that we have the latest PMR and group enables. This ensures
++ * that kvm_arch_vcpu_runnable has up-to-date data to decide
++ * whether we have pending interrupts.
++ */
++ preempt_disable();
++ kvm_vgic_vmcr_sync(vcpu);
++ preempt_enable();
++
+ kvm_vgic_v4_enable_doorbell(vcpu);
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
+index 6dd5ad706c92..96aab77d0471 100644
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
+ kvm_vgic_global_state.vctrl_base + GICH_APR);
+ }
+
+-void vgic_v2_put(struct kvm_vcpu *vcpu)
++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+ cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
++}
++
++void vgic_v2_put(struct kvm_vcpu *vcpu)
++{
++ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
++
++ vgic_v2_vmcr_sync(vcpu);
+ cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
+ }
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index c2c9ce009f63..0c653a1e5215 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
+ __vgic_v3_activate_traps(vcpu);
+ }
+
+-void vgic_v3_put(struct kvm_vcpu *vcpu)
++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ if (likely(cpu_if->vgic_sre))
+ cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
++}
++
++void vgic_v3_put(struct kvm_vcpu *vcpu)
++{
++ vgic_v3_vmcr_sync(vcpu);
+
+ kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+
+diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
+index 04786c8ec77e..13d4b38a94ec 100644
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
+ vgic_v3_put(vcpu);
+ }
+
++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
++{
++ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
++ return;
++
++ if (kvm_vgic_global_state.type == VGIC_V2)
++ vgic_v2_vmcr_sync(vcpu);
++ else
++ vgic_v3_vmcr_sync(vcpu);
++}
++
+ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
+index 57205beaa981..11adbdac1d56 100644
+--- a/virt/kvm/arm/vgic/vgic.h
++++ b/virt/kvm/arm/vgic/vgic.h
+@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+ void vgic_v2_init_lrs(void);
+ void vgic_v2_load(struct kvm_vcpu *vcpu);
+ void vgic_v2_put(struct kvm_vcpu *vcpu);
++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ void vgic_v2_save_state(struct kvm_vcpu *vcpu);
+ void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
+
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
+ void vgic_v3_put(struct kvm_vcpu *vcpu);
++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ bool vgic_has_its(struct kvm *kvm);
+ int kvm_vgic_register_its_device(void);
+--
+2.22.0
+
diff --git a/series.conf b/series.conf
index 10ab275b5c..f23f7ea896 100644
--- a/series.conf
+++ b/series.conf
@@ -1158,6 +1158,7 @@
patches.kernel.org/5.2.9-135-NFSv4-Check-the-return-value-of-update_open_sta.patch
patches.kernel.org/5.2.9-136-NFSv4-Fix-an-Oops-in-nfs4_do_setattr.patch
patches.kernel.org/5.2.9-137-KVM-Fix-leak-vCPU-s-VMCS-value-into-other-pCPU.patch
+ patches.kernel.org/5.2.9-138-KVM-arm-arm64-Sync-ICH_VMCR_EL2-back-when-about.patch
########################################################
# Build fixes that apply to the vanilla kernel too.