author     Jiri Slaby <jslaby@suse.cz>   2019-08-16 22:01:45 +0200
committer  Jiri Slaby <jslaby@suse.cz>   2019-08-16 22:25:11 +0200
commit     c9b2f3ab2f8aff21829a7a3414c09df39491575e (patch)
tree       c40bbda6b95f8a5a8afe9f0463288f46bfb294d8
parent     6c3c370a9a36031669389f1f221ff1d12168bc2e (diff)

KVM: Fix leak vCPU's VMCS value into other pCPU (bnc#1012628).

-rw-r--r--  patches.kernel.org/5.2.9-137-KVM-Fix-leak-vCPU-s-VMCS-value-into-other-pCPU.patch  217
-rw-r--r--  series.conf                                                                          1
2 files changed, 218 insertions(+), 0 deletions(-)
diff --git a/patches.kernel.org/5.2.9-137-KVM-Fix-leak-vCPU-s-VMCS-value-into-other-pCPU.patch b/patches.kernel.org/5.2.9-137-KVM-Fix-leak-vCPU-s-VMCS-value-into-other-pCPU.patch
new file mode 100644
index 0000000000..c4803fbb6d
--- /dev/null
+++ b/patches.kernel.org/5.2.9-137-KVM-Fix-leak-vCPU-s-VMCS-value-into-other-pCPU.patch
@@ -0,0 +1,217 @@
+From: Wanpeng Li <wanpengli@tencent.com>
+Date: Mon, 5 Aug 2019 10:03:19 +0800
+Subject: [PATCH] KVM: Fix leak vCPU's VMCS value into other pCPU
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+References: bnc#1012628
+Patch-mainline: 5.2.9
+Git-commit: 17e433b54393a6269acbcb792da97791fe1592d8
+
+commit 17e433b54393a6269acbcb792da97791fe1592d8 upstream.
+
+After commit d73eb57b80b (KVM: Boost vCPUs that are delivering interrupts), a
+five-year-old bug is exposed. When running the ebizzy benchmark in three 80-vCPU
+VMs on one 80-pCPU Skylake server, a lot of rcu_sched stall warnings splat
+in the VMs after stress testing:
+
+ INFO: rcu_sched detected stalls on CPUs/tasks: { 4 41 57 62 77} (detected by 15, t=60004 jiffies, g=899, c=898, q=15073)
+ Call Trace:
+ flush_tlb_mm_range+0x68/0x140
+ tlb_flush_mmu.part.75+0x37/0xe0
+ tlb_finish_mmu+0x55/0x60
+ zap_page_range+0x142/0x190
+ SyS_madvise+0x3cd/0x9c0
+ system_call_fastpath+0x1c/0x21
+
+swait_active() remains true until finish_swait() is called in
+kvm_vcpu_block(), and voluntarily preempted vCPUs are now taken into
+account by the kvm_vcpu_on_spin() loop. This greatly increases the
+probability that kvm_arch_vcpu_runnable(vcpu) is checked and returns
+true; when APICv is enabled, the yield-candidate vCPU's VMCS RVI field
+then leaks (via vmx_sync_pir_to_irr()) into the current VMCS of the
+vCPU that is spinning on a taken lock.
+
+This patch fixes it by conservatively checking only a subset of events.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Marc Zyngier <Marc.Zyngier@arm.com>
+Cc: stable@vger.kernel.org
+Fixes: 98f4a1467 (KVM: add kvm_arch_vcpu_runnable() test to kvm_vcpu_on_spin() loop)
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ arch/powerpc/kvm/powerpc.c | 5 +++++
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/svm.c | 6 ++++++
+ arch/x86/kvm/vmx/vmx.c | 6 ++++++
+ arch/x86/kvm/x86.c | 16 ++++++++++++++++
+ include/linux/kvm_host.h | 1 +
+ virt/kvm/kvm_main.c | 25 ++++++++++++++++++++++++-
+ 7 files changed, 59 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 6d704ad2472b..993017dd83ca 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+ return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_vcpu_runnable(vcpu);
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return false;
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 8253925c5e8c..921c609c2af7 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1169,6 +1169,7 @@ struct kvm_x86_ops {
+ int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
++ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 3be96ed7f666..14384a1ec53f 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5167,6 +5167,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ kvm_vcpu_wake_up(vcpu);
+ }
+
++static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return false;
++}
++
+ static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ {
+ unsigned long flags;
+@@ -7264,6 +7269,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+
+ .pmu_ops = &amd_pmu_ops,
+ .deliver_posted_interrupt = svm_deliver_avic_intr,
++ .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
+ .update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 924c2a79e4a9..4b830c0adcf8 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6096,6 +6096,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+ return max_irr;
+ }
+
++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return pi_test_on(vcpu_to_pi_desc(vcpu));
++}
++
+ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ {
+ if (!kvm_vcpu_apicv_active(vcpu))
+@@ -7662,6 +7667,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
++ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
+
+ .set_tss_addr = vmx_set_tss_addr,
+ .set_identity_map_addr = vmx_set_identity_map_addr,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a8ad3a4d86b1..cbced8ff29d4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9641,6 +9641,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
++ return true;
++
++ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
++ kvm_test_request(KVM_REQ_SMI, vcpu) ||
++ kvm_test_request(KVM_REQ_EVENT, vcpu))
++ return true;
++
++ if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
++ return true;
++
++ return false;
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.preempted_in_kernel;
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index d1ad38a3f048..7546cbf3dfb0 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -871,6 +871,7 @@ void kvm_arch_check_processor_compat(void *rtn);
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
+ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+
+ #ifndef __KVM_HAVE_ARCH_VM_ALLOC
+ /*
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index e629766f0ec8..7e0d18ac62bf 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2475,6 +2475,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+ #endif
+ }
+
++/*
++ * Unlike kvm_arch_vcpu_runnable, this function is called outside
++ * a vcpu_load/vcpu_put pair. However, for most architectures
++ * kvm_arch_vcpu_runnable does not require vcpu_load.
++ */
++bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_vcpu_runnable(vcpu);
++}
++
++static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ if (kvm_arch_dy_runnable(vcpu))
++ return true;
++
++#ifdef CONFIG_KVM_ASYNC_PF
++ if (!list_empty_careful(&vcpu->async_pf.done))
++ return true;
++#endif
++
++ return false;
++}
++
+ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ {
+ struct kvm *kvm = me->kvm;
+@@ -2504,7 +2527,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ continue;
+ if (vcpu == me)
+ continue;
+- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
++ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
+ continue;
+ if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+ continue;
+--
+2.22.0
+
diff --git a/series.conf b/series.conf
index 83fd70ff1b..10ab275b5c 100644
--- a/series.conf
+++ b/series.conf
@@ -1157,6 +1157,7 @@
patches.kernel.org/5.2.9-134-NFSv4-Fix-delegation-state-recovery.patch
patches.kernel.org/5.2.9-135-NFSv4-Check-the-return-value-of-update_open_sta.patch
patches.kernel.org/5.2.9-136-NFSv4-Fix-an-Oops-in-nfs4_do_setattr.patch
+ patches.kernel.org/5.2.9-137-KVM-Fix-leak-vCPU-s-VMCS-value-into-other-pCPU.patch
########################################################
# Build fixes that apply to the vanilla kernel too.
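
As a rough illustration of what the new directed-yield filter in this patch
checks, here is a small userspace sketch of the logic. The toy_* names and the
flat boolean fields are invented for this example only; the real kernel code
uses the kvm_vcpu structures and the kvm_arch_dy_runnable()/vcpu_dy_runnable()
helpers shown in the hunks above. The point is the shape of the test: a waiting
vCPU is only treated as a boost candidate when cheap per-vCPU state
(pv_unhalted, a pending NMI/SMI/event request, a posted interrupt, or a
completed async page fault) says it has work to do, instead of running the full
kvm_arch_vcpu_runnable() check, which may touch the currently loaded VMCS.

/* Simplified userspace model of the candidate filter in kvm_vcpu_on_spin().
 * The types and names below are illustrative, not the kernel's own. */
#include <stdbool.h>
#include <stdio.h>

struct toy_vcpu {
	int  id;
	bool waiting;            /* models swait_active(&vcpu->wq)        */
	bool pv_unhalted;        /* models READ_ONCE(arch.pv.pv_unhalted) */
	bool nmi_smi_or_event;   /* models pending KVM_REQ_NMI/SMI/EVENT  */
	bool apicv_pending_intr; /* models the posted-interrupt ON bit    */
	bool async_pf_done;      /* models a non-empty async_pf.done list */
	bool in_kernel;          /* models kvm_arch_vcpu_in_kernel()      */
};

/* Rough analogue of x86 kvm_arch_dy_runnable(): only cheap per-vCPU state,
 * nothing that requires the vCPU (and hence its VMCS) to be loaded. */
static bool toy_dy_runnable(const struct toy_vcpu *v)
{
	return v->pv_unhalted || v->nmi_smi_or_event || v->apicv_pending_intr;
}

/* Rough analogue of vcpu_dy_runnable(): arch check plus async page faults. */
static bool toy_vcpu_dy_runnable(const struct toy_vcpu *v)
{
	return toy_dy_runnable(v) || v->async_pf_done;
}

/* Candidate filter as in kvm_vcpu_on_spin() after this patch. */
static bool toy_yield_candidate(const struct toy_vcpu *v, bool kernel_mode)
{
	if (v->waiting && !toy_vcpu_dy_runnable(v))
		return false;
	if (kernel_mode && !v->in_kernel)
		return false;
	return true;
}

int main(void)
{
	struct toy_vcpu idle = { .id = 1, .waiting = true };
	struct toy_vcpu intr = { .id = 2, .waiting = true,
				 .apicv_pending_intr = true };

	/* The idle vCPU is skipped; the one with a posted interrupt is not. */
	printf("vCPU %d candidate: %d\n", idle.id,
	       toy_yield_candidate(&idle, false));
	printf("vCPU %d candidate: %d\n", intr.id,
	       toy_yield_candidate(&intr, false));
	return 0;
}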