author      Michal Suchanek <msuchanek@suse.de>  2018-10-31 12:36:54 +0100
committer   Michal Suchanek <msuchanek@suse.de>  2018-10-31 23:28:42 +0100
commit      2bdb00682719e44b17952a3ac5b844961f919281 (patch)
tree        98e103e4817d395d1630018642e2fc5f6f6e3dc3
parent      781ab85590a3d4ab5ce15910d62c354533a395e3 (diff)
KVM: PPC: Book3S PR: Add guest MSR parameter for
kvmppc_save_tm()/kvmppc_restore_tm() (bsc#1061840).
-rw-r--r--  patches.arch/KVM-PPC-Book3S-PR-Add-guest-MSR-parameter-for-kvmppc.patch | 321
-rw-r--r--  series.conf | 1
2 files changed, 322 insertions, 0 deletions
diff --git a/patches.arch/KVM-PPC-Book3S-PR-Add-guest-MSR-parameter-for-kvmppc.patch b/patches.arch/KVM-PPC-Book3S-PR-Add-guest-MSR-parameter-for-kvmppc.patch
new file mode 100644
index 0000000000..96b86534b2
--- /dev/null
+++ b/patches.arch/KVM-PPC-Book3S-PR-Add-guest-MSR-parameter-for-kvmppc.patch
@@ -0,0 +1,321 @@
+From 6f597c6b63b6f3675914b5ec8fcd008a58678650 Mon Sep 17 00:00:00 2001
+From: Simon Guo <wei.guo.simon@gmail.com>
+Date: Wed, 23 May 2018 15:01:48 +0800
+Subject: [PATCH] KVM: PPC: Book3S PR: Add guest MSR parameter for
+ kvmppc_save_tm()/kvmppc_restore_tm()
+
+References: bsc#1061840
+Patch-mainline: v4.18-rc1
+Git-commit: 6f597c6b63b6f3675914b5ec8fcd008a58678650
+
+HV KVM and PR KVM need different MSR sources to indicate whether a
+treclaim. or a trecheckpoint. is necessary.
+
+This patch adds a new parameter (guest MSR) to the kvmppc_save_tm()/
+kvmppc_restore_tm() APIs:
+- For HV KVM, it is VCPU_MSR
+- For PR KVM, it is the current host MSR or VCPU_SHADOW_SRR1
+
+This enhancement enables these two APIs to be reused by PR KVM later,
+while keeping the HV KVM logic unchanged.
+
+This patch also reworks kvmppc_save_tm()/kvmppc_restore_tm() to
+have a clean ABI: r3 for vcpu and r4 for guest_msr.
+
+During kvmppc_save_tm()/kvmppc_restore_tm(), r1 needs to be saved
+and restored. Currently r1 is saved into HSTATE_HOST_R1. In PR
+KVM, we are going to add a C function wrapper for
+kvmppc_save_tm()/kvmppc_restore_tm() in which r1 will be adjusted
+by an added stack frame and then saved into HSTATE_HOST_R1. There are
+several places in HV KVM that load HSTATE_HOST_R1 back into r1, and we
+don't want the TM code to introduce risk or confusion there.
+
+This patch therefore uses HSTATE_SCRATCH2 to save/restore r1 in
+kvmppc_save_tm()/kvmppc_restore_tm() to avoid future confusion, since
+r1 is really only a temporary/scratch value at that point.
+
+[paulus@ozlabs.org - rebased on top of 7b0e827c6970 ("KVM: PPC: Book3S HV:
+ Factor fake-suspend handling out of kvmppc_save/restore_tm", 2018-05-30)]
+
+Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 33 +++++++++------
+ arch/powerpc/kvm/tm.S | 71 +++++++++++++++++----------------
+ 2 files changed, 57 insertions(+), 47 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 8e016598692e..75e3bbf8c957 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -793,7 +793,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
++ mr r3, r4
++ ld r4, VCPU_MSR(r3)
+ bl kvmppc_restore_tm_hv
++ ld r4, HSTATE_KVM_VCPU(r13)
+ 91:
+ #endif
+
+@@ -1777,7 +1780,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
++ mr r3, r9
++ ld r4, VCPU_MSR(r3)
+ bl kvmppc_save_tm_hv
++ ld r9, HSTATE_KVM_VCPU(r13)
+ 91:
+ #endif
+
+@@ -2680,7 +2686,8 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
+- ld r9, HSTATE_KVM_VCPU(r13)
++ ld r3, HSTATE_KVM_VCPU(r13)
++ ld r4, VCPU_MSR(r3)
+ bl kvmppc_save_tm_hv
+ 91:
+ #endif
+@@ -2799,7 +2806,10 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
+ /*
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ */
++ mr r3, r4
++ ld r4, VCPU_MSR(r3)
+ bl kvmppc_restore_tm_hv
++ ld r4, HSTATE_KVM_VCPU(r13)
+ 91:
+ #endif
+
+@@ -3120,9 +3130,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Save transactional state and TM-related registers.
+- * Called with r9 pointing to the vcpu struct.
++ * Called with r3 pointing to the vcpu struct and r4 containing
++ * the guest MSR value.
+ * This can modify all checkpointed registers, but
+- * restores r1, r2 and r9 (vcpu pointer) before exit.
++ * restores r1 and r2 before exit.
+ */
+ kvmppc_save_tm_hv:
+ /* See if we need to handle fake suspend mode */
+@@ -3205,9 +3216,10 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
+
+ /*
+ * Restore transactional state and TM-related registers.
+- * Called with r4 pointing to the vcpu struct.
++ * Called with r3 pointing to the vcpu struct
++ * and r4 containing the guest MSR value.
+ * This potentially modifies all checkpointed registers.
+- * It restores r1, r2, r4 from the PACA.
++ * It restores r1 and r2 from the PACA.
+ */
+ kvmppc_restore_tm_hv:
+ /*
+@@ -3234,15 +3246,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+ * The user may change these outside of a transaction, so they must
+ * always be context switched.
+ */
+- ld r5, VCPU_TFHAR(r4)
+- ld r6, VCPU_TFIAR(r4)
+- ld r7, VCPU_TEXASR(r4)
++ ld r5, VCPU_TFHAR(r3)
++ ld r6, VCPU_TFIAR(r3)
++ ld r7, VCPU_TEXASR(r3)
+ mtspr SPRN_TFHAR, r5
+ mtspr SPRN_TFIAR, r6
+ mtspr SPRN_TEXASR, r7
+
+- ld r5, VCPU_MSR(r4)
+- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++ rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
+ beqlr /* TM not active in guest */
+
+ /* Make sure the failure summary is set */
+@@ -3255,10 +3266,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
+ b 9f /* and return */
+ 10: stdu r1, -PPC_MIN_STKFRM(r1)
+ /* guest is in transactional state, so simulate rollback */
+- mr r3, r4
+ bl kvmhv_emulate_tm_rollback
+ nop
+- ld r4, HSTATE_KVM_VCPU(r13) /* our vcpu pointer has been trashed */
+ addi r1, r1, PPC_MIN_STKFRM
+ 9: ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S
+index ba97789c41ca..f027b5a0c0f0 100644
+--- a/arch/powerpc/kvm/tm.S
++++ b/arch/powerpc/kvm/tm.S
+@@ -26,9 +26,12 @@
+
+ /*
+ * Save transactional state and TM-related registers.
+- * Called with r9 pointing to the vcpu struct.
++ * Called with:
++ * - r3 pointing to the vcpu struct
++ * - r4 points to the MSR with current TS bits:
++ * (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
+ * This can modify all checkpointed registers, but
+- * restores r1, r2 and r9 (vcpu pointer) before exit.
++ * restores r1, r2 before exit.
+ */
+ _GLOBAL(kvmppc_save_tm)
+ mflr r0
+@@ -40,20 +43,17 @@ _GLOBAL(kvmppc_save_tm)
+ rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r8
+
+-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+- ld r5, VCPU_MSR(r9)
+- rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
++ rldicl. r4, r4, 64 - MSR_TS_S_LG, 62
+ beq 1f /* TM not active in guest. */
+-#endif
+
+- std r1, HSTATE_HOST_R1(r13)
+- li r3, TM_CAUSE_KVM_RESCHED
++ std r1, HSTATE_SCRATCH2(r13)
++ std r3, HSTATE_SCRATCH1(r13)
+
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ BEGIN_FTR_SECTION
+ /* Emulation of the treclaim instruction needs TEXASR before treclaim */
+ mfspr r6, SPRN_TEXASR
+- std r6, VCPU_ORIG_TEXASR(r9)
++ std r6, VCPU_ORIG_TEXASR(r3)
+ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
+ #endif
+
+@@ -61,6 +61,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
+ li r5, 0
+ mtmsrd r5, 1
+
++ li r3, TM_CAUSE_KVM_RESCHED
++
+ /* All GPRs are volatile at this point. */
+ TRECLAIM(R3)
+
+@@ -68,9 +70,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
+ SET_SCRATCH0(r13)
+ GET_PACA(r13)
+ std r9, PACATMSCRATCH(r13)
+-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+- ld r9, HSTATE_KVM_VCPU(r13)
+-#endif
++ ld r9, HSTATE_SCRATCH1(r13)
+
+ /* Get a few more GPRs free. */
+ std r29, VCPU_GPRS_TM(29)(r9)
+@@ -102,7 +102,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
+ std r4, VCPU_GPRS_TM(9)(r9)
+
+ /* Reload stack pointer and TOC. */
+- ld r1, HSTATE_HOST_R1(r13)
++ ld r1, HSTATE_SCRATCH2(r13)
+ ld r2, PACATOC(r13)
+
+ /* Set MSR RI now we have r1 and r13 back. */
+@@ -156,9 +156,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
+
+ /*
+ * Restore transactional state and TM-related registers.
+- * Called with r4 pointing to the vcpu struct.
++ * Called with:
++ * - r3 pointing to the vcpu struct.
++ * - r4 is the guest MSR with desired TS bits:
++ * For HV KVM, it is VCPU_MSR
++ * For PR KVM, it is provided by caller
+ * This potentially modifies all checkpointed registers.
+- * It restores r1, r2, r4 from the PACA.
++ * It restores r1, r2 from the PACA.
+ */
+ _GLOBAL(kvmppc_restore_tm)
+ mflr r0
+@@ -177,19 +181,17 @@ _GLOBAL(kvmppc_restore_tm)
+ * The user may change these outside of a transaction, so they must
+ * always be context switched.
+ */
+- ld r5, VCPU_TFHAR(r4)
+- ld r6, VCPU_TFIAR(r4)
+- ld r7, VCPU_TEXASR(r4)
++ ld r5, VCPU_TFHAR(r3)
++ ld r6, VCPU_TFIAR(r3)
++ ld r7, VCPU_TEXASR(r3)
+ mtspr SPRN_TFHAR, r5
+ mtspr SPRN_TFIAR, r6
+ mtspr SPRN_TEXASR, r7
+
+-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+- ld r5, VCPU_MSR(r4)
++ mr r5, r4
+ rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+ beqlr /* TM not active in guest */
+-#endif
+- std r1, HSTATE_HOST_R1(r13)
++ std r1, HSTATE_SCRATCH2(r13)
+
+ /* Make sure the failure summary is set, otherwise we'll program check
+ * when we trechkpt. It's possible that this might have been not set
+@@ -205,21 +207,21 @@ _GLOBAL(kvmppc_restore_tm)
+ * some SPRs.
+ */
+
+- mr r31, r4
++ mr r31, r3
+ addi r3, r31, VCPU_FPRS_TM
+ bl load_fp_state
+ addi r3, r31, VCPU_VRS_TM
+ bl load_vr_state
+- mr r4, r31
+- lwz r7, VCPU_VRSAVE_TM(r4)
++ mr r3, r31
++ lwz r7, VCPU_VRSAVE_TM(r3)
+ mtspr SPRN_VRSAVE, r7
+
+- ld r5, VCPU_LR_TM(r4)
+- lwz r6, VCPU_CR_TM(r4)
+- ld r7, VCPU_CTR_TM(r4)
+- ld r8, VCPU_AMR_TM(r4)
+- ld r9, VCPU_TAR_TM(r4)
+- ld r10, VCPU_XER_TM(r4)
++ ld r5, VCPU_LR_TM(r3)
++ lwz r6, VCPU_CR_TM(r3)
++ ld r7, VCPU_CTR_TM(r3)
++ ld r8, VCPU_AMR_TM(r3)
++ ld r9, VCPU_TAR_TM(r3)
++ ld r10, VCPU_XER_TM(r3)
+ mtlr r5
+ mtcr r6
+ mtctr r7
+@@ -232,8 +234,8 @@ _GLOBAL(kvmppc_restore_tm)
+ * till the last moment to avoid running with userspace PPR and DSCR for
+ * too long.
+ */
+- ld r29, VCPU_DSCR_TM(r4)
+- ld r30, VCPU_PPR_TM(r4)
++ ld r29, VCPU_DSCR_TM(r3)
++ ld r30, VCPU_PPR_TM(r3)
+
+ std r2, PACATMSCRATCH(r13) /* Save TOC */
+
+@@ -265,9 +267,8 @@ _GLOBAL(kvmppc_restore_tm)
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ ld r29, HSTATE_DSCR(r13)
+ mtspr SPRN_DSCR, r29
+- ld r4, HSTATE_KVM_VCPU(r13)
+ #endif
+- ld r1, HSTATE_HOST_R1(r13)
++ ld r1, HSTATE_SCRATCH2(r13)
+ ld r2, PACATMSCRATCH(r13)
+
+ /* Set the MSR RI since we have our registers back. */
+--
+2.13.7
+
diff --git a/series.conf b/series.conf
index a66ac804e8..fb0dc643e5 100644
--- a/series.conf
+++ b/series.conf
@@ -16736,6 +16736,7 @@
patches.arch/KVM-PPC-Book3S-HV-Send-kvmppc_bad_interrupt-NMIs-to-.patch
patches.arch/KVM-PPC-Book3S-HV-Factor-fake-suspend-handling-out-o.patch
patches.arch/KVM-PPC-Book3S-PR-Move-kvmppc_save_tm-kvmppc_restore.patch
+ patches.arch/KVM-PPC-Book3S-PR-Add-guest-MSR-parameter-for-kvmppc.patch
patches.suse/ipv6-allow-PMTU-exceptions-to-local-routes.patch
patches.suse/net-dsa-add-error-handling-for-pskb_trim_rcsum.patch
patches.drivers/ixgbe-Fix-setting-of-TC-configuration-for-macvlan-ca.patch
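
For reference, a minimal sketch of the calling convention the patch above establishes, in the style of the HV call sites in book3s_hv_rmhandlers.S. The label is hypothetical and only illustrates how a caller is expected to set up r3/r4; the register usage is taken from the patch description and diff.

	/*
	 * New ABI for kvmppc_save_tm()/kvmppc_restore_tm():
	 *   r3 = pointer to the vcpu struct
	 *   r4 = MSR value carrying the transaction-state (TS) bits
	 *        (VCPU_MSR for HV KVM; the current host MSR or
	 *        VCPU_SHADOW_SRR1 for PR KVM, per the commit message)
	 * The helpers no longer restore the vcpu pointer, so callers
	 * reload it from HSTATE_KVM_VCPU(r13) after the call.
	 */
	example_save_tm_call:			/* hypothetical label */
		ld	r3, HSTATE_KVM_VCPU(r13)	/* vcpu pointer -> r3 */
		ld	r4, VCPU_MSR(r3)		/* guest MSR with TS bits -> r4 */
		bl	kvmppc_save_tm_hv
		ld	r9, HSTATE_KVM_VCPU(r13)	/* r9 is no longer preserved; re-fetch vcpu */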