author     Michal Suchanek <msuchanek@suse.de>   2018-10-31 12:36:54 +0100
committer  Michal Suchanek <msuchanek@suse.de>   2018-10-31 23:28:40 +0100
commit     755c740ee99869e06931b062781de6d0edc4959b (patch)
tree       1a7c656c71dfbfb6d8d87da4c1b55c6b5f8f05d8
parent     183271dd44e787a6c52ff5d8f70a726f40a36f4d (diff)
KVM: PPC: Move nip/ctr/lr/xer registers to pt_regs in
kvm_vcpu_arch (bsc#1061840).
-rw-r--r--  patches.arch/KVM-PPC-Move-nip-ctr-lr-xer-registers-to-pt_regs-in-.patch  568
-rw-r--r--  series.conf                                                                1
2 files changed, 569 insertions(+), 0 deletions(-)
diff --git a/patches.arch/KVM-PPC-Move-nip-ctr-lr-xer-registers-to-pt_regs-in-.patch b/patches.arch/KVM-PPC-Move-nip-ctr-lr-xer-registers-to-pt_regs-in-.patch
new file mode 100644
index 0000000000..d15a675d20
--- /dev/null
+++ b/patches.arch/KVM-PPC-Move-nip-ctr-lr-xer-registers-to-pt_regs-in-.patch
@@ -0,0 +1,568 @@
+From 8ed09b010711dfaab8ef261f23e968e002ec04f8 Mon Sep 17 00:00:00 2001
+From: Simon Guo <wei.guo.simon@gmail.com>
+Date: Mon, 7 May 2018 14:20:08 +0800
+Subject: [PATCH] KVM: PPC: Move nip/ctr/lr/xer registers to pt_regs in
+ kvm_vcpu_arch
+
+References: bsc#1061840
+Patch-mainline: v4.18-rc1
+Git-commit: 173c520a049f57e2af498a3f0557d07797ce1c1b
+
+This patch moves nip/ctr/lr/xer registers from scattered places in
+kvm_vcpu_arch to pt_regs structure.
+
+cr register is "unsigned long" in pt_regs and u32 in vcpu->arch.
+It will need more consideration and may move in later patches.
+
+Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/include/asm/kvm_book3s.h | 16 ++++++-------
+ arch/powerpc/include/asm/kvm_book3s_64.h | 12 +++++-----
+ arch/powerpc/include/asm/kvm_booke.h | 16 ++++++-------
+ arch/powerpc/include/asm/kvm_host.h | 4 ----
+ arch/powerpc/kernel/asm-offsets.c | 16 ++++++-------
+ arch/powerpc/kvm/book3s_32_mmu.c | 2 +-
+ arch/powerpc/kvm/book3s_hv.c | 6 ++---
+ arch/powerpc/kvm/book3s_hv_tm.c | 10 ++++----
+ arch/powerpc/kvm/book3s_hv_tm_builtin.c | 10 ++++----
+ arch/powerpc/kvm/book3s_pr.c | 16 ++++++-------
+ arch/powerpc/kvm/booke.c | 41 +++++++++++++++++---------------
+ arch/powerpc/kvm/booke_emulate.c | 6 ++---
+ arch/powerpc/kvm/e500_emulate.c | 2 +-
+ arch/powerpc/kvm/e500_mmu.c | 2 +-
+ 14 files changed, 79 insertions(+), 80 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
+index e3182f7ae499..20d3d5a87296 100644
+--- a/arch/powerpc/include/asm/kvm_book3s.h
++++ b/arch/powerpc/include/asm/kvm_book3s.h
+@@ -295,42 +295,42 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+
+ static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.xer = val;
++ vcpu->arch.regs.xer = val;
+ }
+
+ static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.xer;
++ return vcpu->arch.regs.xer;
+ }
+
+ static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.ctr = val;
++ vcpu->arch.regs.ctr = val;
+ }
+
+ static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.ctr;
++ return vcpu->arch.regs.ctr;
+ }
+
+ static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.lr = val;
++ vcpu->arch.regs.link = val;
+ }
+
+ static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.lr;
++ return vcpu->arch.regs.link;
+ }
+
+ static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.pc = val;
++ vcpu->arch.regs.nip = val;
+ }
+
+ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.pc;
++ return vcpu->arch.regs.nip;
+ }
+
+ static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
+diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
+index 38dbcad086d6..dc435a5af7d6 100644
+--- a/arch/powerpc/include/asm/kvm_book3s_64.h
++++ b/arch/powerpc/include/asm/kvm_book3s_64.h
+@@ -483,9 +483,9 @@ static inline u64 sanitize_msr(u64 msr)
+ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.cr = vcpu->arch.cr_tm;
+- vcpu->arch.xer = vcpu->arch.xer_tm;
+- vcpu->arch.lr = vcpu->arch.lr_tm;
+- vcpu->arch.ctr = vcpu->arch.ctr_tm;
++ vcpu->arch.regs.xer = vcpu->arch.xer_tm;
++ vcpu->arch.regs.link = vcpu->arch.lr_tm;
++ vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
+ vcpu->arch.amr = vcpu->arch.amr_tm;
+ vcpu->arch.ppr = vcpu->arch.ppr_tm;
+ vcpu->arch.dscr = vcpu->arch.dscr_tm;
+@@ -500,9 +500,9 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
+ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.cr_tm = vcpu->arch.cr;
+- vcpu->arch.xer_tm = vcpu->arch.xer;
+- vcpu->arch.lr_tm = vcpu->arch.lr;
+- vcpu->arch.ctr_tm = vcpu->arch.ctr;
++ vcpu->arch.xer_tm = vcpu->arch.regs.xer;
++ vcpu->arch.lr_tm = vcpu->arch.regs.link;
++ vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
+ vcpu->arch.amr_tm = vcpu->arch.amr;
+ vcpu->arch.ppr_tm = vcpu->arch.ppr;
+ vcpu->arch.dscr_tm = vcpu->arch.dscr;
+diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
+index f5fc9569ef56..d513e3ed1c65 100644
+--- a/arch/powerpc/include/asm/kvm_booke.h
++++ b/arch/powerpc/include/asm/kvm_booke.h
+@@ -56,12 +56,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+
+ static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.xer = val;
++ vcpu->arch.regs.xer = val;
+ }
+
+ static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.xer;
++ return vcpu->arch.regs.xer;
+ }
+
+ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+@@ -72,32 +72,32 @@ static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
+
+ static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.ctr = val;
++ vcpu->arch.regs.ctr = val;
+ }
+
+ static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.ctr;
++ return vcpu->arch.regs.ctr;
+ }
+
+ static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.lr = val;
++ vcpu->arch.regs.link = val;
+ }
+
+ static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.lr;
++ return vcpu->arch.regs.link;
+ }
+
+ static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
+ {
+- vcpu->arch.pc = val;
++ vcpu->arch.regs.nip = val;
+ }
+
+ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
+ {
+- return vcpu->arch.pc;
++ return vcpu->arch.regs.nip;
+ }
+
+ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
+diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
+index 19184c742c16..1dc540b38eba 100644
+--- a/arch/powerpc/include/asm/kvm_host.h
++++ b/arch/powerpc/include/asm/kvm_host.h
+@@ -526,14 +526,10 @@ struct kvm_vcpu_arch {
+ u32 qpr[32];
+ #endif
+
+- ulong pc;
+- ulong ctr;
+- ulong lr;
+ #ifdef CONFIG_PPC_BOOK3S
+ ulong tar;
+ #endif
+
+- ulong xer;
+ u32 cr;
+
+ #ifdef CONFIG_PPC_BOOK3S
+diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
+index 4c374dec043c..aae1592d7c8a 100644
+--- a/arch/powerpc/kernel/asm-offsets.c
++++ b/arch/powerpc/kernel/asm-offsets.c
+@@ -425,14 +425,14 @@ int main(void)
+ #ifdef CONFIG_ALTIVEC
+ OFFSET(VCPU_VRS, kvm_vcpu, arch.vr.vr);
+ #endif
+- OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
+- OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
+- OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
++ OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
++ OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
++ OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
+ #ifdef CONFIG_PPC_BOOK3S
+ OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
+ #endif
+ OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+- OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
++ OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
+ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
+ OFFSET(VCPU_SRR0, kvm_vcpu, arch.shregs.srr0);
+@@ -689,10 +689,10 @@ int main(void)
+
+ #else /* CONFIG_PPC_BOOK3S */
+ OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+- OFFSET(VCPU_XER, kvm_vcpu, arch.xer);
+- OFFSET(VCPU_LR, kvm_vcpu, arch.lr);
+- OFFSET(VCPU_CTR, kvm_vcpu, arch.ctr);
+- OFFSET(VCPU_PC, kvm_vcpu, arch.pc);
++ OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
++ OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
++ OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
++ OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
+ OFFSET(VCPU_SPRG9, kvm_vcpu, arch.sprg9);
+ OFFSET(VCPU_LAST_INST, kvm_vcpu, arch.last_inst);
+ OFFSET(VCPU_FAULT_DEAR, kvm_vcpu, arch.fault_dear);
+diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
+index 1992676c7a94..45c8ea4a0487 100644
+--- a/arch/powerpc/kvm/book3s_32_mmu.c
++++ b/arch/powerpc/kvm/book3s_32_mmu.c
+@@ -52,7 +52,7 @@
+ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
+ {
+ #ifdef DEBUG_MMU_PTE_IP
+- return vcpu->arch.pc == DEBUG_MMU_PTE_IP;
++ return vcpu->arch.regs.nip == DEBUG_MMU_PTE_IP;
+ #else
+ return true;
+ #endif
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 73dd78918e9b..68e8ed80f57c 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -399,13 +399,13 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+
+ pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
+ pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
+- vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
++ vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
+ for (r = 0; r < 16; ++r)
+ pr_err("r%2d = %.16lx r%d = %.16lx\n",
+ r, kvmppc_get_gpr(vcpu, r),
+ r+16, kvmppc_get_gpr(vcpu, r+16));
+ pr_err("ctr = %.16lx lr = %.16lx\n",
+- vcpu->arch.ctr, vcpu->arch.lr);
++ vcpu->arch.regs.ctr, vcpu->arch.regs.link);
+ pr_err("srr0 = %.16llx srr1 = %.16llx\n",
+ vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
+ pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
+@@ -413,7 +413,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+ pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
+ vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
+ pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
+- vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
++ vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
+ pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
+ pr_err("fault dar = %.16lx dsisr = %.8x\n",
+ vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
+diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
+index bf710ad3a6d7..008285058f9b 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm.c
++++ b/arch/powerpc/kvm/book3s_hv_tm.c
+@@ -19,7 +19,7 @@ static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause)
+ u64 texasr, tfiar;
+ u64 msr = vcpu->arch.shregs.msr;
+
+- tfiar = vcpu->arch.pc & ~0x3ull;
++ tfiar = vcpu->arch.regs.nip & ~0x3ull;
+ texasr = (failure_cause << 56) | TEXASR_ABORT | TEXASR_FS | TEXASR_EXACT;
+ if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
+ texasr |= TEXASR_SUSP;
+@@ -57,8 +57,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ (newmsr & MSR_TM)));
+ newmsr = sanitize_msr(newmsr);
+ vcpu->arch.shregs.msr = newmsr;
+- vcpu->arch.cfar = vcpu->arch.pc - 4;
+- vcpu->arch.pc = vcpu->arch.shregs.srr0;
++ vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
++ vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
+ return RESUME_GUEST;
+
+ case PPC_INST_RFEBB:
+@@ -90,8 +90,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.bescr = bescr;
+ msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
+ vcpu->arch.shregs.msr = msr;
+- vcpu->arch.cfar = vcpu->arch.pc - 4;
+- vcpu->arch.pc = vcpu->arch.ebbrr;
++ vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
++ vcpu->arch.regs.nip = vcpu->arch.ebbrr;
+ return RESUME_GUEST;
+
+ case PPC_INST_MTMSRD:
+diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+index d98ccfd2b88c..b2c7c6fca4f9 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+@@ -35,8 +35,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ return 0;
+ newmsr = sanitize_msr(newmsr);
+ vcpu->arch.shregs.msr = newmsr;
+- vcpu->arch.cfar = vcpu->arch.pc - 4;
+- vcpu->arch.pc = vcpu->arch.shregs.srr0;
++ vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
++ vcpu->arch.regs.nip = vcpu->arch.shregs.srr0;
+ return 1;
+
+ case PPC_INST_RFEBB:
+@@ -58,8 +58,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ mtspr(SPRN_BESCR, bescr);
+ msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
+ vcpu->arch.shregs.msr = msr;
+- vcpu->arch.cfar = vcpu->arch.pc - 4;
+- vcpu->arch.pc = mfspr(SPRN_EBBRR);
++ vcpu->arch.cfar = vcpu->arch.regs.nip - 4;
++ vcpu->arch.regs.nip = mfspr(SPRN_EBBRR);
+ return 1;
+
+ case PPC_INST_MTMSRD:
+@@ -103,7 +103,7 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
+ {
+ vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
+- vcpu->arch.pc = vcpu->arch.tfhar;
++ vcpu->arch.regs.nip = vcpu->arch.tfhar;
+ copy_from_checkpoint(vcpu);
+ vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
+ }
+diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
+index 899bc9a02ab5..67061d399cd9 100644
+--- a/arch/powerpc/kvm/book3s_pr.c
++++ b/arch/powerpc/kvm/book3s_pr.c
+@@ -162,10 +162,10 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
+ svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
+ svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
+ svcpu->cr = vcpu->arch.cr;
+- svcpu->xer = vcpu->arch.xer;
+- svcpu->ctr = vcpu->arch.ctr;
+- svcpu->lr = vcpu->arch.lr;
+- svcpu->pc = vcpu->arch.pc;
++ svcpu->xer = vcpu->arch.regs.xer;
++ svcpu->ctr = vcpu->arch.regs.ctr;
++ svcpu->lr = vcpu->arch.regs.link;
++ svcpu->pc = vcpu->arch.regs.nip;
+ #ifdef CONFIG_PPC_BOOK3S_64
+ svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
+ #endif
+@@ -209,10 +209,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
+ vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
+ vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
+ vcpu->arch.cr = svcpu->cr;
+- vcpu->arch.xer = svcpu->xer;
+- vcpu->arch.ctr = svcpu->ctr;
+- vcpu->arch.lr = svcpu->lr;
+- vcpu->arch.pc = svcpu->pc;
++ vcpu->arch.regs.xer = svcpu->xer;
++ vcpu->arch.regs.ctr = svcpu->ctr;
++ vcpu->arch.regs.link = svcpu->lr;
++ vcpu->arch.regs.nip = svcpu->pc;
+ vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
+ vcpu->arch.fault_dar = svcpu->fault_dar;
+ vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
+diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
+index 071b87ee682f..0521910be3e2 100644
+--- a/arch/powerpc/kvm/booke.c
++++ b/arch/powerpc/kvm/booke.c
+@@ -77,8 +77,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
+ {
+ int i;
+
+- printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
+- printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
++ printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
++ vcpu->arch.shared->msr);
++ printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
++ vcpu->arch.regs.ctr);
+ printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
+ vcpu->arch.shared->srr1);
+
+@@ -484,24 +486,25 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
+ if (allowed) {
+ switch (int_class) {
+ case INT_CLASS_NONCRIT:
+- set_guest_srr(vcpu, vcpu->arch.pc,
++ set_guest_srr(vcpu, vcpu->arch.regs.nip,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_CRIT:
+- set_guest_csrr(vcpu, vcpu->arch.pc,
++ set_guest_csrr(vcpu, vcpu->arch.regs.nip,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_DBG:
+- set_guest_dsrr(vcpu, vcpu->arch.pc,
++ set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
+ vcpu->arch.shared->msr);
+ break;
+ case INT_CLASS_MC:
+- set_guest_mcsrr(vcpu, vcpu->arch.pc,
++ set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
+ vcpu->arch.shared->msr);
+ break;
+ }
+
+- vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
++ vcpu->arch.regs.nip = vcpu->arch.ivpr |
++ vcpu->arch.ivor[priority];
+ if (update_esr == true)
+ kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
+ if (update_dear == true)
+@@ -819,7 +822,7 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+
+ case EMULATE_FAIL:
+ printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
+- __func__, vcpu->arch.pc, vcpu->arch.last_inst);
++ __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
+ /* For debugging, encode the failing instruction and
+ * report it to userspace. */
+ run->hw.hardware_exit_reason = ~0ULL << 32;
+@@ -868,7 +871,7 @@ static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
+ */
+ vcpu->arch.dbsr = 0;
+ run->debug.arch.status = 0;
+- run->debug.arch.address = vcpu->arch.pc;
++ run->debug.arch.address = vcpu->arch.regs.nip;
+
+ if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
+ run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
+@@ -964,7 +967,7 @@ static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+
+ case EMULATE_FAIL:
+ pr_debug("%s: load instruction from guest address %lx failed\n",
+- __func__, vcpu->arch.pc);
++ __func__, vcpu->arch.regs.nip);
+ /* For debugging, encode the failing instruction and
+ * report it to userspace. */
+ run->hw.hardware_exit_reason = ~0ULL << 32;
+@@ -1162,7 +1165,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ case BOOKE_INTERRUPT_SPE_FP_DATA:
+ case BOOKE_INTERRUPT_SPE_FP_ROUND:
+ printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+- __func__, exit_nr, vcpu->arch.pc);
++ __func__, exit_nr, vcpu->arch.regs.nip);
+ run->hw.hardware_exit_reason = exit_nr;
+ r = RESUME_HOST;
+ break;
+@@ -1292,7 +1295,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ }
+
+ case BOOKE_INTERRUPT_ITLB_MISS: {
+- unsigned long eaddr = vcpu->arch.pc;
++ unsigned long eaddr = vcpu->arch.regs.nip;
+ gpa_t gpaddr;
+ gfn_t gfn;
+ int gtlb_index;
+@@ -1384,7 +1387,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+ int i;
+ int r;
+
+- vcpu->arch.pc = 0;
++ vcpu->arch.regs.nip = 0;
+ vcpu->arch.shared->pir = vcpu->vcpu_id;
+ kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
+ kvmppc_set_msr(vcpu, 0);
+@@ -1432,10 +1435,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+ {
+ int i;
+
+- regs->pc = vcpu->arch.pc;
++ regs->pc = vcpu->arch.regs.nip;
+ regs->cr = kvmppc_get_cr(vcpu);
+- regs->ctr = vcpu->arch.ctr;
+- regs->lr = vcpu->arch.lr;
++ regs->ctr = vcpu->arch.regs.ctr;
++ regs->lr = vcpu->arch.regs.link;
+ regs->xer = kvmppc_get_xer(vcpu);
+ regs->msr = vcpu->arch.shared->msr;
+ regs->srr0 = kvmppc_get_srr0(vcpu);
+@@ -1460,10 +1463,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+ {
+ int i;
+
+- vcpu->arch.pc = regs->pc;
++ vcpu->arch.regs.nip = regs->pc;
+ kvmppc_set_cr(vcpu, regs->cr);
+- vcpu->arch.ctr = regs->ctr;
+- vcpu->arch.lr = regs->lr;
++ vcpu->arch.regs.ctr = regs->ctr;
++ vcpu->arch.regs.link = regs->lr;
+ kvmppc_set_xer(vcpu, regs->xer);
+ kvmppc_set_msr(vcpu, regs->msr);
+ kvmppc_set_srr0(vcpu, regs->srr0);
+diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
+index a82f64502de1..d23e582f0fee 100644
+--- a/arch/powerpc/kvm/booke_emulate.c
++++ b/arch/powerpc/kvm/booke_emulate.c
+@@ -34,19 +34,19 @@
+
+ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
+ {
+- vcpu->arch.pc = vcpu->arch.shared->srr0;
++ vcpu->arch.regs.nip = vcpu->arch.shared->srr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
+ }
+
+ static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
+ {
+- vcpu->arch.pc = vcpu->arch.dsrr0;
++ vcpu->arch.regs.nip = vcpu->arch.dsrr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
+ }
+
+ static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
+ {
+- vcpu->arch.pc = vcpu->arch.csrr0;
++ vcpu->arch.regs.nip = vcpu->arch.csrr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.csrr1);
+ }
+
+diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
+index 8f871fb75228..3f8189eb56ed 100644
+--- a/arch/powerpc/kvm/e500_emulate.c
++++ b/arch/powerpc/kvm/e500_emulate.c
+@@ -94,7 +94,7 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ switch (get_oc(inst)) {
+ case EHPRIV_OC_DEBUG:
+ run->exit_reason = KVM_EXIT_DEBUG;
+- run->debug.arch.address = vcpu->arch.pc;
++ run->debug.arch.address = vcpu->arch.regs.nip;
+ run->debug.arch.status = 0;
+ kvmppc_account_exit(vcpu, DEBUG_EXITS);
+ emulated = EMULATE_EXIT_USER;
+diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
+index ddbf8f0284c0..24296f4cadc6 100644
+--- a/arch/powerpc/kvm/e500_mmu.c
++++ b/arch/powerpc/kvm/e500_mmu.c
+@@ -513,7 +513,7 @@ void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
+ {
+ unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
+
+- kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
++ kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
+ }
+
+ void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
+--
+2.13.7
+
diff --git a/series.conf b/series.conf
index c157b6b10d..28cce9f671 100644
--- a/series.conf
+++ b/series.conf
@@ -16725,6 +16725,7 @@
patches.arch/KVM-PPC-Book3S-Allow-backing-bigger-guest-IOMMU-page.patch
patches.arch/KVM-PPC-Book3S-Check-KVM_CREATE_SPAPR_TCE_64-paramet.patch
patches.arch/KVM-PPC-Add-pt_regs-into-kvm_vcpu_arch-and-move-vcpu.patch
+ patches.arch/KVM-PPC-Move-nip-ctr-lr-xer-registers-to-pt_regs-in-.patch
patches.suse/ipv6-allow-PMTU-exceptions-to-local-routes.patch
patches.suse/net-dsa-add-error-handling-for-pskb_trim_rcsum.patch
patches.drivers/ixgbe-Fix-setting-of-TC-configuration-for-macvlan-ca.patch
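
For reference, a minimal standalone sketch of the access pattern this patch converges on: nip/ctr/link/xer live in an embedded struct pt_regs inside kvm_vcpu_arch, and callers go through the kvmppc_get_*/kvmppc_set_* accessors so they never touch the layout directly. The struct definitions below are simplified stand-ins for illustration only; the authoritative definitions and accessors are in arch/powerpc/include/asm/ as shown in the hunks above.

/*
 * Standalone sketch, not the kernel headers: stand-in types that mirror
 * the layout the patch establishes. The real pt_regs, kvm_vcpu_arch and
 * accessors are in arch/powerpc/include/asm/{ptrace,kvm_host,kvm_book3s}.h.
 */
#include <stdio.h>

typedef unsigned long ulong;

struct pt_regs {                /* subset of the powerpc pt_regs fields */
	ulong nip;              /* next instruction pointer (guest "pc") */
	ulong ctr;
	ulong link;             /* link register ("lr") */
	ulong xer;
};

struct kvm_vcpu_arch {
	struct pt_regs regs;    /* pc/ctr/lr/xer now reached via regs.*   */
	unsigned int cr;        /* cr stays a u32 for now, per the commit log */
};

struct kvm_vcpu {
	struct kvm_vcpu_arch arch;
};

/* Accessors keep callers layout-agnostic, matching the kvm_book3s.h hunks. */
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}

int main(void)
{
	struct kvm_vcpu vcpu = { 0 };

	kvmppc_set_pc(&vcpu, 0x100);
	printf("pc = %#lx\n", kvmppc_get_pc(&vcpu));
	return 0;
}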