author     Jiri Kosina <jkosina@suse.cz>  2018-03-16 14:05:15 +0100
committer  Jiri Kosina <jkosina@suse.cz>  2018-03-16 14:05:15 +0100
commit     e802267c5ef859be0b7e6830f6ceeaf0fb5231a3
tree       041d6212ce44a9b6f8d02da43fe4d298e018037b
parent     f022555e689cc4494a0b0b1fb2ff24799a70d512
parent     385e43ed9677af8c3360d5c2f4d7e0c41bee856e
Merge remote-tracking branch 'origin/users/ptesarik/SLE15/for-next' into SLE15
Pull s390 spectre patches from Petr Tesarik
-rw-r--r--  blacklist.conf                                                            |   1
-rw-r--r--  config/s390x/default                                                      |   5
-rw-r--r--  config/s390x/zfcpdump                                                     |   5
-rw-r--r--  patches.arch/s390-add-options-to-change-branch-prediction-behavio.patch  | 293
-rw-r--r--  patches.arch/s390-alternative-use-a-copy-of-the-facility-bit-mask.patch  | 129
-rw-r--r--  patches.arch/s390-sles15-05-01-gmb.patch                                 |  57
-rw-r--r--  patches.arch/s390-sles15-05-03-scrub-registers.patch                     | 108
-rw-r--r--  patches.arch/s390-sles15-05-04-array-nospec.patch                        |  48
-rw-r--r--  patches.arch/s390-sles15-05-05-bpoff-user-space.patch                    | 210
-rw-r--r--  patches.arch/s390-sles15-05-06-expoline.patch                            | 705
-rw-r--r--  patches.arch/s390-sles15-05-07-expoline-is-enabled.patch                 |  30
-rw-r--r--  patches.arch/s390-sles15-05-08-critical-section-bpenter.patch            |  29
-rw-r--r--  patches.arch/s390-sles15-05-09-svc-zero-r0.patch                         |  53
-rw-r--r--  patches.arch/s390-sles15-99-02-nobp.patch                                | 227
-rw-r--r--  series.conf                                                              |  14

15 files changed, 1685 insertions, 229 deletions
diff --git a/blacklist.conf b/blacklist.conf
index e185685b31..ea1ec7a6ae 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -282,3 +282,4 @@ d6fa71f1c003fb2bc824276bb424a4171f9a717f # peaq-wmi: not present
6d622692836950b3c943776f84c4557ff6c02f3b # rtlwifi: v4.13+
c1e150ceb61e4a585bad156da15c33bfe89f5858 # build fix for NUMA=n
82343484a2d4c97a03bfd81303b5493c65f05c50 # build fix for SPLPAR=n
+dc24b7b49a53c7ee5502c877b133558acec0b3f8 # arch/s390 CFI macros: v4.16+
diff --git a/config/s390x/default b/config/s390x/default
index 56d247887a..c55d8e6208 100644
--- a/config/s390x/default
+++ b/config/s390x/default
@@ -479,6 +479,11 @@ CONFIG_HZ_100=y
CONFIG_HZ=100
CONFIG_SCHED_HRTICK=y
CONFIG_ARCH_RANDOM=y
+# CONFIG_KERNEL_NOBP is not set
+CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_OFF is not set
+# CONFIG_EXPOLINE_MEDIUM is not set
+CONFIG_EXPOLINE_FULL=y
#
# Memory setup
diff --git a/config/s390x/zfcpdump b/config/s390x/zfcpdump
index 1405ba4061..95949a7abf 100644
--- a/config/s390x/zfcpdump
+++ b/config/s390x/zfcpdump
@@ -379,6 +379,11 @@ CONFIG_HZ_250=y
CONFIG_HZ=250
# CONFIG_SCHED_HRTICK is not set
CONFIG_ARCH_RANDOM=y
+# CONFIG_KERNEL_NOBP is not set
+CONFIG_EXPOLINE=y
+# CONFIG_EXPOLINE_OFF is not set
+# CONFIG_EXPOLINE_MEDIUM is not set
+CONFIG_EXPOLINE_FULL=y
#
# Memory setup
diff --git a/patches.arch/s390-add-options-to-change-branch-prediction-behavio.patch b/patches.arch/s390-add-options-to-change-branch-prediction-behavio.patch
new file mode 100644
index 0000000000..fbdaab9b53
--- /dev/null
+++ b/patches.arch/s390-add-options-to-change-branch-prediction-behavio.patch
@@ -0,0 +1,293 @@
+From d768bd892fc8f066cd3aa000eb1867bcf32db0ee Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Tue, 16 Jan 2018 07:11:45 +0100
+Subject: s390: add options to change branch prediction behaviour for the
+ kernel
+References: LTC#164304, bsc#1068032, bsc#1084911
+Git-commit: d768bd892fc8f066cd3aa000eb1867bcf32db0ee
+Patch-mainline: v4.16-rc1
+
+Add the PPA instruction to the system entry and exit path to switch
+the kernel to a different branch prediction behaviour. The instructions
+are added via CPU alternatives and can be disabled with the "nospec"
+or the "nobp=0" kernel parameter. If the default behaviour selected
+with CONFIG_KERNEL_NOBP is set to "n" then the "nobp=1" parameter can be
+used to enable the changed kernel branch prediction.
+
+Acked-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+
+---
+ arch/s390/Kconfig | 17 ++++++++++++++
+ arch/s390/include/asm/processor.h | 1 +
+ arch/s390/kernel/alternative.c | 23 +++++++++++++++++++
+ arch/s390/kernel/early.c | 2 ++
+ arch/s390/kernel/entry.S | 48 +++++++++++++++++++++++++++++++++++++++
+ arch/s390/kernel/ipl.c | 1 +
+ arch/s390/kernel/smp.c | 2 ++
+ 7 files changed, 94 insertions(+)
+
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index 0105ce28e246..d514e25095c2 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -540,6 +540,23 @@ config ARCH_RANDOM
+
+ If unsure, say Y.
+
++config KERNEL_NOBP
++ def_bool n
++ prompt "Enable modified branch prediction for the kernel by default"
++ help
++ If this option is selected the kernel will switch to a modified
++ branch prediction mode if the firmware interface is available.
++ The modified branch prediction mode improves the behaviour in
++ regard to speculative execution.
++
++ With the option enabled the kernel parameter "nobp=0" or "nospec"
++ can be used to run the kernel in the normal branch prediction mode.
++
++ With the option disabled the modified branch prediction mode is
++ enabled with the "nobp=1" kernel parameter.
++
++ If unsure, say N.
++
+ endmenu
+
+ menu "Memory setup"
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index bfbfad482289..5f37f9ceef5e 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -91,6 +91,7 @@ void cpu_detect_mhz_feature(void);
+ extern const struct seq_operations cpuinfo_op;
+ extern int sysctl_ieee_emulation_warnings;
+ extern void execve_tail(void);
++extern void __bpon(void);
+
+ /*
+ * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
+diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
+index 1abf4f35d059..22476135f738 100644
+--- a/arch/s390/kernel/alternative.c
++++ b/arch/s390/kernel/alternative.c
+@@ -15,6 +15,29 @@ static int __init disable_alternative_instructions(char *str)
+
+ early_param("noaltinstr", disable_alternative_instructions);
+
++static int __init nobp_setup_early(char *str)
++{
++ bool enabled;
++ int rc;
++
++ rc = kstrtobool(str, &enabled);
++ if (rc)
++ return rc;
++ if (enabled && test_facility(82))
++ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
++ else
++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++ return 0;
++}
++early_param("nobp", nobp_setup_early);
++
++static int __init nospec_setup_early(char *str)
++{
++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
++ return 0;
++}
++early_param("nospec", nospec_setup_early);
++
+ struct brcl_insn {
+ u16 opc;
+ s32 disp;
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 510f2183a7e7..ac707a9f729e 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -196,6 +196,8 @@ static noinline __init void setup_facility_list(void)
+ memcpy(S390_lowcore.alt_stfle_fac_list,
+ S390_lowcore.stfle_fac_list,
+ sizeof(S390_lowcore.alt_stfle_fac_list));
++ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
++ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+
+ static __init void detect_diag9c(void)
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 5d87eda605f2..e6d7550a3af8 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -159,6 +159,34 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
+ tm off+\addr, \mask
+ .endm
+
++ .macro BPOFF
++ .pushsection .altinstr_replacement, "ax"
++660: .long 0xb2e8c000
++ .popsection
++661: .long 0x47000000
++ .pushsection .altinstructions, "a"
++ .long 661b - .
++ .long 660b - .
++ .word 82
++ .byte 4
++ .byte 4
++ .popsection
++ .endm
++
++ .macro BPON
++ .pushsection .altinstr_replacement, "ax"
++662: .long 0xb2e8d000
++ .popsection
++663: .long 0x47000000
++ .pushsection .altinstructions, "a"
++ .long 663b - .
++ .long 662b - .
++ .word 82
++ .byte 4
++ .byte 4
++ .popsection
++ .endm
++
+ .section .kprobes.text, "ax"
+ .Ldummy:
+ /*
+@@ -171,6 +199,11 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCALL_RESTART)
+ */
+ nop 0
+
++ENTRY(__bpon)
++ .globl __bpon
++ BPON
++ br %r14
++
+ /*
+ * Scheduler resume function, called by switch_to
+ * gpr2 = (task_struct *) prev
+@@ -226,8 +259,11 @@ ENTRY(sie64a)
+ jnz .Lsie_skip
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
+ jo .Lsie_skip # exit if fp/vx regs changed
++ BPON
+ .Lsie_entry:
+ sie 0(%r14)
++.Lsie_exit:
++ BPOFF
+ .Lsie_skip:
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+@@ -279,6 +315,7 @@ ENTRY(system_call)
+ stpt __LC_SYNC_ENTER_TIMER
+ .Lsysc_stmg:
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
++ BPOFF
+ lg %r12,__LC_CURRENT
+ lghi %r13,__TASK_thread
+ lghi %r14,_PIF_SYSCALL
+@@ -325,6 +362,7 @@ ENTRY(system_call)
+ jnz .Lsysc_work # check for work
+ TSTMSK __LC_CPU_FLAGS,_CIF_WORK
+ jnz .Lsysc_work
++ BPON
+ .Lsysc_restore:
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+@@ -530,6 +568,7 @@ ENTRY(kernel_thread_starter)
+
+ ENTRY(pgm_check_handler)
+ stpt __LC_SYNC_ENTER_TIMER
++ BPOFF
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+ lg %r12,__LC_CURRENT
+@@ -637,6 +676,7 @@ ENTRY(pgm_check_handler)
+ ENTRY(io_int_handler)
+ STCK __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
++ BPOFF
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r12,__LC_CURRENT
+ larl %r13,cleanup_critical
+@@ -687,9 +727,13 @@ ENTRY(io_int_handler)
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
++ tm __PT_PSW+1(%r11),0x01 # returning to user ?
++ jno .Lio_exit_kernel
++ BPON
+ .Lio_exit_timer:
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
++.Lio_exit_kernel:
+ lmg %r11,%r15,__PT_R11(%r11)
+ lpswe __LC_RETURN_PSW
+ .Lio_done:
+@@ -860,6 +904,7 @@ ENTRY(io_int_handler)
+ ENTRY(ext_int_handler)
+ STCK __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
++ BPOFF
+ stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
+ lg %r12,__LC_CURRENT
+ larl %r13,cleanup_critical
+@@ -908,6 +953,7 @@ ENTRY(psw_idle)
+ .Lpsw_idle_stcctm:
+ #endif
+ oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
++ BPON
+ STCK __CLOCK_IDLE_ENTER(%r2)
+ stpt __TIMER_IDLE_ENTER(%r2)
+ .Lpsw_idle_lpsw:
+@@ -1008,6 +1054,7 @@ load_fpu_regs:
+ */
+ ENTRY(mcck_int_handler)
+ STCK __LC_MCCK_CLOCK
++ BPOFF
+ la %r1,4095 # revalidate r1
+ spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
+@@ -1118,6 +1165,7 @@ ENTRY(mcck_int_handler)
+ mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
+ tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+ jno 0f
++ BPON
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ 0: lmg %r11,%r15,__PT_R11(%r11)
+diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
+index da5cc3b469aa..34477c1aee6d 100644
+--- a/arch/s390/kernel/ipl.c
++++ b/arch/s390/kernel/ipl.c
+@@ -543,6 +543,7 @@ static struct kset *ipl_kset;
+
+ static void __ipl_run(void *unused)
+ {
++ __bpon();
+ diag308(DIAG308_LOAD_CLEAR, NULL);
+ if (MACHINE_IS_VM)
+ __cpcmd("IPL", NULL, 0, NULL);
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index fc28c9571647..2fd7d609dae0 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -319,6 +319,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
+ mem_assign_absolute(lc->restart_fn, (unsigned long) func);
+ mem_assign_absolute(lc->restart_data, (unsigned long) data);
+ mem_assign_absolute(lc->restart_source, source_cpu);
++ __bpon();
+ asm volatile(
+ "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
+ " brc 2,0b # busy, try again\n"
+@@ -903,6 +904,7 @@ void __cpu_die(unsigned int cpu)
+ void __noreturn cpu_die(void)
+ {
+ idle_task_exit();
++ __bpon();
+ pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
+ for (;;) ;
+ }
+--
+2.13.6
+
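The BPON/BPOFF macros in the patch above open-code a CPU alternative keyed on facility bit 82. For readers more familiar with the C side, the same pattern can be expressed with the ALTERNATIVE() helper from <asm/alternative.h>; the sketch below is purely illustrative, not code from the patch series, and it reuses the 0xb2e8d000 PPA encoding from the BPON macro:

    /* Illustrative sketch of a facility-82 gated alternative in C.
     * If facility 82 is installed, the 4-byte PPA instruction
     * (encoding 0xb2e8d000, as in the BPON macro above) is patched in;
     * otherwise the sequence stays a no-op.
     */
    #include <asm/alternative.h>

    static inline void bp_on_example(void)
    {
            asm volatile(
                    ALTERNATIVE("", ".long 0xb2e8d000", 82)
                    : : : "memory");
    }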
diff --git a/patches.arch/s390-alternative-use-a-copy-of-the-facility-bit-mask.patch b/patches.arch/s390-alternative-use-a-copy-of-the-facility-bit-mask.patch
new file mode 100644
index 0000000000..eb777b0232
--- /dev/null
+++ b/patches.arch/s390-alternative-use-a-copy-of-the-facility-bit-mask.patch
@@ -0,0 +1,129 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Tue, 16 Jan 2018 07:03:44 +0100
+Subject: s390/alternative: use a copy of the facility bit mask
+Git-commit: cf1489984641369611556bf00c48f945c77bcf02
+Patch-mainline: v4.16-rc1
+References: LTC#164304, bsc#1084911
+
+To be able to switch off specific CPU alternatives with kernel parameters
+make a copy of the facility bit mask provided by STFLE and use the copy
+for the decision to apply an alternative.
+
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+
+---
+ arch/s390/include/asm/facility.h | 18 ++++++++++++++++++
+ arch/s390/include/asm/lowcore.h | 3 ++-
+ arch/s390/kernel/alternative.c | 3 ++-
+ arch/s390/kernel/early.c | 3 +++
+ arch/s390/kernel/setup.c | 4 +++-
+ arch/s390/kernel/smp.c | 4 +++-
+ 6 files changed, 31 insertions(+), 4 deletions(-)
+
+diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
+index fbe0c4be3cd8..99c8ce30b3cd 100644
+--- a/arch/s390/include/asm/facility.h
++++ b/arch/s390/include/asm/facility.h
+@@ -15,6 +15,24 @@
+
+ #define MAX_FACILITY_BIT (sizeof(((struct lowcore *)0)->stfle_fac_list) * 8)
+
++static inline void __set_facility(unsigned long nr, void *facilities)
++{
++ unsigned char *ptr = (unsigned char *) facilities;
++
++ if (nr >= MAX_FACILITY_BIT)
++ return;
++ ptr[nr >> 3] |= 0x80 >> (nr & 7);
++}
++
++static inline void __clear_facility(unsigned long nr, void *facilities)
++{
++ unsigned char *ptr = (unsigned char *) facilities;
++
++ if (nr >= MAX_FACILITY_BIT)
++ return;
++ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
++}
++
+ static inline int __test_facility(unsigned long nr, void *facilities)
+ {
+ unsigned char *ptr;
+diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
+index ec6592e8ba36..c63986aee942 100644
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -151,7 +151,8 @@ struct lowcore {
+ __u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
+
+ /* Extended facility list */
+- __u64 stfle_fac_list[32]; /* 0x0f00 */
++ __u64 stfle_fac_list[16]; /* 0x0f00 */
++ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
+ __u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
+
+ /* Pointer to the machine check extended save area */
+diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
+index 574e77622c04..1abf4f35d059 100644
+--- a/arch/s390/kernel/alternative.c
++++ b/arch/s390/kernel/alternative.c
+@@ -75,7 +75,8 @@ static void __init_or_module __apply_alternatives(struct alt_instr *start,
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+- if (!test_facility(a->facility))
++ if (!__test_facility(a->facility,
++ S390_lowcore.alt_stfle_fac_list))
+ continue;
+
+ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
+index 497a92047591..510f2183a7e7 100644
+--- a/arch/s390/kernel/early.c
++++ b/arch/s390/kernel/early.c
+@@ -193,6 +193,9 @@ static noinline __init void setup_facility_list(void)
+ {
+ stfle(S390_lowcore.stfle_fac_list,
+ ARRAY_SIZE(S390_lowcore.stfle_fac_list));
++ memcpy(S390_lowcore.alt_stfle_fac_list,
++ S390_lowcore.stfle_fac_list,
++ sizeof(S390_lowcore.alt_stfle_fac_list));
+ }
+
+ static __init void detect_diag9c(void)
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 793da97f9a6e..bcd2a4a3937e 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -340,7 +340,9 @@ static void __init setup_lowcore(void)
+ lc->preempt_count = S390_lowcore.preempt_count;
+ lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
+ memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+- MAX_FACILITY_BIT/8);
++ sizeof(lc->stfle_fac_list));
++ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
++ sizeof(lc->alt_stfle_fac_list));
+ if (MACHINE_HAS_VX || MACHINE_HAS_GS) {
+ unsigned long bits, size;
+
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index a919b2f0141d..fc28c9571647 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -266,7 +266,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
+ __ctl_store(lc->cregs_save_area, 0, 15);
+ save_access_regs((unsigned int *) lc->access_regs_save_area);
+ memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
+- MAX_FACILITY_BIT/8);
++ sizeof(lc->stfle_fac_list));
++ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
++ sizeof(lc->alt_stfle_fac_list));
+ }
+
+ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
+--
+2.13.6
+
diff --git a/patches.arch/s390-sles15-05-01-gmb.patch b/patches.arch/s390-sles15-05-01-gmb.patch
new file mode 100644
index 0000000000..aa135e8da7
--- /dev/null
+++ b/patches.arch/s390-sles15-05-01-gmb.patch
@@ -0,0 +1,57 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Subject: s390/spinlock: add gmb memory barrier
+Patch-mainline: Not yet, under development
+References: LTC#164304, bsc#1084911
+
+Add gmb memory barrier
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+
+---
+ arch/s390/include/asm/barrier.h | 10 ++++++++++
+ arch/s390/kernel/alternative.c | 7 +++++++
+ 2 files changed, 17 insertions(+)
+
+--- a/arch/s390/include/asm/barrier.h
++++ b/arch/s390/include/asm/barrier.h
+@@ -7,6 +7,8 @@
+ #ifndef __ASM_BARRIER_H
+ #define __ASM_BARRIER_H
+
++#include <asm/alternative.h>
++
+ /*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+@@ -22,6 +24,14 @@
+
+ #define mb() do { asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
+
++static inline void gmb(void)
++{
++ asm volatile(
++ ALTERNATIVE("", ".long 0xb2e8f000", 81)
++ : : : "memory");
++}
++#define gmb gmb
++
+ #define rmb() barrier()
+ #define wmb() barrier()
+ #define dma_rmb() mb()
+--- a/arch/s390/kernel/alternative.c
++++ b/arch/s390/kernel/alternative.c
+@@ -37,6 +37,13 @@ static int __init nospec_setup_early(cha
+ }
+ early_param("nospec", nospec_setup_early);
+
++static int __init nogmb_setup_early(char *str)
++{
++ __clear_facility(81, S390_lowcore.alt_stfle_fac_list);
++ return 0;
++}
++early_param("nogmb", nogmb_setup_early);
++
+ struct brcl_insn {
+ u16 opc;
+ s32 disp;
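The gmb() barrier added above is intended to sit right after a bounds check so that the CPU cannot speculatively dereference an out-of-range, attacker-controlled index. A minimal usage sketch, with guarded_read(), table and size as hypothetical names:

    /* Hypothetical use of the gmb() speculation barrier from
     * <asm/barrier.h>: the load of table[idx] cannot be executed
     * speculatively before the bounds check is resolved.
     */
    #include <linux/errno.h>
    #include <asm/barrier.h>

    static int guarded_read(unsigned long idx, const int *table,
                            unsigned long size)
    {
            if (idx >= size)
                    return -EINVAL;
            gmb();          /* no speculation past this point */
            return table[idx];
    }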
diff --git a/patches.arch/s390-sles15-05-03-scrub-registers.patch b/patches.arch/s390-sles15-05-03-scrub-registers.patch
new file mode 100644
index 0000000000..1fd304f71f
--- /dev/null
+++ b/patches.arch/s390-sles15-05-03-scrub-registers.patch
@@ -0,0 +1,108 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Subject: s390: scrub registers on kernel entry and KVM exit
+Patch-mainline: v4.16-rc1
+Git-commit: 7041d28115e91f2144f811ffe8a195c696b1e1d0
+References: LTC#164304, bsc#1084911
+
+Clear all user space registers on entry to the kernel and all KVM guest
+registers on KVM guest exit if the register does not contain either a
+parameter or a result value.
+
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/entry.S | 47 +++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 47 insertions(+)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -282,6 +282,12 @@ ENTRY(sie64a)
+ sie_exit:
+ lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
+ stmg %r0,%r13,0(%r14) # save guest gprs 0-13
++ xgr %r0,%r0 # clear guest registers to
++ xgr %r1,%r1 # prevent speculative use
++ xgr %r2,%r2
++ xgr %r3,%r3
++ xgr %r4,%r4
++ xgr %r5,%r5
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
+ br %r14
+@@ -317,6 +323,8 @@ ENTRY(system_call)
+ .Lsysc_vtime:
+ UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+ stmg %r0,%r7,__PT_R0(%r11)
++ # clear user controlled register to prevent speculative use
++ xgr %r0,%r0
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
+@@ -587,6 +595,15 @@ ENTRY(pgm_check_handler)
+ 3: stg %r10,__THREAD_last_break(%r14)
+ 4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
++ # clear user controlled registers to prevent speculative use
++ xgr %r0,%r0
++ xgr %r1,%r1
++ xgr %r2,%r2
++ xgr %r3,%r3
++ xgr %r4,%r4
++ xgr %r5,%r5
++ xgr %r6,%r6
++ xgr %r7,%r7
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
+@@ -653,6 +670,16 @@ ENTRY(io_int_handler)
+ lmg %r8,%r9,__LC_IO_OLD_PSW
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+ stmg %r0,%r7,__PT_R0(%r11)
++ # clear user controlled registers to prevent speculative use
++ xgr %r0,%r0
++ xgr %r1,%r1
++ xgr %r2,%r2
++ xgr %r3,%r3
++ xgr %r4,%r4
++ xgr %r5,%r5
++ xgr %r6,%r6
++ xgr %r7,%r7
++ xgr %r10,%r10
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
+@@ -863,6 +890,16 @@ ENTRY(ext_int_handler)
+ lmg %r8,%r9,__LC_EXT_OLD_PSW
+ SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
+ stmg %r0,%r7,__PT_R0(%r11)
++ # clear user controlled registers to prevent speculative use
++ xgr %r0,%r0
++ xgr %r1,%r1
++ xgr %r2,%r2
++ xgr %r3,%r3
++ xgr %r4,%r4
++ xgr %r5,%r5
++ xgr %r6,%r6
++ xgr %r7,%r7
++ xgr %r10,%r10
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
+ stmg %r8,%r9,__PT_PSW(%r11)
+ lghi %r1,__LC_EXT_PARAMS2
+@@ -1029,6 +1066,16 @@ ENTRY(mcck_int_handler)
+ .Lmcck_skip:
+ lghi %r14,__LC_GPREGS_SAVE_AREA+64
+ stmg %r0,%r7,__PT_R0(%r11)
++ # clear user controlled registers to prevent speculative use
++ xgr %r0,%r0
++ xgr %r1,%r1
++ xgr %r2,%r2
++ xgr %r3,%r3
++ xgr %r4,%r4
++ xgr %r5,%r5
++ xgr %r6,%r6
++ xgr %r7,%r7
++ xgr %r10,%r10
+ mvc __PT_R8(64,%r11),0(%r14)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
diff --git a/patches.arch/s390-sles15-05-04-array-nospec.patch b/patches.arch/s390-sles15-05-04-array-nospec.patch
new file mode 100644
index 0000000000..acf765ae8e
--- /dev/null
+++ b/patches.arch/s390-sles15-05-04-array-nospec.patch
@@ -0,0 +1,48 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Subject: s390: add optimized array_index_mask_nospec
+Patch-mainline: v4.16-rc1
+Git-commit: e2dd833389cc4069a96b57bdd24227b5f52288f5
+References: LTC#164304, bsc#1084911
+
+Add an optimized version of the array_index_mask_nospec function for
+s390 based on a compare and a subtract with borrow.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/barrier.h | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/arch/s390/include/asm/barrier.h
++++ b/arch/s390/include/asm/barrier.h
+@@ -48,6 +48,30 @@ do { \
+ #define __smp_mb__before_atomic() barrier()
+ #define __smp_mb__after_atomic() barrier()
+
++/**
++ * array_index_mask_nospec - generate a mask for array_idx() that is
++ * ~0UL when the bounds check succeeds and 0 otherwise
++ * @index: array element index
++ * @size: number of elements in array
++ */
++#define array_index_mask_nospec array_index_mask_nospec
++static inline unsigned long array_index_mask_nospec(unsigned long index,
++ unsigned long size)
++{
++ unsigned long mask;
++
++ if (__builtin_constant_p(size) && size > 0) {
++ asm(" clgr %2,%1\n"
++ " slbgr %0,%0\n"
++ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
++ return mask;
++ }
++ asm(" clgr %1,%2\n"
++ " slbgr %0,%0\n"
++ :"=d" (mask) : "d" (size), "d" (index) :"cc");
++ return ~mask;
++}
++
+ #include <asm-generic/barrier.h>
+
+ #endif /* __ASM_BARRIER_H */
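The mask computed by array_index_mask_nospec() above is normally consumed through the generic array_index_nospec() helper from <linux/nospec.h>, which is not part of this patch. A minimal sketch of a bounds-checked table lookup using it, with example_lookup(), table and size as hypothetical names:

    /* Illustrative sketch: array_index_nospec() ANDs the index with the
     * mask returned by array_index_mask_nospec(), so a mis-speculated
     * access cannot reach beyond the end of the table.
     */
    #include <linux/errno.h>
    #include <linux/nospec.h>

    static int example_lookup(unsigned long index, const int *table,
                              unsigned long size)
    {
            if (index >= size)
                    return -EINVAL;
            index = array_index_nospec(index, size);
            return table[index];
    }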
diff --git a/patches.arch/s390-sles15-05-05-bpoff-user-space.patch b/patches.arch/s390-sles15-05-05-bpoff-user-space.patch
new file mode 100644
index 0000000000..a5109a3e0b
--- /dev/null
+++ b/patches.arch/s390-sles15-05-05-bpoff-user-space.patch
@@ -0,0 +1,210 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Subject: s390: run user space and KVM guests with modified branch prediction
+Patch-mainline: v4.16-rc1
+Git-commit: 6b73044b2b0081ee3dd1cd6eaab7dee552601efb
+References: LTC#164304, bsc#1084911
+
+Define TIF_ISOLATE_BP and TIF_ISOLATE_BP_GUEST and add the necessary
+plumbing in entry.S to be able to run user space and KVM guests with
+limited branch prediction.
+
+To switch a user space process to limited branch prediction the
+s390_isolate_bp() function has to be called, and to run a vCPU of a KVM
+guest associated with the current task with limited branch prediction
+call s390_isolate_bp_guest().
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/processor.h | 3 ++
+ arch/s390/include/asm/thread_info.h | 4 ++
+ arch/s390/kernel/entry.S | 51 ++++++++++++++++++++++++++++++++----
+ arch/s390/kernel/processor.c | 18 ++++++++++++
+ 4 files changed, 71 insertions(+), 5 deletions(-)
+
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -377,6 +377,9 @@ extern void memcpy_absolute(void *, void
+ memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
+ } while (0)
+
++extern int s390_isolate_bp(void);
++extern int s390_isolate_bp_guest(void);
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* __ASM_S390_PROCESSOR_H */
+--- a/arch/s390/include/asm/thread_info.h
++++ b/arch/s390/include/asm/thread_info.h
+@@ -58,6 +58,8 @@ int arch_dup_task_struct(struct task_str
+ #define TIF_GUARDED_STORAGE 4 /* load guarded storage control block */
+ #define TIF_PATCH_PENDING 5 /* pending live patching update */
+ #define TIF_PGSTE 6 /* New mm's will use 4K page tables */
++#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
++#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
+
+ #define TIF_31BIT 16 /* 32bit process */
+ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
+@@ -78,6 +80,8 @@ int arch_dup_task_struct(struct task_str
+ #define _TIF_UPROBE _BITUL(TIF_UPROBE)
+ #define _TIF_GUARDED_STORAGE _BITUL(TIF_GUARDED_STORAGE)
+ #define _TIF_PATCH_PENDING _BITUL(TIF_PATCH_PENDING)
++#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
++#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
+
+ #define _TIF_31BIT _BITUL(TIF_31BIT)
+ #define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -105,6 +105,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCAL
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ j 3f
+ 1: UPDATE_VTIME %r14,%r15,\timer
++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ 2: lg %r15,__LC_ASYNC_STACK # load async stack
+ 3: la %r11,STACK_FRAME_OVERHEAD(%r15)
+ .endm
+@@ -185,6 +186,40 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCAL
+ .popsection
+ .endm
+
++ .macro BPENTER tif_ptr,tif_mask
++ .pushsection .altinstr_replacement, "ax"
++662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
++ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
++ .popsection
++664: TSTMSK \tif_ptr,\tif_mask
++ jz . + 8
++ .long 0xb2e8d000
++ .pushsection .altinstructions, "a"
++ .long 664b - .
++ .long 662b - .
++ .word 82
++ .byte 12
++ .byte 12
++ .popsection
++ .endm
++
++ .macro BPEXIT tif_ptr,tif_mask
++ TSTMSK \tif_ptr,\tif_mask
++ .pushsection .altinstr_replacement, "ax"
++662: jnz . + 8
++ .long 0xb2e8d000
++ .popsection
++664: jz . + 8
++ .long 0xb2e8c000
++ .pushsection .altinstructions, "a"
++ .long 664b - .
++ .long 662b - .
++ .word 82
++ .byte 8
++ .byte 8
++ .popsection
++ .endm
++
+ .section .kprobes.text, "ax"
+ .Ldummy:
+ /*
+@@ -239,9 +274,11 @@ ENTRY(__switch_to)
+ */
+ ENTRY(sie64a)
+ stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
++ lg %r12,__LC_CURRENT
+ stg %r2,__SF_EMPTY(%r15) # save control block pointer
+ stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
+ xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
++ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
+ jno .Lsie_load_guest_gprs
+ brasl %r14,load_fpu_regs # load guest fp/vx regs
+@@ -258,11 +295,12 @@ ENTRY(sie64a)
+ jnz .Lsie_skip
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
+ jo .Lsie_skip # exit if fp/vx regs changed
+- BPON
++ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ .Lsie_entry:
+ sie 0(%r14)
+ .Lsie_exit:
+ BPOFF
++ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+ .Lsie_skip:
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+@@ -322,6 +360,7 @@ ENTRY(system_call)
+ la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
+ .Lsysc_vtime:
+ UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled register to prevent speculative use
+ xgr %r0,%r0
+@@ -360,7 +399,7 @@ ENTRY(system_call)
+ jnz .Lsysc_work # check for work
+ TSTMSK __LC_CPU_FLAGS,_CIF_WORK
+ jnz .Lsysc_work
+- BPON
++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ .Lsysc_restore:
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r10,__PT_R0(%r11)
+@@ -585,6 +624,7 @@ ENTRY(pgm_check_handler)
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+ j 4f
+ 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ lg %r15,__LC_KERNEL_STACK
+ lgr %r14,%r12
+ aghi %r14,__TASK_thread # pointer to thread_struct
+@@ -716,7 +756,7 @@ ENTRY(io_int_handler)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno .Lio_exit_kernel
+- BPON
++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ .Lio_exit_timer:
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+@@ -1101,7 +1141,7 @@ ENTRY(mcck_int_handler)
+ mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
+ tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
+ jno 0f
+- BPON
++ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ 0: lmg %r11,%r15,__PT_R11(%r11)
+@@ -1228,7 +1268,8 @@ cleanup_critical:
+ clg %r9,BASED(.Lsie_crit_mcck_length)
+ jh 1f
+ oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+-1: lg %r9,__SF_EMPTY(%r15) # get control block pointer
++1: BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
++ lg %r9,__SF_EMPTY(%r15) # get control block pointer
+ ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -196,3 +196,21 @@ const struct seq_operations cpuinfo_op =
+ .stop = c_stop,
+ .show = show_cpuinfo,
+ };
++
++int s390_isolate_bp(void)
++{
++ if (!test_facility(82))
++ return -EOPNOTSUPP;
++ set_thread_flag(TIF_ISOLATE_BP);
++ return 0;
++}
++EXPORT_SYMBOL(s390_isolate_bp);
++
++int s390_isolate_bp_guest(void)
++{
++ if (!test_facility(82))
++ return -EOPNOTSUPP;
++ set_thread_flag(TIF_ISOLATE_BP_GUEST);
++ return 0;
++}
++EXPORT_SYMBOL(s390_isolate_bp_guest);
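As the patch above describes, s390_isolate_bp() switches the calling task to limited branch prediction and returns -EOPNOTSUPP when facility 82 is not available. A hedged usage sketch; the calling function and the log message are hypothetical:

    /* Hypothetical caller of the exported s390_isolate_bp() interface.
     * On success the current task has TIF_ISOLATE_BP set and runs with
     * limited branch prediction from the next return to user space on.
     */
    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <asm/processor.h>

    static int isolate_bp_for_current(void)
    {
            int rc = s390_isolate_bp();

            if (rc == -EOPNOTSUPP)
                    pr_info("facility 82 not available, BP isolation skipped\n");
            return rc;
    }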
diff --git a/patches.arch/s390-sles15-05-06-expoline.patch b/patches.arch/s390-sles15-05-06-expoline.patch
new file mode 100644
index 0000000000..e770103615
--- /dev/null
+++ b/patches.arch/s390-sles15-05-06-expoline.patch
@@ -0,0 +1,705 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Subject: s390: introduce execute-trampolines for branches
+Patch-mainline: v4.16-rc1
+Git-commit: f19fbd5ed642dc31c809596412dab1ed56f2f156
+References: LTC#164304, bsc#1084911
+
+Add CONFIG_EXPOLINE to enable the use of the new -mindirect-branch= and
+-mfunction-return= compiler options to create a kernel fortified against
+the spectre v2 attack.
+
+With CONFIG_EXPOLINE=y all indirect branches will be issued with an
+execute type instruction. For z10 or newer the EXRL instruction will
+be used, for older machines the EX instruction. The typical indirect
+call
+
+ basr %r14,%r1
+
+is replaced with a PC relative call to a new thunk
+
+ brasl %r14,__s390x_indirect_jump_r1
+
+The thunk contains the EXRL/EX instruction to the indirect branch
+
+__s390x_indirect_jump_r1:
+ exrl 0,0f
+ j .
+0: br %r1
+
+The detour via the execute type instruction has a performance impact.
+To get rid of the detour the new kernel parameter "nospectre_v2" and
+"spectre_v2=[on,off,auto]" can be used. If the parameter is specified
+the kernel and module code will be patched at runtime.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/Kconfig | 28 ++++++++
+ arch/s390/Makefile | 10 +++
+ arch/s390/include/asm/lowcore.h | 6 +
+ arch/s390/include/asm/nospec-branch.h | 18 +++++
+ arch/s390/kernel/Makefile | 4 +
+ arch/s390/kernel/entry.S | 112 ++++++++++++++++++++++++++--------
+ arch/s390/kernel/module.c | 63 ++++++++++++++++---
+ arch/s390/kernel/nospec-branch.c | 100 ++++++++++++++++++++++++++++++
+ arch/s390/kernel/setup.c | 4 +
+ arch/s390/kernel/smp.c | 1
+ arch/s390/kernel/vmlinux.lds.S | 14 ++++
+ drivers/s390/char/Makefile | 2
+ 12 files changed, 328 insertions(+), 34 deletions(-)
+ create mode 100644 arch/s390/include/asm/nospec-branch.h
+ create mode 100644 arch/s390/kernel/nospec-branch.c
+
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -536,6 +536,34 @@ config KERNEL_NOBP
+
+ If unsure, say N.
+
++config EXPOLINE
++ def_bool n
++ prompt "Avoid speculative indirect branches in the kernel"
++ help
++ Compile the kernel with the expoline compiler options to guard
++ against kernel-to-user data leaks by avoiding speculative indirect
++ branches.
++ Requires a compiler with -mindirect-branch=thunk support for full
++ protection. The kernel may run slower.
++
++ If unsure, say N.
++
++choice
++ prompt "Expoline default"
++ depends on EXPOLINE
++ default EXPOLINE_FULL
++
++config EXPOLINE_OFF
++ bool "spectre_v2=off"
++
++config EXPOLINE_MEDIUM
++ bool "spectre_v2=auto"
++
++config EXPOLINE_FULL
++ bool "spectre_v2=on"
++
++endchoice
++
+ endmenu
+
+ menu "Memory setup"
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -79,6 +79,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamic
+ cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
+ endif
+
++ifdef CONFIG_EXPOLINE
++ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
++ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
++ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
++ CC_FLAGS_EXPOLINE += -mindirect-branch-table
++ export CC_FLAGS_EXPOLINE
++ cflags-y += $(CC_FLAGS_EXPOLINE)
++ endif
++endif
++
+ ifdef CONFIG_FUNCTION_TRACER
+ # make use of hotpatch feature if the compiler supports it
+ cc_hotpatch := -mhotpatch=0,3
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -139,7 +139,11 @@ struct lowcore {
+ /* Per cpu primary space access list */
+ __u32 paste[16]; /* 0x0400 */
+
+- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
++ __u8 pad_0x04c0[0x0500-0x0440]; /* 0x0440 */
++
++ /* br %r1 trampoline */
++ __u16 br_r1_trampoline; /* 0x0500 */
++ __u8 pad_0x0502[0x0e00-0x0502]; /* 0x0502 */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+--- /dev/null
++++ b/arch/s390/include/asm/nospec-branch.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_S390_EXPOLINE_H
++#define _ASM_S390_EXPOLINE_H
++
++#ifndef __ASSEMBLY__
++
++#include <linux/types.h>
++
++extern int nospec_call_disable;
++extern int nospec_return_disable;
++
++void nospec_init_branches(void);
++void nospec_call_revert(s32 *start, s32 *end);
++void nospec_return_revert(s32 *start, s32 *end);
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* _ASM_S390_EXPOLINE_H */
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -28,6 +28,7 @@ UBSAN_SANITIZE_early.o := n
+ #
+ ifneq ($(CC_FLAGS_MARCH),-march=z900)
+ CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
++CFLAGS_REMOVE_als.o += $(CC_FLAGS_EXPOLINE)
+ CFLAGS_als.o += -march=z900
+ AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
+ AFLAGS_head.o += -march=z900
+@@ -60,6 +61,9 @@ obj-y += entry.o reipl.o relocate_kernel
+
+ extra-y += head.o head64.o vmlinux.lds
+
++obj-$(CONFIG_EXPOLINE) += nospec-branch.o
++CFLAGS_REMOVE_expoline.o += $(CC_FLAGS_EXPOLINE)
++
+ obj-$(CONFIG_MODULES) += module.o
+ obj-$(CONFIG_SMP) += smp.o
+ obj-$(CONFIG_SCHED_TOPOLOGY) += topology.o
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -220,6 +220,68 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCAL
+ .popsection
+ .endm
+
++#ifdef CONFIG_EXPOLINE
++
++ .macro GEN_BR_THUNK name,reg,tmp
++ .section .text.\name,"axG",@progbits,\name,comdat
++ .globl \name
++ .hidden \name
++ .type \name,@function
++\name:
++ .cfi_startproc
++#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
++ exrl 0,0f
++#else
++ larl \tmp,0f
++ ex 0,0(\tmp)
++#endif
++ j .
++0: br \reg
++ .cfi_endproc
++ .endm
++
++ GEN_BR_THUNK __s390x_indirect_jump_r1use_r9,%r9,%r1
++ GEN_BR_THUNK __s390x_indirect_jump_r1use_r14,%r14,%r1
++ GEN_BR_THUNK __s390x_indirect_jump_r11use_r14,%r14,%r11
++
++ .macro BASR_R14_R9
++0: brasl %r14,__s390x_indirect_jump_r1use_r9
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 0b-.
++ .popsection
++ .endm
++
++ .macro BR_R1USE_R14
++0: jg __s390x_indirect_jump_r1use_r14
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 0b-.
++ .popsection
++ .endm
++
++ .macro BR_R11USE_R14
++0: jg __s390x_indirect_jump_r11use_r14
++ .pushsection .s390_indirect_branches,"a",@progbits
++ .long 0b-.
++ .popsection
++ .endm
++
++#else /* CONFIG_EXPOLINE */
++
++ .macro BASR_R14_R9
++ basr %r14,%r9
++ .endm
++
++ .macro BR_R1USE_R14
++ br %r14
++ .endm
++
++ .macro BR_R11USE_R14
++ br %r14
++ .endm
++
++#endif /* CONFIG_EXPOLINE */
++
++
+ .section .kprobes.text, "ax"
+ .Ldummy:
+ /*
+@@ -235,7 +297,7 @@ _PIF_WORK = (_PIF_PER_TRAP | _PIF_SYSCAL
+ ENTRY(__bpon)
+ .globl __bpon
+ BPON
+- br %r14
++ BR_R1USE_R14
+
+ /*
+ * Scheduler resume function, called by switch_to
+@@ -260,9 +322,9 @@ ENTRY(__switch_to)
+ mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+ lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
+- bzr %r14
++ jz 0f
+ .insn s,0xb2800000,__LC_LPP # set program parameter
+- br %r14
++0: BR_R1USE_R14
+
+ .L__critical_start:
+
+@@ -328,7 +390,7 @@ sie_exit:
+ xgr %r5,%r5
+ lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
+ lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
+- br %r14
++ BR_R1USE_R14
+ .Lsie_fault:
+ lghi %r14,-EFAULT
+ stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
+@@ -387,7 +449,7 @@ ENTRY(system_call)
+ lgf %r9,0(%r8,%r10) # get system call add.
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
+ jnz .Lsysc_tracesys
+- basr %r14,%r9 # call sys_xxxx
++ BASR_R14_R9 # call sys_xxxx
+ stg %r2,__PT_R2(%r11) # store return value
+
+ .Lsysc_return:
+@@ -564,7 +626,7 @@ ENTRY(system_call)
+ lmg %r3,%r7,__PT_R3(%r11)
+ stg %r7,STACK_FRAME_OVERHEAD(%r15)
+ lg %r2,__PT_ORIG_GPR2(%r11)
+- basr %r14,%r9 # call sys_xxx
++ BASR_R14_R9 # call sys_xxx
+ stg %r2,__PT_R2(%r11) # store return value
+ .Lsysc_tracenogo:
+ TSTMSK __TI_flags(%r12),_TIF_TRACE
+@@ -588,7 +650,7 @@ ENTRY(ret_from_fork)
+ lmg %r9,%r10,__PT_R9(%r11) # load gprs
+ ENTRY(kernel_thread_starter)
+ la %r2,0(%r10)
+- basr %r14,%r9
++ BASR_R14_R9
+ j .Lsysc_tracenogo
+
+ /*
+@@ -665,9 +727,9 @@ ENTRY(pgm_check_handler)
+ nill %r10,0x007f
+ sll %r10,2
+ je .Lpgm_return
+- lgf %r1,0(%r10,%r1) # load address of handler routine
++ lgf %r9,0(%r10,%r1) # load address of handler routine
+ lgr %r2,%r11 # pass pointer to pt_regs
+- basr %r14,%r1 # branch to interrupt-handler
++ BASR_R14_R9 # branch to interrupt-handler
+ .Lpgm_return:
+ LOCKDEP_SYS_EXIT
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+@@ -977,7 +1039,7 @@ ENTRY(psw_idle)
+ stpt __TIMER_IDLE_ENTER(%r2)
+ .Lpsw_idle_lpsw:
+ lpswe __SF_EMPTY(%r15)
+- br %r14
++ BR_R1USE_R14
+ .Lpsw_idle_end:
+
+ /*
+@@ -991,7 +1053,7 @@ ENTRY(save_fpu_regs)
+ lg %r2,__LC_CURRENT
+ aghi %r2,__TASK_thread
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
+- bor %r14
++ jo .Lsave_fpu_regs_exit
+ stfpc __THREAD_FPU_fpc(%r2)
+ lg %r3,__THREAD_FPU_regs(%r2)
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+@@ -1018,7 +1080,8 @@ ENTRY(save_fpu_regs)
+ std 15,120(%r3)
+ .Lsave_fpu_regs_done:
+ oi __LC_CPU_FLAGS+7,_CIF_FPU
+- br %r14
++.Lsave_fpu_regs_exit:
++ BR_R1USE_R14
+ .Lsave_fpu_regs_end:
+ #if IS_ENABLED(CONFIG_KVM)
+ EXPORT_SYMBOL(save_fpu_regs)
+@@ -1038,7 +1101,7 @@ load_fpu_regs:
+ lg %r4,__LC_CURRENT
+ aghi %r4,__TASK_thread
+ TSTMSK __LC_CPU_FLAGS,_CIF_FPU
+- bnor %r14
++ jno .Lload_fpu_regs_exit
+ lfpc __THREAD_FPU_fpc(%r4)
+ TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
+ lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
+@@ -1065,7 +1128,8 @@ load_fpu_regs:
+ ld 15,120(%r4)
+ .Lload_fpu_regs_done:
+ ni __LC_CPU_FLAGS+7,255-_CIF_FPU
+- br %r14
++.Lload_fpu_regs_exit:
++ BR_R1USE_R14
+ .Lload_fpu_regs_end:
+
+ .L__critical_end:
+@@ -1237,7 +1301,7 @@ cleanup_critical:
+ jl 0f
+ clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
+ jl .Lcleanup_load_fpu_regs
+-0: br %r14
++0: BR_R11USE_R14
+
+ .align 8
+ .Lcleanup_table:
+@@ -1273,7 +1337,7 @@ cleanup_critical:
+ ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+- br %r14
++ BR_R11USE_R14
+ #endif
+
+ .Lcleanup_system_call:
+@@ -1326,7 +1390,7 @@ cleanup_critical:
+ stg %r15,56(%r11) # r15 stack pointer
+ # set new psw address and exit
+ larl %r9,.Lsysc_do_svc
+- br %r14
++ BR_R11USE_R14
+ .Lcleanup_system_call_insn:
+ .quad system_call
+ .quad .Lsysc_stmg
+@@ -1338,7 +1402,7 @@ cleanup_critical:
+
+ .Lcleanup_sysc_tif:
+ larl %r9,.Lsysc_tif
+- br %r14
++ BR_R11USE_R14
+
+ .Lcleanup_sysc_restore:
+ # check if stpt has been executed
+@@ -1355,14 +1419,14 @@ cleanup_critical:
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+- br %r14
++ BR_R11USE_R14
+ .Lcleanup_sysc_restore_insn:
+ .quad .Lsysc_exit_timer
+ .quad .Lsysc_done - 4
+
+ .Lcleanup_io_tif:
+ larl %r9,.Lio_tif
+- br %r14
++ BR_R11USE_R14
+
+ .Lcleanup_io_restore:
+ # check if stpt has been executed
+@@ -1376,7 +1440,7 @@ cleanup_critical:
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+- br %r14
++ BR_R11USE_R14
+ .Lcleanup_io_restore_insn:
+ .quad .Lio_exit_timer
+ .quad .Lio_done - 4
+@@ -1429,17 +1493,17 @@ cleanup_critical:
+ # prepare return psw
+ nihh %r8,0xfcfd # clear irq & wait state bits
+ lg %r9,48(%r11) # return from psw_idle
+- br %r14
++ BR_R11USE_R14
+ .Lcleanup_idle_insn:
+ .quad .Lpsw_idle_lpsw
+
+ .Lcleanup_save_fpu_regs:
+ larl %r9,save_fpu_regs
+- br %r14
++ BR_R11USE_R14
+
+ .Lcleanup_load_fpu_regs:
+ larl %r9,load_fpu_regs
+- br %r14
++ BR_R11USE_R14
+
+ /*
+ * Integer constants
+--- a/arch/s390/kernel/module.c
++++ b/arch/s390/kernel/module.c
+@@ -32,6 +32,8 @@
+ #include <linux/moduleloader.h>
+ #include <linux/bug.h>
+ #include <asm/alternative.h>
++#include <asm/nospec-branch.h>
++#include <asm/facility.h>
+
+ #if 0
+ #define DEBUGP printk
+@@ -169,7 +171,11 @@ int module_frob_arch_sections(Elf_Ehdr *
+ me->arch.got_offset = me->core_layout.size;
+ me->core_layout.size += me->arch.got_size;
+ me->arch.plt_offset = me->core_layout.size;
+- me->core_layout.size += me->arch.plt_size;
++ if (me->arch.plt_size) {
++ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_call_disable)
++ me->arch.plt_size += PLT_ENTRY_SIZE;
++ me->core_layout.size += me->arch.plt_size;
++ }
+ return 0;
+ }
+
+@@ -323,9 +329,21 @@ static int apply_rela(Elf_Rela *rela, El
+ unsigned int *ip;
+ ip = me->core_layout.base + me->arch.plt_offset +
+ info->plt_offset;
+- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
+- ip[1] = 0x100a0004;
+- ip[2] = 0x07f10000;
++ ip[0] = 0x0d10e310; /* basr 1,0 */
++ ip[1] = 0x100a0004; /* lg 1,10(1) */
++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
++ !nospec_call_disable) {
++ unsigned int *ij;
++ ij = me->core_layout.base +
++ me->arch.plt_offset +
++ me->arch.plt_size - PLT_ENTRY_SIZE;
++ ip[2] = 0xa7f40000 + /* j __jump_r1 */
++ (unsigned int)(u16)
++ (((unsigned long) ij - 8 -
++ (unsigned long) ip) / 2);
++ } else {
++ ip[2] = 0x07f10000; /* br %r1 */
++ }
+ ip[3] = (unsigned int) (val >> 32);
+ ip[4] = (unsigned int) val;
+ info->plt_initialized = 1;
+@@ -431,15 +449,42 @@ int module_finalize(const Elf_Ehdr *hdr,
+ struct module *me)
+ {
+ const Elf_Shdr *s;
+- char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
++ char *secstrings, *secname;
++ void *aseg;
++
++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
++ !nospec_call_disable && me->arch.plt_size) {
++ unsigned int *ij;
++
++ ij = me->core_layout.base + me->arch.plt_offset +
++ me->arch.plt_size - PLT_ENTRY_SIZE;
++ if (test_facility(35)) {
++ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
++ ij[1] = 0x0005a7f4; /* j . */
++ ij[2] = 0x000007f1; /* br %r1 */
++ } else {
++ ij[0] = 0x44000000 | (unsigned int)
++ offsetof(struct lowcore, br_r1_trampoline);
++ ij[1] = 0xa7f40000; /* j . */
++ }
++ }
+
++ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+- if (!strcmp(".altinstructions", secstrings + s->sh_name)) {
+- /* patch .altinstructions */
+- void *aseg = (void *)s->sh_addr;
++ aseg = (void *) s->sh_addr;
++ secname = secstrings + s->sh_name;
+
++ if (!strcmp(".altinstructions", secname))
++ /* patch .altinstructions */
+ apply_alternatives(aseg, aseg + s->sh_size);
+- }
++
++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
++ (!strcmp(".nospec_call_table", secname)))
++ nospec_call_revert(aseg, aseg + s->sh_size);
++
++ if (IS_ENABLED(CONFIG_EXPOLINE) &&
++ (!strcmp(".nospec_return_table", secname)))
++ nospec_return_revert(aseg, aseg + s->sh_size);
+ }
+
+ jump_label_apply_nops(me);
+--- /dev/null
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -0,0 +1,100 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/module.h>
++#include <asm/nospec-branch.h>
++
++int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF);
++int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL);
++
++static int __init nospectre_v2_setup_early(char *str)
++{
++ nospec_call_disable = 1;
++ nospec_return_disable = 1;
++ return 0;
++}
++early_param("nospectre_v2", nospectre_v2_setup_early);
++
++static int __init spectre_v2_setup_early(char *str)
++{
++ if (str && !strncmp(str, "on", 2)) {
++ nospec_call_disable = 0;
++ nospec_return_disable = 0;
++ }
++ if (str && !strncmp(str, "off", 3)) {
++ nospec_call_disable = 1;
++ nospec_return_disable = 1;
++ }
++ if (str && !strncmp(str, "auto", 4)) {
++ nospec_call_disable = 0;
++ nospec_return_disable = 1;
++ }
++ return 0;
++}
++early_param("spectre_v2", spectre_v2_setup_early);
++
++static void __init_or_module __nospec_revert(s32 *start, s32 *end)
++{
++ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
++ u8 *instr, *thunk, *br;
++ u8 insnbuf[6];
++ s32 *epo;
++
++ /* Second part of the instruction replace is always a nop */
++ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x00, 0x00 }, 4);
++ for (epo = start; epo < end; epo++) {
++ instr = (u8 *) epo + *epo;
++ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
++ type = BRCL_EXPOLINE; /* brcl instruction */
++ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
++ type = BRASL_EXPOLINE; /* brasl instruction */
++ else
++ continue;
++ thunk = instr + (*(int *)(instr + 2)) * 2;
++ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
++ /* exrl %r0,<target-br> */
++ br = thunk + (*(int *)(thunk + 2)) * 2;
++ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
++ thunk[6] == 0x44 && thunk[7] == 0x00 &&
++ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
++ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
++ /* larl %rx,<target br> + ex %r0,0(%rx) */
++ br = thunk + (*(int *)(thunk + 2)) * 2;
++ else
++ continue;
++ if (br[0] != 0x07 || (br[1] & 0xf0) != 0xf0)
++ continue;
++ switch (type) {
++ case BRCL_EXPOLINE:
++ /* brcl to thunk, replace with br + nop */
++ insnbuf[0] = br[0];
++ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++ break;
++ case BRASL_EXPOLINE:
++ /* brasl to thunk, replace with basr + nop */
++ insnbuf[0] = 0x0d;
++ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
++ break;
++ }
++
++ s390_kernel_write(instr, insnbuf, 6);
++ }
++}
++
++void __init_or_module nospec_call_revert(s32 *start, s32 *end)
++{
++ if (nospec_call_disable)
++ __nospec_revert(start, end);
++}
++
++void __init_or_module nospec_return_revert(s32 *start, s32 *end)
++{
++ if (nospec_return_disable)
++ __nospec_revert(start, end);
++}
++
++extern s32 __nospec_call_start[], __nospec_call_end[];
++extern s32 __nospec_return_start[], __nospec_return_end[];
++void __init nospec_init_branches(void)
++{
++ nospec_call_revert(__nospec_call_start, __nospec_call_end);
++ nospec_return_revert(__nospec_return_start, __nospec_return_end);
++}
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -67,6 +67,7 @@
+ #include <asm/sysinfo.h>
+ #include <asm/numa.h>
+ #include <asm/alternative.h>
++#include <asm/nospec-branch.h>
+ #include "entry.h"
+
+ /*
+@@ -384,6 +385,7 @@ static void __init setup_lowcore(void)
+ #ifdef CONFIG_SMP
+ lc->spinlock_lockval = arch_spin_lockval(0);
+ #endif
++ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+
+ set_prefix((u32)(unsigned long) lc);
+ lowcore_ptr[0] = lc;
+@@ -956,6 +958,8 @@ void __init setup_arch(char **cmdline_p)
+ set_preferred_console();
+
+ apply_alternative_instructions();
++ if (IS_ENABLED(CONFIG_EXPOLINE))
++ nospec_init_branches();
+
+ /* Setup zfcpdump support */
+ setup_zfcpdump();
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -223,6 +223,7 @@ static int pcpu_alloc_lowcore(struct pcp
+ lc->mcesad = mcesa_origin | mcesa_bits;
+ lc->cpu_nr = cpu;
+ lc->spinlock_lockval = arch_spin_lockval(cpu);
++ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+ if (vdso_alloc_per_cpu(lc))
+ goto out;
+ lowcore_ptr[cpu] = lc;
+--- a/arch/s390/kernel/vmlinux.lds.S
++++ b/arch/s390/kernel/vmlinux.lds.S
+@@ -127,6 +127,20 @@ SECTIONS
+ *(.altinstr_replacement)
+ }
+
++ /*
++ * Table with the patch locations to undo expolines
++ */
++ .nospec_call_table : {
++ __nospec_call_start = . ;
++ *(.s390_indirect*)
++ __nospec_call_end = . ;
++ }
++ .nospec_return_table : {
++ __nospec_return_start = . ;
++ *(.s390_return*)
++ __nospec_return_end = . ;
++ }
++
+ /* early.c uses stsi, which requires page aligned data. */
+ . = ALIGN(PAGE_SIZE);
+ INIT_DATA_SECTION(0x100)
+--- a/drivers/s390/char/Makefile
++++ b/drivers/s390/char/Makefile
+@@ -16,6 +16,8 @@ CFLAGS_REMOVE_sclp_early_core.o += $(CC_
+ CFLAGS_sclp_early_core.o += -march=z900
+ endif
+
++CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
++
+ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
+ sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
+ sclp_early.o sclp_early_core.o
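To make the expoline mechanism above concrete: when the kernel is built with CC_FLAGS_EXPOLINE, every indirect branch the compiler emits for C code is routed through an execute-type thunk. The sketch below is illustrative only; call_handler() is a hypothetical function, not code from the patch series:

    /* Without expolines the indirect call below compiles to roughly
     * "basr %r14,%r1"; with CONFIG_EXPOLINE=y the compiler emits a call
     * to a thunk such as __s390x_indirect_jump_r1, which performs the
     * branch via EXRL/EX as shown in the commit message above.
     */
    static int call_handler(int (*handler)(void *), void *data)
    {
            return handler(data);
    }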
diff --git a/patches.arch/s390-sles15-05-07-expoline-is-enabled.patch b/patches.arch/s390-sles15-05-07-expoline-is-enabled.patch
new file mode 100644
index 0000000000..34634a4947
--- /dev/null
+++ b/patches.arch/s390-sles15-05-07-expoline-is-enabled.patch
@@ -0,0 +1,30 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Subject: s390: Replace IS_ENABLED(EXPOLINE_*) with IS_ENABLED(CONFIG_EXPOLINE_*)
+Patch-mainline: v4.16-rc5
+Git-commit: 2cb370d615e9fbed9e95ed222c2c8f337181aa90
+References: LTC#164304, bsc#1084911
+
+I've accidentally stumbled upon the IS_ENABLED(EXPOLINE_*) lines, which
+obviously always evaluate to false. Fix this.
+
+Fixes: f19fbd5ed642 ("s390: introduce execute-trampolines for branches")
+Signed-off-by: Eugeniu Rosca <erosca@de.adit-jv.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/nospec-branch.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kernel/nospec-branch.c
++++ b/arch/s390/kernel/nospec-branch.c
+@@ -2,8 +2,8 @@
+ #include <linux/module.h>
+ #include <asm/nospec-branch.h>
+
+-int nospec_call_disable = IS_ENABLED(EXPOLINE_OFF);
+-int nospec_return_disable = !IS_ENABLED(EXPOLINE_FULL);
++int nospec_call_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
++int nospec_return_disable = !IS_ENABLED(CONFIG_EXPOLINE_FULL);
+
+ static int __init nospectre_v2_setup_early(char *str)
+ {
diff --git a/patches.arch/s390-sles15-05-08-critical-section-bpenter.patch b/patches.arch/s390-sles15-05-08-critical-section-bpenter.patch
new file mode 100644
index 0000000000..710aa14f0a
--- /dev/null
+++ b/patches.arch/s390-sles15-05-08-critical-section-bpenter.patch
@@ -0,0 +1,29 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Subject: s390: do not bypass BPENTER for interrupt system calls
+Patch-mainline: v4.16-rc5
+Git-commit: d5feec04fe578c8dbd9e2e1439afc2f0af761ed4
+References: LTC#164304, bsc#1084911
+
+The system call path can be interrupted before the switch back to the
+standard branch prediction with BPENTER has been done. The critical
+section cleanup code skips forward to .Lsysc_do_svc and bypasses the
+BPENTER. In this case the kernel and all subsequent code will run with
+the limited branch prediction.
+
+Fixes: eacf67eb9b32 ("s390: run user space and KVM guests with modified branch prediction")
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/entry.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -1375,6 +1375,7 @@ cleanup_critical:
+ stg %r15,__LC_SYSTEM_TIMER
+ 0: # update accounting time stamp
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
++ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ # set up saved register r11
+ lg %r15,__LC_KERNEL_STACK
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
diff --git a/patches.arch/s390-sles15-05-09-svc-zero-r0.patch b/patches.arch/s390-sles15-05-09-svc-zero-r0.patch
new file mode 100644
index 0000000000..ea450e74e3
--- /dev/null
+++ b/patches.arch/s390-sles15-05-09-svc-zero-r0.patch
@@ -0,0 +1,53 @@
+From: Christian Borntraeger <borntraeger@de.ibm.com>
+Subject: s390/entry.S: fix spurious zeroing of r0
+Patch-mainline: v4.16-rc5
+Git-commit: d3f468963cd6fd6d2aa5e26aed8b24232096d0e1
+References: LTC#164304, bsc#1084911
+
+When a system call is interrupted we might call the critical section
+cleanup handler that re-does some of the operations. When we are between
+.Lsysc_vtime and .Lsysc_do_svc we might also redo the saving of the
+problem state registers r0-r7:
+
+.Lcleanup_system_call:
+[...]
+0: # update accounting time stamp
+ mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+ # set up saved register r11
+ lg %r15,__LC_KERNEL_STACK
+ la %r9,STACK_FRAME_OVERHEAD(%r15)
+ stg %r9,24(%r11) # r11 pt_regs pointer
+ # fill pt_regs
+ mvc __PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
+---> stmg %r0,%r7,__PT_R0(%r9)
+
+The problem now is that we might have already zeroed out r0.
+The fix is to zero out r0 after sysc_do_svc.
+
+Reported-by: Farhan Ali <alifm@linux.vnet.ibm.com>
+Fixes: 7041d28115e91 ("s390: scrub registers on kernel entry and KVM exit")
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/entry.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -424,13 +424,13 @@ ENTRY(system_call)
+ UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ stmg %r0,%r7,__PT_R0(%r11)
+- # clear user controlled register to prevent speculative use
+- xgr %r0,%r0
+ mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
+ mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
+ mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
+ stg %r14,__PT_FLAGS(%r11)
+ .Lsysc_do_svc:
++ # clear user controlled register to prevent speculative use
++ xgr %r0,%r0
+ # load address of system call table
+ lg %r10,__THREAD_sysc_table(%r13,%r12)
+ llgh %r8,__PT_INT_CODE+2(%r11)
diff --git a/patches.arch/s390-sles15-99-02-nobp.patch b/patches.arch/s390-sles15-99-02-nobp.patch
deleted file mode 100644
index 5167555225..0000000000
--- a/patches.arch/s390-sles15-99-02-nobp.patch
+++ /dev/null
@@ -1,227 +0,0 @@
-From: Martin Schwidefsky <schwidefsky@de.ibm.com>
-Subject: s390: add ppa to system call and program check path
-References: bsc#1068032
-Patch-mainline: Not yet, under development
-
-Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-Signed-off-by: Jiri Kosina <jkosina@suse.cz>
----
- arch/s390/include/asm/processor.h | 1
- arch/s390/kernel/alternative.c | 13 ++++++++++
- arch/s390/kernel/entry.S | 47 ++++++++++++++++++++++++++++++++++++++
- arch/s390/kernel/ipl.c | 1
- arch/s390/kernel/smp.c | 2 +
- arch/s390/kernel/vmlinux.lds.S | 3 ++
- 6 files changed, 67 insertions(+)
-
---- a/arch/s390/include/asm/processor.h
-+++ b/arch/s390/include/asm/processor.h
-@@ -88,6 +88,7 @@ void cpu_detect_mhz_feature(void);
- extern const struct seq_operations cpuinfo_op;
- extern int sysctl_ieee_emulation_warnings;
- extern void execve_tail(void);
-+extern void __bpon(void);
-
- /*
- * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
---- a/arch/s390/kernel/alternative.c
-+++ b/arch/s390/kernel/alternative.c
-@@ -14,6 +14,19 @@ static int __init disable_alternative_in
-
- early_param("noaltinstr", disable_alternative_instructions);
-
-+extern struct alt_instr __alt_nobp[], __alt_nobp_end[];
-+static int __init nobp_setup(char *str)
-+{
-+ bool enabled;
-+ int rc;
-+
-+ rc = kstrtobool(str, &enabled);
-+ if (!rc && enabled)
-+ apply_alternatives(__alt_nobp, __alt_nobp_end);
-+ return rc;
-+}
-+__setup("nobp=", nobp_setup);
-+
- struct brcl_insn {
- u16 opc;
- s32 disp;
---- a/arch/s390/kernel/entry.S
-+++ b/arch/s390/kernel/entry.S
-@@ -157,6 +157,34 @@ _PIF_WORK = (_PIF_PER_TRAP)
- tm off+\addr, \mask
- .endm
-
-+ .macro BPOFF
-+ .pushsection .altinstr_replacement, "ax"
-+660: .long 0xb2e8c000
-+ .popsection
-+661: .long 0x47000000
-+ .pushsection .altnobp, "a"
-+ .long 661b - .
-+ .long 660b - .
-+ .word 82
-+ .byte 4
-+ .byte 4
-+ .popsection
-+ .endm
-+
-+ .macro BPON
-+ .pushsection .altinstr_replacement, "ax"
-+662: .long 0xb2e8d000
-+ .popsection
-+663: .long 0x47000000
-+ .pushsection .altnobp, "a"
-+ .long 663b - .
-+ .long 662b - .
-+ .word 82
-+ .byte 4
-+ .byte 4
-+ .popsection
-+ .endm
-+
- .section .kprobes.text, "ax"
- .Ldummy:
- /*
-@@ -169,6 +197,11 @@ _PIF_WORK = (_PIF_PER_TRAP)
- */
- nop 0
-
-+ENTRY(__bpon)
-+ .globl __bpon
-+ BPON
-+ br %r14
-+
- /*
- * Scheduler resume function, called by switch_to
- * gpr2 = (task_struct *) prev
-@@ -225,9 +258,11 @@ ENTRY(sie64a)
- jnz .Lsie_skip
- TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- jo .Lsie_skip # exit if fp/vx regs changed
-+ BPON
- .Lsie_entry:
- sie 0(%r14)
- .Lsie_skip:
-+ BPOFF
- ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
- lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
- .Lsie_done:
-@@ -270,6 +305,7 @@ EXPORT_SYMBOL(sie_exit)
-
- ENTRY(system_call)
- stpt __LC_SYNC_ENTER_TIMER
-+ BPOFF
- .Lsysc_stmg:
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r12,__LC_CURRENT
-@@ -316,6 +352,7 @@ ENTRY(system_call)
- jnz .Lsysc_work # check for work
- TSTMSK __LC_CPU_FLAGS,_CIF_WORK
- jnz .Lsysc_work
-+ BPON
- .Lsysc_restore:
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
-@@ -500,6 +537,7 @@ ENTRY(kernel_thread_starter)
-
- ENTRY(pgm_check_handler)
- stpt __LC_SYNC_ENTER_TIMER
-+ BPOFF
- stmg %r8,%r15,__LC_SAVE_AREA_SYNC
- lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_CURRENT
-@@ -595,6 +633,7 @@ ENTRY(pgm_check_handler)
- ENTRY(io_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
-+ BPOFF
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
-@@ -635,9 +674,13 @@ ENTRY(io_int_handler)
- lg %r14,__LC_VDSO_PER_CPU
- lmg %r0,%r10,__PT_R0(%r11)
- mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
-+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
-+ jno .Lio_exit_kernel
-+ BPON
- .Lio_exit_timer:
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
-+.Lio_exit_kernel:
- lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
- .Lio_done:
-@@ -800,6 +843,7 @@ ENTRY(io_int_handler)
- ENTRY(ext_int_handler)
- STCK __LC_INT_CLOCK
- stpt __LC_ASYNC_ENTER_TIMER
-+ BPOFF
- stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
- lg %r12,__LC_CURRENT
- larl %r13,cleanup_critical
-@@ -838,6 +882,7 @@ ENTRY(psw_idle)
- .Lpsw_idle_stcctm:
- #endif
- oi __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
-+ BPON
- STCK __CLOCK_IDLE_ENTER(%r2)
- stpt __TIMER_IDLE_ENTER(%r2)
- .Lpsw_idle_lpsw:
-@@ -940,6 +985,7 @@ load_fpu_regs:
- */
- ENTRY(mcck_int_handler)
- STCK __LC_MCCK_CLOCK
-+ BPOFF
- la %r1,4095 # revalidate r1
- spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
-@@ -995,6 +1041,7 @@ ENTRY(mcck_int_handler)
- mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
- tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
- jno 0f
-+ BPON
- stpt __LC_EXIT_TIMER
- mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
- 0: lmg %r11,%r15,__PT_R11(%r11)
---- a/arch/s390/kernel/ipl.c
-+++ b/arch/s390/kernel/ipl.c
-@@ -564,6 +564,7 @@ static struct kset *ipl_kset;
-
- static void __ipl_run(void *unused)
- {
-+ __bpon();
- diag308(DIAG308_LOAD_CLEAR, NULL);
- if (MACHINE_IS_VM)
- __cpcmd("IPL", NULL, 0, NULL);
---- a/arch/s390/kernel/smp.c
-+++ b/arch/s390/kernel/smp.c
-@@ -327,6 +327,7 @@ static void pcpu_delegate(struct pcpu *p
- mem_assign_absolute(lc->restart_fn, (unsigned long) func);
- mem_assign_absolute(lc->restart_data, (unsigned long) data);
- mem_assign_absolute(lc->restart_source, source_cpu);
-+ __bpon();
- asm volatile(
- "0: sigp 0,%0,%2 # sigp restart to target cpu\n"
- " brc 2,0b # busy, try again\n"
-@@ -902,6 +903,7 @@ void __cpu_die(unsigned int cpu)
- void __noreturn cpu_die(void)
- {
- idle_task_exit();
-+ __bpon();
- pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
- for (;;) ;
- }
---- a/arch/s390/kernel/vmlinux.lds.S
-+++ b/arch/s390/kernel/vmlinux.lds.S
-@@ -115,6 +115,9 @@ SECTIONS
- __alt_instructions = .;
- *(.altinstructions)
- __alt_instructions_end = .;
-+ __alt_nobp = .;
-+ *(.altnobp)
-+ __alt_nobp_end = .;
- }
-
- /*
diff --git a/series.conf b/series.conf
index 8ec2f5d075..7c150f0cc7 100644
--- a/series.conf
+++ b/series.conf
@@ -1,4 +1,4 @@
-# Kernel patches configuration file
+ # Kernel patches configuration file
# vim: set ts=8 sw=8 noet:
#
# There are three kinds of rules (see guards.1 for details):
@@ -10537,6 +10537,12 @@
patches.drivers/drm-i915-bios-add-DP-max-link-rate-to-VBT-child-devi
patches.drivers/0005-iommu-vt-d-use-domain-instead-of-cache-fetching
patches.drivers/ACPI-sbshc-remove-raw-pointer-from-printk-message
+ patches.arch/s390-sles15-05-03-scrub-registers.patch
+ patches.arch/s390-sles15-05-04-array-nospec.patch
+ patches.arch/s390-alternative-use-a-copy-of-the-facility-bit-mask.patch
+ patches.arch/s390-add-options-to-change-branch-prediction-behavio.patch
+ patches.arch/s390-sles15-05-05-bpoff-user-space.patch
+ patches.arch/s390-sles15-05-06-expoline.patch
patches.fixes/iscsi-target-make-sure-to-wake-up-sleeping-login-wor.patch
patches.fixes/0012-xprtrdma-Fix-calculation-of-ri_max_send_sges.patch
patches.fixes/0013-xprtrdma-Fix-BUG-after-a-device-removal.patch
@@ -10821,6 +10827,9 @@
patches.drivers/qla2xxx-do-not-check-login_state-if-no-loop-id-is-as.patch
patches.drivers/qla2xxx-ensure-async-flags-are-reset-correctly.patch
patches.drivers/scsi-qla2xxx-Fix-FC-NVMe-LUN-discovery.patch
+ patches.arch/s390-sles15-05-07-expoline-is-enabled.patch
+ patches.arch/s390-sles15-05-08-critical-section-bpenter.patch
+ patches.arch/s390-sles15-05-09-svc-zero-r0.patch
patches.drivers/ALSA-hda-Fix-a-wrong-FIXUP-for-alc289-on-Dell-machin
patches.drivers/ALSA-hda-realtek-Add-support-headset-mode-for-DELL-W
patches.drivers/ALSA-hda-realtek-Add-headset-mode-support-for-Dell-2
@@ -11120,9 +11129,10 @@
########################################################
patches.arch/s390-sles15-message-catalog.patch
patches.drivers/s390-sles15-00-04-04-kmsg-add-VNIC-Characteristics-msg-documentation.patch
- patches.arch/s390-sles15-99-02-nobp.patch
patches.arch/s390-sles15-03-01-rwlock.patch
+ patches.arch/s390-sles15-05-01-gmb.patch
+
########################################################
# VM/FS patches
########################################################