author    Petr Tesarik <ptesarik@suse.cz>  2019-11-06 14:59:49 +0100
committer Petr Tesarik <ptesarik@suse.cz>  2019-11-06 14:59:49 +0100
commit    493622b4884ffbc57acc7cb6eccd76ef72abaec2 (patch)
tree      5660e346f121a460b057600a2d54724c5d8d613b
parent    2a4e83015ba29049af00e48593309b6a96ddee57 (diff)
parent    fb8543dfd412c63715a116fca13de6b1f3aab317 (diff)
Merge branch 'users/ykaukab/SLE15-SP1/for-next' into SLE15-SP1_EMBARGO (tags: rpm-4.12.14-197.26--sle15-sp1-updates, rpm-4.12.14-197.26)
Pull ARM64 fixes from Yousaf Kaukab
-rw-r--r--  config/arm64/default | 2
-rw-r--r--  patches.suse/0001-arm64-capabilities-Update-prototype-for-enable-call-.patch | 46
-rw-r--r--  patches.suse/0004-arm64-capabilities-Prepare-for-fine-grained-capabili.patch | 42
-rw-r--r--  patches.suse/0005-arm64-capabilities-Add-flags-to-handle-the-conflicts.patch | 40
-rw-r--r--  patches.suse/0006-arm64-Add-ARCH_WORKAROUND_2-probing.patch | 4
-rw-r--r--  patches.suse/0008-kabi-arm64-reserve-space-in-cpu_hwcaps-and-cpu_hwcap.patch | 10
-rw-r--r--  patches.suse/0021-arm64-Delay-enabling-hardware-DBM-feature.patch | 12
-rw-r--r--  patches.suse/0026-arm64-mm-Support-Common-Not-Private-translations.patch | 16
-rw-r--r--  patches.suse/KVM-arm-arm64-Clean-dcache-to-PoC-when-changing-PTE-.patch | 63
-rw-r--r--  patches.suse/KVM-arm-arm64-Detangle-kvm_mmu.h-from-kvm_hyp.h.patch | 126
-rw-r--r--  patches.suse/KVM-arm-arm64-Drop-vcpu-parameter-from-guest-cache-m.patch | 127
-rw-r--r--  patches.suse/KVM-arm-arm64-Limit-icache-invalidation-to-prefetch-.patch | 148
-rw-r--r--  patches.suse/KVM-arm-arm64-Only-clean-the-dcache-on-translation-f.patch | 50
-rw-r--r--  patches.suse/KVM-arm-arm64-Preserve-Exec-permission-across-R-W-pe.patch | 142
-rw-r--r--  patches.suse/KVM-arm-arm64-Split-dcache-icache-flushing.patch | 192
-rw-r--r--  patches.suse/KVM-arm64-Set-SCTLR_EL2.DSSBS-if-SSBD-is-forcefully-.patch | 65
-rw-r--r--  patches.suse/arm-KVM-Add-optimized-PIPT-icache-flushing.patch | 108
-rw-r--r--  patches.suse/arm64-Add-decoding-macros-for-CP15_32-and-CP15_64-tr.patch | 80
-rw-r--r--  patches.suse/arm64-Add-part-number-for-Neoverse-N1.patch | 40
-rw-r--r--  patches.suse/arm64-Add-silicon-errata.txt-entry-for-ARM-erratum-1.patch | 33
-rw-r--r--  patches.suse/arm64-Add-support-for-new-control-bits-CTR_EL0.DIC-a.patch | 199
-rw-r--r--  patches.suse/arm64-Apply-ARM64_ERRATUM_1188873-to-Neoverse-N1.patch | 87
-rw-r--r--  patches.suse/arm64-Fake-the-IminLine-size-on-systems-affected-by-.patch | 65
-rw-r--r--  patches.suse/arm64-Fix-mismatched-cache-line-size-detection.patch | 88
-rw-r--r--  patches.suse/arm64-Fix-silly-typo-in-comment.patch | 35
-rw-r--r--  patches.suse/arm64-Force-SSBS-on-context-switch.patch | 113
-rw-r--r--  patches.suse/arm64-Handle-erratum-1418040-as-a-superset-of-erratu.patch | 134
-rw-r--r--  patches.suse/arm64-Handle-mismatched-cache-type.patch | 73
-rw-r--r--  patches.suse/arm64-Introduce-sysreg_clear_set.patch | 60
-rw-r--r--  patches.suse/arm64-KVM-Add-invalidate_icache_range-helper.patch | 142
-rw-r--r--  patches.suse/arm64-KVM-PTE-PMD-S2-XN-bit-definition.patch | 38
-rw-r--r--  patches.suse/arm64-Make-ARM64_ERRATUM_1188873-depend-on-COMPAT.patch | 33
-rw-r--r--  patches.suse/arm64-Restrict-ARM64_ERRATUM_1188873-mitigation-to-A.patch | 96
-rw-r--r--  patches.suse/arm64-arch_timer-Add-workaround-for-ARM-erratum-1188.patch | 123
-rw-r--r--  patches.suse/arm64-arch_timer-avoid-unused-function-warning.patch | 43
-rw-r--r--  patches.suse/arm64-compat-Add-CNTFRQ-trap-handler.patch | 64
-rw-r--r--  patches.suse/arm64-compat-Add-CNTVCT-trap-handler.patch | 67
-rw-r--r--  patches.suse/arm64-compat-Add-condition-code-checks-and-IT-advanc.patch | 123
-rw-r--r--  patches.suse/arm64-compat-Add-cp15_32-and-cp15_64-handler-arrays.patch | 71
-rw-r--r--  patches.suse/arm64-compat-Add-separate-CP15-trapping-hook.patch | 82
-rw-r--r--  patches.suse/arm64-compat-Workaround-Neoverse-N1-1542419-for-comp.patch | 54
-rw-r--r--  patches.suse/arm64-cpu-Move-errata-and-feature-enable-callbacks-c.patch | 144
-rw-r--r--  patches.suse/arm64-cpu_errata-Remove-ARM64_MISMATCHED_CACHE_LINE_.patch | 73
-rw-r--r--  patches.suse/arm64-cpufeature-Convert-hook_lock-to-raw_spin_lock_.patch | 51
-rw-r--r--  patches.suse/arm64-cpufeature-Detect-SSBS-and-advertise-to-usersp.patch | 175
-rw-r--r--  patches.suse/arm64-cpufeature-Fix-handling-of-CTR_EL0.IDC-field.patch | 191
-rw-r--r--  patches.suse/arm64-cpufeature-Trap-CTR_EL0-access-only-where-it-i.patch | 38
-rw-r--r--  patches.suse/arm64-cpufeature-ctr-Fix-cpu-capability-check-for-la.patch | 74
-rw-r--r--  patches.suse/arm64-entry-Allow-handling-of-undefined-instructions.patch | 58
-rw-r--r--  patches.suse/arm64-errata-Hide-CTR_EL0.DIC-on-systems-affected-by.patch | 148
-rw-r--r--  patches.suse/arm64-fix-SSBS-sanitization.patch | 71
-rw-r--r--  patches.suse/arm64-force_signal_inject-WARN-if-called-from-kernel.patch | 42
-rw-r--r--  patches.suse/arm64-kill-change_cpacr.patch | 55
-rw-r--r--  patches.suse/arm64-kill-config_sctlr_el1.patch | 105
-rw-r--r--  patches.suse/arm64-move-SCTLR_EL-1-2-assertions-to-asm-sysreg.h.patch | 100
-rw-r--r--  patches.suse/arm64-ssbd-Add-support-for-PSTATE.SSBS-rather-than-t.patch | 292
-rw-r--r--  patches.suse/arm64-ssbd-Drop-ifdefs-for-PR_SPEC_STORE_BYPASS.patch | 40
-rw-r--r--  patches.suse/irqchip-gic-v3-its-Fix-LPI-release-for-Multi-MSI-dev.patch | 53
-rw-r--r--  patches.suse/irqchip-gic-v3-its-Fix-command-queue-pointer-compari.patch | 148
-rw-r--r--  patches.suse/irqchip-gic-v3-its-Fix-misuse-of-GENMASK-macro.patch | 35
-rw-r--r--  series.conf | 53
61 files changed, 5012 insertions(+), 70 deletions(-)
diff --git a/config/arm64/default b/config/arm64/default
index cbb928417b..e39e8cad95 100644
--- a/config/arm64/default
+++ b/config/arm64/default
@@ -540,6 +540,8 @@ CONFIG_ARM64_ERRATUM_834220=y
CONFIG_ARM64_ERRATUM_845719=y
CONFIG_ARM64_ERRATUM_843419=y
CONFIG_ARM64_ERRATUM_1024718=y
+CONFIG_ARM64_ERRATUM_1418040=y
+CONFIG_ARM64_ERRATUM_1542419=y
CONFIG_CAVIUM_ERRATUM_22375=y
CONFIG_CAVIUM_ERRATUM_23144=y
CONFIG_CAVIUM_ERRATUM_23154=y
diff --git a/patches.suse/0001-arm64-capabilities-Update-prototype-for-enable-call-.patch b/patches.suse/0001-arm64-capabilities-Update-prototype-for-enable-call-.patch
index 04726091e2..fca88cdb66 100644
--- a/patches.suse/0001-arm64-capabilities-Update-prototype-for-enable-call-.patch
+++ b/patches.suse/0001-arm64-capabilities-Update-prototype-for-enable-call-.patch
@@ -205,7 +205,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
-@@ -342,7 +339,7 @@ const struct arm64_cpu_capabilities arm6
+@@ -351,7 +348,7 @@ const struct arm64_cpu_capabilities arm6
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
.matches = has_mismatched_cache_line_size,
.def_scope = SCOPE_LOCAL_CPU,
@@ -214,7 +214,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
{
-@@ -374,27 +371,27 @@ const struct arm64_cpu_capabilities arm6
+@@ -383,27 +380,27 @@ const struct arm64_cpu_capabilities arm6
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -247,7 +247,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
{
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
-@@ -403,7 +400,7 @@ const struct arm64_cpu_capabilities arm6
+@@ -412,7 +409,7 @@ const struct arm64_cpu_capabilities arm6
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
@@ -256,7 +256,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
{
.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
-@@ -412,12 +409,12 @@ const struct arm64_cpu_capabilities arm6
+@@ -421,12 +418,12 @@ const struct arm64_cpu_capabilities arm6
{
.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
@@ -271,7 +271,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
#endif
{
-@@ -435,8 +432,8 @@ void verify_local_cpu_errata_workarounds
+@@ -444,8 +441,8 @@ void verify_local_cpu_errata_workarounds
for (; caps->matches; caps++) {
if (cpus_have_cap(caps->capability)) {
@@ -284,7 +284,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
" at boot time\n",
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
-@@ -897,7 +897,8 @@ static bool unmap_kernel_at_el0(const st
+@@ -909,7 +909,8 @@ static bool unmap_kernel_at_el0(const st
ID_AA64PFR0_CSV3_SHIFT);
}
@@ -294,7 +294,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
{
typedef void (kpti_remap_fn)(int, int, phys_addr_t);
extern kpti_remap_fn idmap_kpti_install_ng_mappings;
-@@ -907,7 +908,7 @@ static int kpti_install_ng_mappings(void
+@@ -919,7 +920,7 @@ static int kpti_install_ng_mappings(void
int cpu = smp_processor_id();
if (kpti_applied)
@@ -303,7 +303,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
-@@ -918,7 +919,7 @@ static int kpti_install_ng_mappings(void
+@@ -930,7 +931,7 @@ static int kpti_install_ng_mappings(void
if (!cpu)
kpti_applied = true;
@@ -312,7 +312,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
}
static int __init parse_kpti(char *str)
-@@ -935,7 +936,7 @@ static int __init parse_kpti(char *str)
+@@ -947,7 +948,7 @@ static int __init parse_kpti(char *str)
__setup("kpti=", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -321,7 +321,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
{
/*
* Copy register values that aren't redirected by hardware.
-@@ -947,8 +948,6 @@ static int cpu_copy_el2regs(void *__unus
+@@ -959,8 +960,6 @@ static int cpu_copy_el2regs(void *__unus
*/
if (!alternatives_applied)
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
@@ -330,7 +330,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
}
static const struct arm64_cpu_capabilities arm64_features[] = {
-@@ -972,7 +971,7 @@ static const struct arm64_cpu_capabiliti
+@@ -984,7 +983,7 @@ static const struct arm64_cpu_capabiliti
.field_pos = ID_AA64MMFR1_PAN_SHIFT,
.sign = FTR_UNSIGNED,
.min_field_value = 1,
@@ -339,7 +339,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
-@@ -1020,7 +1019,7 @@ static const struct arm64_cpu_capabiliti
+@@ -1032,7 +1031,7 @@ static const struct arm64_cpu_capabiliti
.capability = ARM64_HAS_VIRT_HOST_EXTN,
.def_scope = SCOPE_SYSTEM,
.matches = runs_at_el2,
@@ -348,7 +348,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
{
.desc = "32-bit EL0 Support",
-@@ -1044,7 +1043,7 @@ static const struct arm64_cpu_capabiliti
+@@ -1056,7 +1055,7 @@ static const struct arm64_cpu_capabiliti
.capability = ARM64_UNMAP_KERNEL_AT_EL0,
.def_scope = SCOPE_SYSTEM,
.matches = unmap_kernel_at_el0,
@@ -357,7 +357,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
#endif
{
-@@ -1064,7 +1063,7 @@ static const struct arm64_cpu_capabiliti
+@@ -1076,7 +1075,7 @@ static const struct arm64_cpu_capabiliti
.field_pos = ID_AA64PFR0_SVE_SHIFT,
.min_field_value = ID_AA64PFR0_SVE,
.matches = has_cpuid_feature,
@@ -366,7 +366,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
},
#endif /* CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_RAS_EXTN
-@@ -1077,7 +1076,7 @@ static const struct arm64_cpu_capabiliti
+@@ -1089,7 +1088,7 @@ static const struct arm64_cpu_capabiliti
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_RAS_SHIFT,
.min_field_value = ID_AA64PFR0_RAS_V1,
@@ -374,8 +374,8 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+ .cpu_enable = cpu_clear_disr,
},
#endif /* CONFIG_ARM64_RAS_EXTN */
- {},
-@@ -1224,6 +1223,14 @@ void update_cpu_capabilities(const struc
+ {
+@@ -1248,6 +1247,14 @@ void update_cpu_capabilities(const struc
}
}
@@ -390,7 +390,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
/*
* Run through the enabled capabilities and enable() it on all active
* CPUs
-@@ -1239,14 +1246,15 @@ void __init enable_cpu_capabilities(cons
+@@ -1263,14 +1270,15 @@ void __init enable_cpu_capabilities(cons
/* Ensure cpus_have_const_cap(num) works */
static_branch_enable(&cpu_hwcap_keys[num]);
@@ -408,7 +408,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
}
}
}
-@@ -1289,8 +1297,8 @@ verify_local_cpu_features(const struct a
+@@ -1313,8 +1321,8 @@ verify_local_cpu_features(const struct a
smp_processor_id(), caps->desc);
cpu_die_early();
}
@@ -419,7 +419,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
}
}
-@@ -1513,10 +1521,8 @@ static int __init enable_mrs_emulation(v
+@@ -1537,10 +1545,8 @@ static int __init enable_mrs_emulation(v
core_initcall(enable_mrs_emulation);
@@ -465,7 +465,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
-@@ -427,10 +428,9 @@ asmlinkage void __exception do_undefinst
+@@ -368,10 +369,9 @@ asmlinkage void __exception do_undefinst
force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}
@@ -479,7 +479,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#define __user_cache_maint(insn, address, res) \
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
-@@ -789,7 +789,7 @@ asmlinkage int __exception do_debug_exce
+@@ -821,7 +821,7 @@ asmlinkage int __exception do_debug_exce
NOKPROBE_SYMBOL(do_debug_exception);
#ifdef CONFIG_ARM64_PAN
@@ -488,7 +488,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
{
/*
* We modify PSTATE. This won't work from irq context as the PSTATE
-@@ -799,6 +799,5 @@ int cpu_enable_pan(void *__unused)
+@@ -831,6 +831,5 @@ int cpu_enable_pan(void *__unused)
config_sctlr_el1(SCTLR_EL1_SPAN, 0);
asm(SET_PSTATE_PAN(1));
diff --git a/patches.suse/0004-arm64-capabilities-Prepare-for-fine-grained-capabili.patch b/patches.suse/0004-arm64-capabilities-Prepare-for-fine-grained-capabili.patch
index 1d980aecf7..e59d09b047 100644
--- a/patches.suse/0004-arm64-capabilities-Prepare-for-fine-grained-capabili.patch
+++ b/patches.suse/0004-arm64-capabilities-Prepare-for-fine-grained-capabili.patch
@@ -70,8 +70,8 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
---
arch/arm64/include/asm/cpufeature.h | 105 +++++++++++++++++++++++++++++++++---
arch/arm64/kernel/cpu_errata.c | 6 +-
- arch/arm64/kernel/cpufeature.c | 36 ++++++------
- 3 files changed, 120 insertions(+), 27 deletions(-)
+ arch/arm64/kernel/cpufeature.c | 40 ++++++-------
+ 3 files changed, 122 insertions(+), 29 deletions(-)
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -217,7 +217,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = is_affected_midr_range, \
.midr_model = model, \
.midr_range_min = 0, \
-@@ -338,7 +338,7 @@ const struct arm64_cpu_capabilities arm6
+@@ -347,7 +347,7 @@ const struct arm64_cpu_capabilities arm6
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
.matches = has_mismatched_cache_line_size,
@@ -228,7 +228,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
-@@ -963,7 +963,7 @@ static const struct arm64_cpu_capabiliti
+@@ -975,7 +975,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
@@ -237,7 +237,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
-@@ -974,7 +974,7 @@ static const struct arm64_cpu_capabiliti
+@@ -986,7 +986,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "Privileged Access Never",
.capability = ARM64_HAS_PAN,
@@ -246,7 +246,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.field_pos = ID_AA64MMFR1_PAN_SHIFT,
-@@ -987,7 +987,7 @@ static const struct arm64_cpu_capabiliti
+@@ -999,7 +999,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "LSE atomic instructions",
.capability = ARM64_HAS_LSE_ATOMICS,
@@ -255,7 +255,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
-@@ -998,14 +998,14 @@ static const struct arm64_cpu_capabiliti
+@@ -1010,14 +1010,14 @@ static const struct arm64_cpu_capabiliti
{
.desc = "Software prefetching using PRFM",
.capability = ARM64_HAS_NO_HW_PREFETCH,
@@ -272,7 +272,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.field_pos = ID_AA64MMFR2_UAO_SHIFT,
-@@ -1019,21 +1019,21 @@ static const struct arm64_cpu_capabiliti
+@@ -1031,21 +1031,21 @@ static const struct arm64_cpu_capabiliti
#ifdef CONFIG_ARM64_PAN
{
.capability = ARM64_ALT_PAN_NOT_UAO,
@@ -297,7 +297,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
-@@ -1043,14 +1043,14 @@ static const struct arm64_cpu_capabiliti
+@@ -1055,14 +1055,14 @@ static const struct arm64_cpu_capabiliti
{
.desc = "Reduced HYP mapping offset",
.capability = ARM64_HYP_OFFSET_LOW,
@@ -314,7 +314,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = unmap_kernel_at_el0,
.cpu_enable = kpti_install_ng_mappings,
},
-@@ -1058,15 +1058,15 @@ static const struct arm64_cpu_capabiliti
+@@ -1070,15 +1070,15 @@ static const struct arm64_cpu_capabiliti
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -332,7 +332,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_SVE_SHIFT,
-@@ -1079,7 +1079,7 @@ static const struct arm64_cpu_capabiliti
+@@ -1091,7 +1091,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "RAS Extension Support",
.capability = ARM64_HAS_RAS_EXTN,
@@ -341,7 +341,21 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
-@@ -1091,16 +1091,16 @@ static const struct arm64_cpu_capabiliti
+@@ -1103,28 +1103,28 @@ static const struct arm64_cpu_capabiliti
+ {
+ .desc = "Data cache clean to the PoU not required for I/D coherence",
+ .capability = ARM64_HAS_CACHE_IDC,
+- .def_scope = SCOPE_SYSTEM,
++ .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+ .matches = has_cache_idc,
+ },
+ {
+ .desc = "Instruction cache invalidation not required for I/D coherence",
+ .capability = ARM64_HAS_CACHE_DIC,
+- .def_scope = SCOPE_SYSTEM,
++ .type = ARM64_CPUCAP_SCOPE_SYSTEM,
+ .matches = has_cache_dic,
+ },
{},
};
@@ -361,7 +375,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.hwcap = cap, \
}
-@@ -1196,7 +1196,7 @@ static void __init setup_elf_hwcaps(cons
+@@ -1220,7 +1220,7 @@ static void __init setup_elf_hwcaps(cons
/* We support emulation of accesses to CPU ID feature registers */
elf_hwcap |= HWCAP_CPUID;
for (; hwcaps->matches; hwcaps++)
@@ -370,7 +384,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
cap_set_elf_hwcap(hwcaps);
}
-@@ -1223,7 +1223,7 @@ static void update_cpu_capabilities(cons
+@@ -1247,7 +1247,7 @@ static void update_cpu_capabilities(cons
const char *info)
{
for (; caps->matches; caps++) {
diff --git a/patches.suse/0005-arm64-capabilities-Add-flags-to-handle-the-conflicts.patch b/patches.suse/0005-arm64-capabilities-Add-flags-to-handle-the-conflicts.patch
index bcc41c826d..0a685ae444 100644
--- a/patches.suse/0005-arm64-capabilities-Add-flags-to-handle-the-conflicts.patch
+++ b/patches.suse/0005-arm64-capabilities-Add-flags-to-handle-the-conflicts.patch
@@ -47,8 +47,8 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
---
arch/arm64/include/asm/cpufeature.h | 68 ++++++++++++++++++++++++++++++++++++
arch/arm64/kernel/cpu_errata.c | 6 +--
- arch/arm64/kernel/cpufeature.c | 28 +++++++-------
- 3 files changed, 85 insertions(+), 17 deletions(-)
+ arch/arm64/kernel/cpufeature.c | 32 ++++++++--------
+ 3 files changed, 87 insertions(+), 19 deletions(-)
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -167,7 +167,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = is_affected_midr_range, \
.midr_model = model, \
.midr_range_min = 0, \
-@@ -338,7 +338,7 @@ const struct arm64_cpu_capabilities arm6
+@@ -347,7 +347,7 @@ const struct arm64_cpu_capabilities arm6
.desc = "Mismatched cache line size",
.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
.matches = has_mismatched_cache_line_size,
@@ -178,7 +178,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
-@@ -963,7 +963,7 @@ static const struct arm64_cpu_capabiliti
+@@ -975,7 +975,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
@@ -187,7 +187,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
-@@ -974,7 +974,7 @@ static const struct arm64_cpu_capabiliti
+@@ -986,7 +986,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "Privileged Access Never",
.capability = ARM64_HAS_PAN,
@@ -196,7 +196,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.field_pos = ID_AA64MMFR1_PAN_SHIFT,
-@@ -987,7 +987,7 @@ static const struct arm64_cpu_capabiliti
+@@ -999,7 +999,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "LSE atomic instructions",
.capability = ARM64_HAS_LSE_ATOMICS,
@@ -205,7 +205,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
-@@ -998,14 +998,14 @@ static const struct arm64_cpu_capabiliti
+@@ -1010,14 +1010,14 @@ static const struct arm64_cpu_capabiliti
{
.desc = "Software prefetching using PRFM",
.capability = ARM64_HAS_NO_HW_PREFETCH,
@@ -222,7 +222,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR2_EL1,
.field_pos = ID_AA64MMFR2_UAO_SHIFT,
-@@ -1019,21 +1019,21 @@ static const struct arm64_cpu_capabiliti
+@@ -1031,21 +1031,21 @@ static const struct arm64_cpu_capabiliti
#ifdef CONFIG_ARM64_PAN
{
.capability = ARM64_ALT_PAN_NOT_UAO,
@@ -247,7 +247,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
-@@ -1043,14 +1043,14 @@ static const struct arm64_cpu_capabiliti
+@@ -1055,14 +1055,14 @@ static const struct arm64_cpu_capabiliti
{
.desc = "Reduced HYP mapping offset",
.capability = ARM64_HYP_OFFSET_LOW,
@@ -264,7 +264,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = unmap_kernel_at_el0,
.cpu_enable = kpti_install_ng_mappings,
},
-@@ -1058,14 +1058,14 @@ static const struct arm64_cpu_capabiliti
+@@ -1070,14 +1070,14 @@ static const struct arm64_cpu_capabiliti
{
/* FP/SIMD is not implemented */
.capability = ARM64_HAS_NO_FPSIMD,
@@ -281,7 +281,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.capability = ARM64_SVE,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
-@@ -1079,7 +1079,7 @@ static const struct arm64_cpu_capabiliti
+@@ -1091,7 +1091,7 @@ static const struct arm64_cpu_capabiliti
{
.desc = "RAS Extension Support",
.capability = ARM64_HAS_RAS_EXTN,
@@ -290,7 +290,23 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.sign = FTR_UNSIGNED,
-@@ -1094,7 +1094,7 @@ static const struct arm64_cpu_capabiliti
+@@ -1103,13 +1103,13 @@ static const struct arm64_cpu_capabiliti
+ {
+ .desc = "Data cache clean to the PoU not required for I/D coherence",
+ .capability = ARM64_HAS_CACHE_IDC,
+- .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cache_idc,
+ },
+ {
+ .desc = "Instruction cache invalidation not required for I/D coherence",
+ .capability = ARM64_HAS_CACHE_DIC,
+- .type = ARM64_CPUCAP_SCOPE_SYSTEM,
++ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cache_dic,
+ },
+ {},
+@@ -1118,7 +1118,7 @@ static const struct arm64_cpu_capabiliti
#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
{ \
.desc = #cap, \
diff --git a/patches.suse/0006-arm64-Add-ARCH_WORKAROUND_2-probing.patch b/patches.suse/0006-arm64-Add-ARCH_WORKAROUND_2-probing.patch
index 4801d3cebc..5c5d37f7bb 100644
--- a/patches.suse/0006-arm64-Add-ARCH_WORKAROUND_2-probing.patch
+++ b/patches.suse/0006-arm64-Add-ARCH_WORKAROUND_2-probing.patch
@@ -52,8 +52,8 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#define ARM64_HW_DBM 26
+#define ARM64_SSBD 27
#define ARM64_WORKAROUND_843419 29
-
- #define ARM64_NCAPS 30
+ #define ARM64_HAS_CACHE_IDC 32
+ #define ARM64_HAS_CACHE_DIC 33
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -253,6 +253,67 @@ void __init arm64_update_smccc_conduit(s
diff --git a/patches.suse/0008-kabi-arm64-reserve-space-in-cpu_hwcaps-and-cpu_hwcap.patch b/patches.suse/0008-kabi-arm64-reserve-space-in-cpu_hwcaps-and-cpu_hwcap.patch
index 8f8308fc9b..fc6b9d12c6 100644
--- a/patches.suse/0008-kabi-arm64-reserve-space-in-cpu_hwcaps-and-cpu_hwcap.patch
+++ b/patches.suse/0008-kabi-arm64-reserve-space-in-cpu_hwcaps-and-cpu_hwcap.patch
@@ -17,12 +17,12 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
-@@ -49,6 +49,7 @@
- #define ARM64_HAS_CNP 28
- #define ARM64_WORKAROUND_843419 29
+@@ -55,6 +55,7 @@
+ #define ARM64_MISMATCHED_CACHE_TYPE 34
+ #define ARM64_WORKAROUND_1542419 35
--#define ARM64_NCAPS 30
-+/* kabi: reserve 30 - 40 for future cpu capabilities */
+-#define ARM64_NCAPS 36
++/* kabi: reserve 36 - 40 for future cpu capabilities */
+#define ARM64_NCAPS 40
#endif /* __ASM_CPUCAPS_H */
diff --git a/patches.suse/0021-arm64-Delay-enabling-hardware-DBM-feature.patch b/patches.suse/0021-arm64-Delay-enabling-hardware-DBM-feature.patch
index 580ed66750..ca224e7c40 100644
--- a/patches.suse/0021-arm64-Delay-enabling-hardware-DBM-feature.patch
+++ b/patches.suse/0021-arm64-Delay-enabling-hardware-DBM-feature.patch
@@ -46,11 +46,11 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#define ARM64_HAS_RAS_EXTN 25
+#define ARM64_HW_DBM 26
#define ARM64_WORKAROUND_843419 29
-
- #define ARM64_NCAPS 30
+ #define ARM64_HAS_CACHE_IDC 32
+ #define ARM64_HAS_CACHE_DIC 33
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
-@@ -959,6 +959,57 @@ static void cpu_copy_el2regs(const struc
+@@ -971,6 +971,57 @@ static void cpu_copy_el2regs(const struc
write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
@@ -108,10 +108,10 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
-@@ -1098,6 +1149,26 @@ static const struct arm64_cpu_capabiliti
- .cpu_enable = cpu_clear_disr,
+@@ -1122,6 +1173,26 @@ static const struct arm64_cpu_capabiliti
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cache_dic,
},
- #endif /* CONFIG_ARM64_RAS_EXTN */
+#ifdef CONFIG_ARM64_HW_AFDBM
+ {
+ /*
diff --git a/patches.suse/0026-arm64-mm-Support-Common-Not-Private-translations.patch b/patches.suse/0026-arm64-mm-Support-Common-Not-Private-translations.patch
index b3a051b688..c67c350273 100644
--- a/patches.suse/0026-arm64-mm-Support-Common-Not-Private-translations.patch
+++ b/patches.suse/0026-arm64-mm-Support-Common-Not-Private-translations.patch
@@ -84,8 +84,8 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#define ARM64_SSBD 27
+#define ARM64_HAS_CNP 28
#define ARM64_WORKAROUND_843419 29
-
- #define ARM64_NCAPS 30
+ #define ARM64_SSBS 30
+ #define ARM64_HAS_CACHE_IDC 32
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -510,6 +510,12 @@ static inline bool system_supports_sve(v
@@ -133,7 +133,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
-@@ -209,6 +209,8 @@
+@@ -211,6 +211,8 @@
#define PHYS_MASK_SHIFT (CONFIG_ARM64_PA_BITS)
#define PHYS_MASK ((UL(1) << PHYS_MASK_SHIFT) - 1)
@@ -160,8 +160,8 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
/*
* NOTE: Any changes to the visibility of features should be kept in
-@@ -859,6 +861,20 @@ static bool has_no_fpsimd(const struct a
- ID_AA64PFR0_FP_SHIFT) < 0;
+@@ -875,6 +877,20 @@ static bool has_cache_dic(const struct a
+ return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
}
+static bool __maybe_unused
@@ -181,8 +181,8 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
-@@ -1188,6 +1204,19 @@ static const struct arm64_cpu_capabiliti
- .cpu_enable = cpu_enable_hw_dbm,
+@@ -1293,6 +1309,19 @@ static const struct arm64_cpu_capabiliti
+ .cpu_enable = cpu_enable_ssbs,
},
#endif
+#ifdef CONFIG_ARM64_CNP
@@ -201,7 +201,7 @@ Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
{},
};
-@@ -1628,6 +1657,11 @@ cpufeature_pan_not_uao(const struct arm6
+@@ -1734,6 +1763,11 @@ cpufeature_pan_not_uao(const struct arm6
return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}
diff --git a/patches.suse/KVM-arm-arm64-Clean-dcache-to-PoC-when-changing-PTE-.patch b/patches.suse/KVM-arm-arm64-Clean-dcache-to-PoC-when-changing-PTE-.patch
new file mode 100644
index 0000000000..f37f3019fe
--- /dev/null
+++ b/patches.suse/KVM-arm-arm64-Clean-dcache-to-PoC-when-changing-PTE-.patch
@@ -0,0 +1,63 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 23 Aug 2018 09:58:27 +0100
+Subject: KVM: arm/arm64: Clean dcache to PoC when changing PTE due to CoW
+
+Git-commit: 694556d54f354d3fe43bb2e61fd6103cca2638a4
+Patch-mainline: v4.19-rc3
+References: jsc#ECO-561,jsc#SLE-10671
+
+When triggering a CoW, we unmap the RO page via an MMU notifier
+(invalidate_range_start), and then populate the new PTE using another
+one (change_pte). In the meantime, we'll have copied the old page
+into the new one.
+
+The problem is that the data for the new page is sitting in the
+cache, and should the guest have an uncached mapping to that page
+(or its MMU off), following accesses will bypass the cache.
+
+In a way, this is similar to what happens on a translation fault:
+We need to clean the page to the PoC before mapping it. So let's just
+do that.
+
+This fixes a KVM unit test regression observed on a HiSilicon platform,
+and subsequently reproduced on Seattle.
+
+Fixes: a9c0e12ebee5 ("KVM: arm/arm64: Only clean the dcache on translation fault")
+Cc: stable@vger.kernel.org # v4.16+
+Reported-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ virt/kvm/arm/mmu.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 91aaf73b00df..111a660be3be 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1860,13 +1860,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
+ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+ {
+ unsigned long end = hva + PAGE_SIZE;
++ kvm_pfn_t pfn = pte_pfn(pte);
+ pte_t stage2_pte;
+
+ if (!kvm->arch.pgd)
+ return;
+
+ trace_kvm_set_spte_hva(hva);
+- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
++
++ /*
++ * We've moved a page around, probably through CoW, so let's treat it
++ * just like a translation fault and clean the cache to the PoC.
++ */
++ clean_dcache_guest_page(pfn, PAGE_SIZE);
++ stage2_pte = pfn_pte(pfn, PAGE_S2);
+ handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+ }
+
+--
+2.16.4
+
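The essence of the fix above is ordering: the copied page must be cleaned to the Point of Coherency before the stage-2 PTE becomes visible, so a guest running with caches off (or with an uncached mapping) reads the new data instead of stale memory. A minimal, self-contained userspace sketch of that ordering follows; the helpers are stubs standing in for the real KVM and arch primitives, not kernel API:

    #include <stdio.h>
    #include <stddef.h>

    /* Stub for the arm64 "clean dcache by VA to PoC" (DC CVAC) loop. */
    static void clean_dcache_to_poc(void *va, size_t size)
    {
            printf("clean %zu bytes at %p to PoC\n", size, va);
    }

    /* Stub for installing the stage-2 mapping for the guest. */
    static void install_stage2_pte(unsigned long pfn)
    {
            printf("map pfn %lu into the guest\n", pfn);
    }

    /* Mirrors the ordering of the patched kvm_set_spte_hva(): clean
     * first, then map, exactly as on a translation fault. */
    static void set_spte_after_cow(void *new_page, unsigned long pfn)
    {
            clean_dcache_to_poc(new_page, 4096);
            install_stage2_pte(pfn);
    }

    int main(void)
    {
            static char page[4096];

            set_spte_after_cow(page, 42);
            return 0;
    }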
diff --git a/patches.suse/KVM-arm-arm64-Detangle-kvm_mmu.h-from-kvm_hyp.h.patch b/patches.suse/KVM-arm-arm64-Detangle-kvm_mmu.h-from-kvm_hyp.h.patch
new file mode 100644
index 0000000000..3019fa81c9
--- /dev/null
+++ b/patches.suse/KVM-arm-arm64-Detangle-kvm_mmu.h-from-kvm_hyp.h.patch
@@ -0,0 +1,126 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:14 +0100
+Subject: KVM: arm/arm64: Detangle kvm_mmu.h from kvm_hyp.h
+
+Git-commit: d68119864ef4b253a585a1c897cda6936d4b5de9
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+kvm_hyp.h has an odd dependency on kvm_mmu.h, which makes the
+opposite inclusion impossible. Let's start with breaking that
+useless dependency.
+
+Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm/include/asm/kvm_hyp.h | 1 -
+ arch/arm/kvm/hyp/switch.c | 1 +
+ arch/arm/kvm/hyp/tlb.c | 1 +
+ arch/arm64/include/asm/kvm_hyp.h | 1 -
+ arch/arm64/kvm/hyp/debug-sr.c | 1 +
+ arch/arm64/kvm/hyp/switch.c | 1 +
+ arch/arm64/kvm/hyp/tlb.c | 1 +
+ virt/kvm/arm/hyp/vgic-v2-sr.c | 1 +
+ 8 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
+index ab20ffa8b9e7..76368de7237b 100644
+--- a/arch/arm/include/asm/kvm_hyp.h
++++ b/arch/arm/include/asm/kvm_hyp.h
+@@ -21,7 +21,6 @@
+ #include <linux/compiler.h>
+ #include <linux/kvm_host.h>
+ #include <asm/cp15.h>
+-#include <asm/kvm_mmu.h>
+ #include <asm/vfp.h>
+
+ #define __hyp_text __section(.hyp.text) notrace
+diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
+index 330c9ce34ba5..ae45ae96aac2 100644
+--- a/arch/arm/kvm/hyp/switch.c
++++ b/arch/arm/kvm/hyp/switch.c
+@@ -18,6 +18,7 @@
+
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_hyp.h>
++#include <asm/kvm_mmu.h>
+
+ __asm__(".arch_extension virt");
+
+diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
+index 6d810af2d9fd..c0edd450e104 100644
+--- a/arch/arm/kvm/hyp/tlb.c
++++ b/arch/arm/kvm/hyp/tlb.c
+@@ -19,6 +19,7 @@
+ */
+
+ #include <asm/kvm_hyp.h>
++#include <asm/kvm_mmu.h>
+
+ /**
+ * Flush per-VMID TLBs
+diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
+index 08d3bb66c8b7..f26f9cd70c72 100644
+--- a/arch/arm64/include/asm/kvm_hyp.h
++++ b/arch/arm64/include/asm/kvm_hyp.h
+@@ -20,7 +20,6 @@
+
+ #include <linux/compiler.h>
+ #include <linux/kvm_host.h>
+-#include <asm/kvm_mmu.h>
+ #include <asm/sysreg.h>
+
+ #define __hyp_text __section(.hyp.text) notrace
+diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
+index 321c9c05dd9e..360455f86346 100644
+--- a/arch/arm64/kvm/hyp/debug-sr.c
++++ b/arch/arm64/kvm/hyp/debug-sr.c
+@@ -21,6 +21,7 @@
+ #include <asm/debug-monitors.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_hyp.h>
++#include <asm/kvm_mmu.h>
+
+ #define read_debug(r,n) read_sysreg(r##n##_el1)
+ #define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index f7c651f3a8c0..f3d8bed096f5 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -21,6 +21,7 @@
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_hyp.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/fpsimd.h>
+ #include <asm/debug-monitors.h>
+
+diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
+index 73464a96c365..131c7772703c 100644
+--- a/arch/arm64/kvm/hyp/tlb.c
++++ b/arch/arm64/kvm/hyp/tlb.c
+@@ -16,6 +16,7 @@
+ */
+
+ #include <asm/kvm_hyp.h>
++#include <asm/kvm_mmu.h>
+ #include <asm/tlbflush.h>
+
+ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
+index d7fd46fe9efb..4fe6e797e8b3 100644
+--- a/virt/kvm/arm/hyp/vgic-v2-sr.c
++++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
+@@ -21,6 +21,7 @@
+
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_hyp.h>
++#include <asm/kvm_mmu.h>
+
+ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
+ {
+--
+2.16.4
+
diff --git a/patches.suse/KVM-arm-arm64-Drop-vcpu-parameter-from-guest-cache-m.patch b/patches.suse/KVM-arm-arm64-Drop-vcpu-parameter-from-guest-cache-m.patch
new file mode 100644
index 0000000000..c35605f877
--- /dev/null
+++ b/patches.suse/KVM-arm-arm64-Drop-vcpu-parameter-from-guest-cache-m.patch
@@ -0,0 +1,127 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:22 +0100
+Subject: KVM: arm/arm64: Drop vcpu parameter from guest cache maintenance
+ operations
+
+Git-commit: 17ab9d57debaa53d665651e425a0efc4a893c039
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+The vcpu parameter isn't used for anything, and gets in the way of
+further cleanups. Let's get rid of it.
+
+Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm/include/asm/kvm_mmu.h | 7 ++-----
+ arch/arm64/include/asm/kvm_mmu.h | 7 ++-----
+ virt/kvm/arm/mmu.c | 18 ++++++++----------
+ 3 files changed, 12 insertions(+), 20 deletions(-)
+
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index aab64fe52146..bc70a1f0f42d 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -150,9 +150,7 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+ return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
+ }
+
+-static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
+- kvm_pfn_t pfn,
+- unsigned long size)
++static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+ {
+ /*
+ * Clean the dcache to the Point of Coherency.
+@@ -177,8 +175,7 @@ static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
+ }
+ }
+
+-static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+- kvm_pfn_t pfn,
++static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
+ unsigned long size)
+ {
+ u32 iclsz;
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 126abefffe7f..06f1f9794679 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -252,17 +252,14 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+ return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+ }
+
+-static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
+- kvm_pfn_t pfn,
+- unsigned long size)
++static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+ {
+ void *va = page_address(pfn_to_page(pfn));
+
+ kvm_flush_dcache_to_poc(va, size);
+ }
+
+-static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+- kvm_pfn_t pfn,
++static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
+ unsigned long size)
+ {
+ if (icache_is_aliasing()) {
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index b83b5a8442bb..a1ea43fa75cf 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1276,16 +1276,14 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+ }
+
+-static void clean_dcache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+- unsigned long size)
++static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
+ {
+- __clean_dcache_guest_page(vcpu, pfn, size);
++ __clean_dcache_guest_page(pfn, size);
+ }
+
+-static void invalidate_icache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+- unsigned long size)
++static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
+ {
+- __invalidate_icache_guest_page(vcpu, pfn, size);
++ __invalidate_icache_guest_page(pfn, size);
+ }
+
+ static void kvm_send_hwpoison_signal(unsigned long address,
+@@ -1421,11 +1419,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ }
+
+ if (fault_status != FSC_PERM)
+- clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
++ clean_dcache_guest_page(pfn, PMD_SIZE);
+
+ if (exec_fault) {
+ new_pmd = kvm_s2pmd_mkexec(new_pmd);
+- invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
++ invalidate_icache_guest_page(pfn, PMD_SIZE);
+ } else if (fault_status == FSC_PERM) {
+ /* Preserve execute if XN was already cleared */
+ if (stage2_is_exec(kvm, fault_ipa))
+@@ -1443,11 +1441,11 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ }
+
+ if (fault_status != FSC_PERM)
+- clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
++ clean_dcache_guest_page(pfn, PAGE_SIZE);
+
+ if (exec_fault) {
+ new_pte = kvm_s2pte_mkexec(new_pte);
+- invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
++ invalidate_icache_guest_page(pfn, PAGE_SIZE);
+ } else if (fault_status == FSC_PERM) {
+ /* Preserve execute if XN was already cleared */
+ if (stage2_is_exec(kvm, fault_ipa))
+--
+2.16.4
+
diff --git a/patches.suse/KVM-arm-arm64-Limit-icache-invalidation-to-prefetch-.patch b/patches.suse/KVM-arm-arm64-Limit-icache-invalidation-to-prefetch-.patch
new file mode 100644
index 0000000000..bbaab5169f
--- /dev/null
+++ b/patches.suse/KVM-arm-arm64-Limit-icache-invalidation-to-prefetch-.patch
@@ -0,0 +1,148 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:19 +0100
+Subject: KVM: arm/arm64: Limit icache invalidation to prefetch aborts
+
+Git-commit: d0e22b4ac3ba23c611739f554392bf5e217df49f
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+We've so far eagerly invalidated the icache, no matter how
+the page was faulted in (data or prefetch abort).
+
+But we can easily track execution by setting the XN bits
+in the S2 page tables, get the prefetch abort at HYP and
+perform the icache invalidation at that time only.
+
+As the instruction working set is pretty small compared to the
+data set for most VMs, this is likely to save some traffic
+(especially as the invalidation is broadcast).
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm/include/asm/kvm_mmu.h | 12 ++++++++++++
+ arch/arm/include/asm/pgtable.h | 4 ++--
+ arch/arm64/include/asm/kvm_mmu.h | 12 ++++++++++++
+ arch/arm64/include/asm/pgtable-prot.h | 4 ++--
+ virt/kvm/arm/mmu.c | 19 +++++++++++++++----
+ 5 files changed, 43 insertions(+), 8 deletions(-)
+
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -85,6 +85,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pm
+ return pmd;
+ }
+
++static inline pte_t kvm_s2pte_mkexec(pte_t pte)
++{
++ pte_val(pte) &= ~L_PTE_XN;
++ return pte;
++}
++
++static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
++{
++ pmd_val(pmd) &= ~PMD_SECT_XN;
++ return pmd;
++}
++
+ static inline void kvm_set_s2pte_readonly(pte_t *pte)
+ {
+ pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
+--- a/arch/arm/include/asm/pgtable.h
++++ b/arch/arm/include/asm/pgtable.h
+@@ -102,8 +102,8 @@ extern pgprot_t pgprot_s2_device;
+ #define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
+ #define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
+ #define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
+-#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
++#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
++#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)
+
+ #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
+ #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -173,6 +173,18 @@ static inline pmd_t kvm_s2pmd_mkwrite(pm
+ return pmd;
+ }
+
++static inline pte_t kvm_s2pte_mkexec(pte_t pte)
++{
++ pte_val(pte) &= ~PTE_S2_XN;
++ return pte;
++}
++
++static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
++{
++ pmd_val(pmd) &= ~PMD_S2_XN;
++ return pmd;
++}
++
+ static inline void kvm_set_s2pte_readonly(pte_t *pte)
+ {
+ pteval_t old_pteval, pteval;
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -67,8 +67,8 @@
+ #define PAGE_HYP_RO __pgprot(_HYP_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
+ #define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
+
+-#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+-#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
++#define PAGE_S2 __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY | PTE_S2_XN)
++#define PAGE_S2_DEVICE __pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
+
+ #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+ #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1290,7 +1290,7 @@ static int user_mem_abort(struct kvm_vcp
+ unsigned long fault_status)
+ {
+ int ret;
+- bool write_fault, writable, hugetlb = false, force_pte = false;
++ bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
+ unsigned long mmu_seq;
+ gfn_t gfn = fault_ipa >> PAGE_SHIFT;
+ struct kvm *kvm = vcpu->kvm;
+@@ -1302,7 +1302,10 @@ static int user_mem_abort(struct kvm_vcp
+ unsigned long flags = 0;
+
+ write_fault = kvm_is_write_fault(vcpu);
+- if (fault_status == FSC_PERM && !write_fault) {
++ exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
++ VM_BUG_ON(write_fault && exec_fault);
++
++ if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+ kvm_err("Unexpected L2 read permission error\n");
+ return -EFAULT;
+ }
+@@ -1396,7 +1399,11 @@ static int user_mem_abort(struct kvm_vcp
+ kvm_set_pfn_dirty(pfn);
+ }
+ clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
+- invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
++
++ if (exec_fault) {
++ new_pmd = kvm_s2pmd_mkexec(new_pmd);
++ invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
++ }
+
+ ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+ } else {
+@@ -1408,7 +1415,11 @@ static int user_mem_abort(struct kvm_vcp
+ mark_page_dirty(kvm, gfn);
+ }
+ clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
+- invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
++
++ if (exec_fault) {
++ new_pte = kvm_s2pte_mkexec(new_pte);
++ invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
++ }
+
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
+ }
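What the message above describes is lazy icache maintenance: every page is mapped execute-never (XN) at first, and only when the guest actually executes from it, which surfaces as a prefetch abort, does KVM invalidate the icache and clear XN. Below is a self-contained sketch of that state machine; the names are illustrative, and the real logic lives in user_mem_abort():

    #include <stdio.h>
    #include <stdbool.h>

    struct s2_pte {
            bool valid;
            bool xn;        /* execute-never */
    };

    static void invalidate_icache(unsigned long pfn)
    {
            /* Broadcast operation; deferring it is the whole point. */
            printf("icache invalidate for pfn %lu\n", pfn);
    }

    /* Data abort: map the page but keep it non-executable. */
    static void handle_data_fault(struct s2_pte *pte)
    {
            pte->valid = true;
            pte->xn = true;                 /* no icache work yet */
    }

    /* Prefetch (instruction) abort: pay the icache cost only now. */
    static void handle_exec_fault(struct s2_pte *pte, unsigned long pfn)
    {
            invalidate_icache(pfn);
            pte->valid = true;
            pte->xn = false;                /* cf. kvm_s2pte_mkexec() */
    }

    int main(void)
    {
            struct s2_pte pte = { false, true };

            handle_data_fault(&pte);        /* guest reads the page */
            handle_exec_fault(&pte, 42);    /* guest later executes it */
            printf("pte: valid=%d xn=%d\n", pte.valid, pte.xn);
            return 0;
    }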
diff --git a/patches.suse/KVM-arm-arm64-Only-clean-the-dcache-on-translation-f.patch b/patches.suse/KVM-arm-arm64-Only-clean-the-dcache-on-translation-f.patch
new file mode 100644
index 0000000000..3bdd696419
--- /dev/null
+++ b/patches.suse/KVM-arm-arm64-Only-clean-the-dcache-on-translation-f.patch
@@ -0,0 +1,50 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:20 +0100
+Subject: KVM: arm/arm64: Only clean the dcache on translation fault
+
+Git-commit: a9c0e12ebee56ef06b7eccdbc73bab71d0018df8
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+The only case where we actually need to perform dcache maintenance
+is when we map the page for the first time, and subsequent permission
+faults do not require cache maintenance. Let's make it conditional
+on not being a permission fault (and thus a translation fault).
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ virt/kvm/arm/mmu.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 0417c8e2a81c..f956efbd933d 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1400,7 +1400,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ new_pmd = kvm_s2pmd_mkwrite(new_pmd);
+ kvm_set_pfn_dirty(pfn);
+ }
+- clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
++
++ if (fault_status != FSC_PERM)
++ clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
+
+ if (exec_fault) {
+ new_pmd = kvm_s2pmd_mkexec(new_pmd);
+@@ -1416,7 +1418,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ kvm_set_pfn_dirty(pfn);
+ mark_page_dirty(kvm, gfn);
+ }
+- clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
++
++ if (fault_status != FSC_PERM)
++ clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
+
+ if (exec_fault) {
+ new_pte = kvm_s2pte_mkexec(new_pte);
+--
+2.16.4
+
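The rule the patch encodes: dcache cleaning is only needed when the page is mapped for the first time (a translation fault); a later permission fault upgrades access to the same physical page, whose contents are unchanged, so repeating the clean is pure overhead. A condensed sketch of the gate with stub helpers (FSC_PERM is the kernel's permission-fault status code; the rest is illustrative):

    #include <stdio.h>

    enum fault_status { FSC_TRANSLATION, FSC_PERM };

    static void clean_dcache_guest_page(unsigned long pfn, unsigned long size)
    {
            printf("clean dcache: pfn=%lu size=%lu\n", pfn, size);
    }

    static void finalize_mapping(enum fault_status status,
                                 unsigned long pfn, unsigned long size)
    {
            /* Only a first-time (translation) fault can expose stale
             * dcache contents; a permission fault maps the same data. */
            if (status != FSC_PERM)
                    clean_dcache_guest_page(pfn, size);

            /* ... install or upgrade the stage-2 entry here ... */
    }

    int main(void)
    {
            finalize_mapping(FSC_TRANSLATION, 42, 4096);    /* cleans */
            finalize_mapping(FSC_PERM, 42, 4096);           /* skips it */
            return 0;
    }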
diff --git a/patches.suse/KVM-arm-arm64-Preserve-Exec-permission-across-R-W-pe.patch b/patches.suse/KVM-arm-arm64-Preserve-Exec-permission-across-R-W-pe.patch
new file mode 100644
index 0000000000..189b40467a
--- /dev/null
+++ b/patches.suse/KVM-arm-arm64-Preserve-Exec-permission-across-R-W-pe.patch
@@ -0,0 +1,142 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:21 +0100
+Subject: KVM: arm/arm64: Preserve Exec permission across R/W permission faults
+
+Git-commit: 7a3796d2ef5bb948f709467eef1bf96edbfc67a0
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+So far, we lose the Exec property whenever we take permission
+faults, as we always reconstruct the PTE/PMD from scratch. This
+can be counterproductive as we can end up with the following
+fault sequence:
+
+ X -> RO -> ROX -> RW -> RWX
+
+Instead, we can look up the existing PTE/PMD and clear the XN bit in the
+new entry if it was already cleared in the old one, leading to a much
+nicer fault sequence:
+
+ X -> ROX -> RWX
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm/include/asm/kvm_mmu.h | 10 ++++++++++
+ arch/arm64/include/asm/kvm_mmu.h | 10 ++++++++++
+ virt/kvm/arm/mmu.c | 27 +++++++++++++++++++++++++++
+ 3 files changed, 47 insertions(+)
+
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 4d7a54cbb3ab..aab64fe52146 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -107,6 +107,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
+ return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
+ }
+
++static inline bool kvm_s2pte_exec(pte_t *pte)
++{
++ return !(pte_val(*pte) & L_PTE_XN);
++}
++
+ static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+ {
+ pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
+@@ -117,6 +122,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+ return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
+ }
+
++static inline bool kvm_s2pmd_exec(pmd_t *pmd)
++{
++ return !(pmd_val(*pmd) & PMD_SECT_XN);
++}
++
+ static inline bool kvm_page_empty(void *ptr)
+ {
+ struct page *ptr_page = virt_to_page(ptr);
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 1e1b20cb348f..126abefffe7f 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -203,6 +203,11 @@ static inline bool kvm_s2pte_readonly(pte_t *pte)
+ return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+ }
+
++static inline bool kvm_s2pte_exec(pte_t *pte)
++{
++ return !(pte_val(*pte) & PTE_S2_XN);
++}
++
+ static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+ {
+ kvm_set_s2pte_readonly((pte_t *)pmd);
+@@ -213,6 +218,11 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+ return kvm_s2pte_readonly((pte_t *)pmd);
+ }
+
++static inline bool kvm_s2pmd_exec(pmd_t *pmd)
++{
++ return !(pmd_val(*pmd) & PMD_S2_XN);
++}
++
+ static inline bool kvm_page_empty(void *ptr)
+ {
+ struct page *ptr_page = virt_to_page(ptr);
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index f956efbd933d..b83b5a8442bb 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -926,6 +926,25 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+ return 0;
+ }
+
++static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
++{
++ pmd_t *pmdp;
++ pte_t *ptep;
++
++ pmdp = stage2_get_pmd(kvm, NULL, addr);
++ if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
++ return false;
++
++ if (pmd_thp_or_huge(*pmdp))
++ return kvm_s2pmd_exec(pmdp);
++
++ ptep = pte_offset_kernel(pmdp, addr);
++ if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
++ return false;
++
++ return kvm_s2pte_exec(ptep);
++}
++
+ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+ phys_addr_t addr, const pte_t *new_pte,
+ unsigned long flags)
+@@ -1407,6 +1426,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ if (exec_fault) {
+ new_pmd = kvm_s2pmd_mkexec(new_pmd);
+ invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
++ } else if (fault_status == FSC_PERM) {
++ /* Preserve execute if XN was already cleared */
++ if (stage2_is_exec(kvm, fault_ipa))
++ new_pmd = kvm_s2pmd_mkexec(new_pmd);
+ }
+
+ ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+@@ -1425,6 +1448,10 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ if (exec_fault) {
+ new_pte = kvm_s2pte_mkexec(new_pte);
+ invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
++ } else if (fault_status == FSC_PERM) {
++ /* Preserve execute if XN was already cleared */
++ if (stage2_is_exec(kvm, fault_ipa))
++ new_pte = kvm_s2pte_mkexec(new_pte);
+ }
+
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
+--
+2.16.4
+
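Before this change every permission fault rebuilt the PTE/PMD with XN set again, so a page could walk X -> RO -> ROX -> RW -> RWX, taking an extra exec fault (and icache invalidation) at each step; carrying the old entry's exec permission into the new one collapses that to X -> ROX -> RWX. A toy model of the carry-forward follows; the types are hypothetical and the real check is stage2_is_exec():

    #include <stdio.h>
    #include <stdbool.h>

    struct s2_pte {
            bool writable;
            bool exec;      /* true once XN has been cleared */
    };

    /* Permission-fault upgrade that preserves exec instead of
     * reconstructing the entry from scratch. */
    static struct s2_pte upgrade_to_writable(const struct s2_pte *old)
    {
            struct s2_pte new_pte = { .writable = true, .exec = false };

            if (old->exec)          /* XN already cleared: keep it cleared */
                    new_pte.exec = true;

            return new_pte;
    }

    int main(void)
    {
            struct s2_pte pte = { .writable = false, .exec = true }; /* ROX */

            pte = upgrade_to_writable(&pte);                         /* RWX */
            printf("writable=%d exec=%d\n", pte.writable, pte.exec);
            return 0;
    }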
diff --git a/patches.suse/KVM-arm-arm64-Split-dcache-icache-flushing.patch b/patches.suse/KVM-arm-arm64-Split-dcache-icache-flushing.patch
new file mode 100644
index 0000000000..790b6fa70a
--- /dev/null
+++ b/patches.suse/KVM-arm-arm64-Split-dcache-icache-flushing.patch
@@ -0,0 +1,192 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:15 +0100
+Subject: KVM: arm/arm64: Split dcache/icache flushing
+
+Git-commit: a15f693935a9f1fec8241cafaca27be4483d4464
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+As we're about to introduce opportunistic invalidation of the icache,
+let's split dcache and icache flushing.
+
+Acked-by: Christoffer Dall <cdall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm/include/asm/kvm_mmu.h | 60 ++++++++++++++++++++++++++++------------
+ arch/arm64/include/asm/kvm_mmu.h | 13 +++++++--
+ virt/kvm/arm/mmu.c | 20 ++++++++++----
+ 3 files changed, 67 insertions(+), 26 deletions(-)
+
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index fa6f2174276b..9fa4b2520974 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -126,21 +126,12 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+ return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
+ }
+
+-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+- kvm_pfn_t pfn,
+- unsigned long size)
++static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
++ kvm_pfn_t pfn,
++ unsigned long size)
+ {
+ /*
+- * If we are going to insert an instruction page and the icache is
+- * either VIPT or PIPT, there is a potential problem where the host
+- * (or another VM) may have used the same page as this guest, and we
+- * read incorrect data from the icache. If we're using a PIPT cache,
+- * we can invalidate just that page, but if we are using a VIPT cache
+- * we need to invalidate the entire icache - damn shame - as written
+- * in the ARM ARM (DDI 0406C.b - Page B3-1393).
+- *
+- * VIVT caches are tagged using both the ASID and the VMID and doesn't
+- * need any kind of flushing (DDI 0406C.b - Page B3-1392).
++ * Clean the dcache to the Point of Coherency.
+ *
+ * We need to do this through a kernel mapping (using the
+ * user-space mapping has proved to be the wrong
+@@ -155,19 +146,52 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+- if (icache_is_pipt())
+- __cpuc_coherent_user_range((unsigned long)va,
+- (unsigned long)va + PAGE_SIZE);
+-
+ size -= PAGE_SIZE;
+ pfn++;
+
+ kunmap_atomic(va);
+ }
++}
+
+- if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
++static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
++ kvm_pfn_t pfn,
++ unsigned long size)
++{
++ /*
++ * If we are going to insert an instruction page and the icache is
++ * either VIPT or PIPT, there is a potential problem where the host
++ * (or another VM) may have used the same page as this guest, and we
++ * read incorrect data from the icache. If we're using a PIPT cache,
++ * we can invalidate just that page, but if we are using a VIPT cache
++ * we need to invalidate the entire icache - damn shame - as written
++ * in the ARM ARM (DDI 0406C.b - Page B3-1393).
++ *
++ * VIVT caches are tagged using both the ASID and the VMID and doesn't
++ * need any kind of flushing (DDI 0406C.b - Page B3-1392).
++ */
++
++ VM_BUG_ON(size & ~PAGE_MASK);
++
++ if (icache_is_vivt_asid_tagged())
++ return;
++
++ if (!icache_is_pipt()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
++ return;
++ }
++
++ /* PIPT cache. As for the d-side, use a temporary kernel mapping. */
++ while (size) {
++ void *va = kmap_atomic_pfn(pfn);
++
++ __cpuc_coherent_user_range((unsigned long)va,
++ (unsigned long)va + PAGE_SIZE);
++
++ size -= PAGE_SIZE;
++ pfn++;
++
++ kunmap_atomic(va);
+ }
+ }
+
+diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
+index 672c8684d5c2..8034b96fb3a4 100644
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -230,19 +230,26 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
+ return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+ }
+
+-static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
+- kvm_pfn_t pfn,
+- unsigned long size)
++static inline void __clean_dcache_guest_page(struct kvm_vcpu *vcpu,
++ kvm_pfn_t pfn,
++ unsigned long size)
+ {
+ void *va = page_address(pfn_to_page(pfn));
+
+ kvm_flush_dcache_to_poc(va, size);
++}
+
++static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
++ kvm_pfn_t pfn,
++ unsigned long size)
++{
+ if (icache_is_aliasing()) {
+ /* any kind of VIPT cache */
+ __flush_icache_all();
+ } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
+ /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
++ void *va = page_address(pfn_to_page(pfn));
++
+ flush_icache_range((unsigned long)va,
+ (unsigned long)va + size);
+ }
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index b36945d49986..2174244f6317 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -1257,10 +1257,16 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+ kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+ }
+
+-static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
+- unsigned long size)
++static void clean_dcache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
++ unsigned long size)
+ {
+- __coherent_cache_guest_page(vcpu, pfn, size);
++ __clean_dcache_guest_page(vcpu, pfn, size);
++}
++
++static void invalidate_icache_guest_page(struct kvm_vcpu *vcpu, kvm_pfn_t pfn,
++ unsigned long size)
++{
++ __invalidate_icache_guest_page(vcpu, pfn, size);
+ }
+
+ static void kvm_send_hwpoison_signal(unsigned long address,
+@@ -1391,7 +1397,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ new_pmd = kvm_s2pmd_mkwrite(new_pmd);
+ kvm_set_pfn_dirty(pfn);
+ }
+- coherent_cache_guest_page(vcpu, pfn, PMD_SIZE);
++ clean_dcache_guest_page(vcpu, pfn, PMD_SIZE);
++ invalidate_icache_guest_page(vcpu, pfn, PMD_SIZE);
++
+ ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+ } else {
+ pte_t new_pte = pfn_pte(pfn, mem_type);
+@@ -1401,7 +1409,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
+ kvm_set_pfn_dirty(pfn);
+ mark_page_dirty(kvm, gfn);
+ }
+- coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE);
++ clean_dcache_guest_page(vcpu, pfn, PAGE_SIZE);
++ invalidate_icache_guest_page(vcpu, pfn, PAGE_SIZE);
++
+ ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
+ }
+
+--
+2.16.4
+
diff --git a/patches.suse/KVM-arm64-Set-SCTLR_EL2.DSSBS-if-SSBD-is-forcefully-.patch b/patches.suse/KVM-arm64-Set-SCTLR_EL2.DSSBS-if-SSBD-is-forcefully-.patch
new file mode 100644
index 0000000000..b7fbb87e73
--- /dev/null
+++ b/patches.suse/KVM-arm64-Set-SCTLR_EL2.DSSBS-if-SSBD-is-forcefully-.patch
@@ -0,0 +1,65 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 8 Aug 2018 16:10:54 +0100
+Subject: KVM: arm64: Set SCTLR_EL2.DSSBS if SSBD is forcefully disabled and
+ !vhe
+
+Git-commit: 7c36447ae5a090729e7b129f24705bb231a07e0b
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+When running without VHE, it is necessary to set SCTLR_EL2.DSSBS if SSBD
+has been forcefully disabled on the kernel command-line.
+
+Acked-by: Christoffer Dall <christoffer.dall@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/kvm_host.h | 11 +++++++++++
+ arch/arm64/kvm/hyp/sysreg-sr.c | 11 +++++++++++
+ 2 files changed, 22 insertions(+)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -362,6 +362,8 @@ int kvm_perf_teardown(void);
+
+ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
+
++void __kvm_enable_ssbs(void);
++
+ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
+ unsigned long hyp_stack_ptr,
+ unsigned long vector_ptr)
+@@ -374,6 +376,15 @@ static inline void __cpu_init_hyp_mode(p
+ */
+ BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
+ __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
++
++ /*
++ * Disabling SSBD on a non-VHE system requires us to enable SSBS
++ * at EL2.
++ */
++ if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
++ arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++ kvm_call_hyp(__kvm_enable_ssbs);
++ }
+ }
+
+ static inline void kvm_arch_hardware_unsetup(void) {}
+--- a/arch/arm64/kvm/hyp/sysreg-sr.c
++++ b/arch/arm64/kvm/hyp/sysreg-sr.c
+@@ -190,3 +190,14 @@ void __hyp_text __sysreg32_restore_state
+ if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+ write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+ }
++
++void __hyp_text __kvm_enable_ssbs(void)
++{
++ u64 tmp;
++
++ asm volatile(
++ "mrs %0, sctlr_el2\n"
++ "orr %0, %0, %1\n"
++ "msr sctlr_el2, %0"
++ : "=&r" (tmp) : "L" (SCTLR_ELx_DSSBS));
++}
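
The inline asm above amounts to a single read-modify-write that sets SCTLR_EL2.DSSBS and leaves every other bit alone. A C-level sketch of the equivalent operation, using the generic sysreg accessors rather than the patch's hand-rolled asm:

    /* Sketch only: equivalent read-modify-write of SCTLR_EL2 with the
     * generic accessors, setting just the DSSBS bit. */
    u64 val = read_sysreg(sctlr_el2);
    write_sysreg(val | SCTLR_ELx_DSSBS, sctlr_el2);
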
diff --git a/patches.suse/arm-KVM-Add-optimized-PIPT-icache-flushing.patch b/patches.suse/arm-KVM-Add-optimized-PIPT-icache-flushing.patch
new file mode 100644
index 0000000000..3038c4ef36
--- /dev/null
+++ b/patches.suse/arm-KVM-Add-optimized-PIPT-icache-flushing.patch
@@ -0,0 +1,108 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:17 +0100
+Subject: arm: KVM: Add optimized PIPT icache flushing
+
+Git-commit: 91c703e0382a1212d249adf34af4943a5da90d54
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+Calling __cpuc_coherent_user_range to invalidate the icache on
+a PIPT icache machine has some pointless overhead, as it starts
+by cleaning the dcache to the PoU, while we're guaranteed to
+have already cleaned it to the PoC.
+
+As KVM is the only user of such a feature, let's implement some
+ad-hoc cache flushing in kvm_mmu.h. Should it become useful to
+other subsystems, it can be moved to a more global location.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm/include/asm/kvm_hyp.h | 2 ++
+ arch/arm/include/asm/kvm_mmu.h | 32 +++++++++++++++++++++++++++++---
+ 2 files changed, 31 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
+index 76368de7237b..1ab8329e9ff7 100644
+--- a/arch/arm/include/asm/kvm_hyp.h
++++ b/arch/arm/include/asm/kvm_hyp.h
+@@ -68,6 +68,8 @@
+ #define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
+ #define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
+ #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
++#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
++#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
+ #define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
+ #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+ #define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
+diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
+index 9fa4b2520974..bc8d21e76637 100644
+--- a/arch/arm/include/asm/kvm_mmu.h
++++ b/arch/arm/include/asm/kvm_mmu.h
+@@ -37,6 +37,8 @@
+
+ #include <linux/highmem.h>
+ #include <asm/cacheflush.h>
++#include <asm/cputype.h>
++#include <asm/kvm_hyp.h>
+ #include <asm/pgalloc.h>
+ #include <asm/stage2_pgtable.h>
+
+@@ -157,6 +159,8 @@ static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+ kvm_pfn_t pfn,
+ unsigned long size)
+ {
++ u32 iclsz;
++
+ /*
+ * If we are going to insert an instruction page and the icache is
+ * either VIPT or PIPT, there is a potential problem where the host
+@@ -181,18 +185,40 @@ static inline void __invalidate_icache_guest_page(struct kvm_vcpu *vcpu,
+ return;
+ }
+
+- /* PIPT cache. As for the d-side, use a temporary kernel mapping. */
++ /*
++ * CTR IminLine contains Log2 of the number of words in the
++ * cache line, so we can get the number of words as
++ * 2 << (IminLine - 1). To get the number of bytes, we
++ * multiply by 4 (the number of bytes in a 32-bit word), and
++ * get 4 << (IminLine).
++ */
++ iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
++
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
++ void *end = va + PAGE_SIZE;
++ void *addr = va;
+
+- __cpuc_coherent_user_range((unsigned long)va,
+- (unsigned long)va + PAGE_SIZE);
++ do {
++ write_sysreg(addr, ICIMVAU);
++ addr += iclsz;
++ } while (addr < end);
++
++ dsb(ishst);
++ isb();
+
+ size -= PAGE_SIZE;
+ pfn++;
+
+ kunmap_atomic(va);
+ }
++
++ /* Check if we need to invalidate the BTB */
++ if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
++ write_sysreg(0, BPIALLIS);
++ dsb(ishst);
++ isb();
++ }
+ }
+
+ static inline void __kvm_flush_dcache_pte(pte_t pte)
+--
+2.16.4
+
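The IminLine arithmetic behind iclsz is easy to check in isolation. A minimal user-space sketch, with a made-up CTR value standing in for read_cpuid(CPUID_CACHETYPE):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t ctr = 0x84448004;              /* hypothetical CTR; IminLine = CTR[3:0] = 4 */
            uint32_t iminline = ctr & 0xf;
            uint32_t words = 2u << (iminline - 1);  /* 32-bit words per icache line */
            uint32_t iclsz = 4u << iminline;        /* the same line size in bytes */

            /* IminLine = 4 -> 16 words -> 64 bytes, i.e. 4 << IminLine */
            printf("%u words, %u bytes per line\n", words, iclsz);
            return 0;
    }
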
diff --git a/patches.suse/arm64-Add-decoding-macros-for-CP15_32-and-CP15_64-tr.patch b/patches.suse/arm64-Add-decoding-macros-for-CP15_32-and-CP15_64-tr.patch
new file mode 100644
index 0000000000..82b8c755be
--- /dev/null
+++ b/patches.suse/arm64-Add-decoding-macros-for-CP15_32-and-CP15_64-tr.patch
@@ -0,0 +1,80 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Sep 2018 17:15:28 +0100
+Subject: arm64: Add decoding macros for CP15_32 and CP15_64 traps
+
+Git-commit: bd7ac140b82f2bedfc792d8ccf9b2a108c1324f3
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+So far, we don't have anything to help decode ESR_ELx when dealing
+with ESR_ELx_EC_CP15_{32,64}. As we're about to handle some of those,
+let's add some useful macros.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/esr.h | 52 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 52 insertions(+)
+
+--- a/arch/arm64/include/asm/esr.h
++++ b/arch/arm64/include/asm/esr.h
+@@ -227,6 +227,58 @@
+ (((e) & ESR_ELx_SYS64_ISS_OP2_MASK) >> \
+ ESR_ELx_SYS64_ISS_OP2_SHIFT))
+
++/*
++ * ISS field definitions for CP15 accesses
++ */
++#define ESR_ELx_CP15_32_ISS_DIR_MASK 0x1
++#define ESR_ELx_CP15_32_ISS_DIR_READ 0x1
++#define ESR_ELx_CP15_32_ISS_DIR_WRITE 0x0
++
++#define ESR_ELx_CP15_32_ISS_RT_SHIFT 5
++#define ESR_ELx_CP15_32_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_32_ISS_RT_SHIFT)
++#define ESR_ELx_CP15_32_ISS_CRM_SHIFT 1
++#define ESR_ELx_CP15_32_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRM_SHIFT)
++#define ESR_ELx_CP15_32_ISS_CRN_SHIFT 10
++#define ESR_ELx_CP15_32_ISS_CRN_MASK (UL(0xf) << ESR_ELx_CP15_32_ISS_CRN_SHIFT)
++#define ESR_ELx_CP15_32_ISS_OP1_SHIFT 14
++#define ESR_ELx_CP15_32_ISS_OP1_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP1_SHIFT)
++#define ESR_ELx_CP15_32_ISS_OP2_SHIFT 17
++#define ESR_ELx_CP15_32_ISS_OP2_MASK (UL(0x7) << ESR_ELx_CP15_32_ISS_OP2_SHIFT)
++
++#define ESR_ELx_CP15_32_ISS_SYS_MASK (ESR_ELx_CP15_32_ISS_OP1_MASK | \
++ ESR_ELx_CP15_32_ISS_OP2_MASK | \
++ ESR_ELx_CP15_32_ISS_CRN_MASK | \
++ ESR_ELx_CP15_32_ISS_CRM_MASK | \
++ ESR_ELx_CP15_32_ISS_DIR_MASK)
++#define ESR_ELx_CP15_32_ISS_SYS_VAL(op1, op2, crn, crm) \
++ (((op1) << ESR_ELx_CP15_32_ISS_OP1_SHIFT) | \
++ ((op2) << ESR_ELx_CP15_32_ISS_OP2_SHIFT) | \
++ ((crn) << ESR_ELx_CP15_32_ISS_CRN_SHIFT) | \
++ ((crm) << ESR_ELx_CP15_32_ISS_CRM_SHIFT))
++
++#define ESR_ELx_CP15_64_ISS_DIR_MASK 0x1
++#define ESR_ELx_CP15_64_ISS_DIR_READ 0x1
++#define ESR_ELx_CP15_64_ISS_DIR_WRITE 0x0
++
++#define ESR_ELx_CP15_64_ISS_RT_SHIFT 5
++#define ESR_ELx_CP15_64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT_SHIFT)
++
++#define ESR_ELx_CP15_64_ISS_RT2_SHIFT 10
++#define ESR_ELx_CP15_64_ISS_RT2_MASK (UL(0x1f) << ESR_ELx_CP15_64_ISS_RT2_SHIFT)
++
++#define ESR_ELx_CP15_64_ISS_OP1_SHIFT 16
++#define ESR_ELx_CP15_64_ISS_OP1_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_OP1_SHIFT)
++#define ESR_ELx_CP15_64_ISS_CRM_SHIFT 1
++#define ESR_ELx_CP15_64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_CP15_64_ISS_CRM_SHIFT)
++
++#define ESR_ELx_CP15_64_ISS_SYS_VAL(op1, crm) \
++ (((op1) << ESR_ELx_CP15_64_ISS_OP1_SHIFT) | \
++ ((crm) << ESR_ELx_CP15_64_ISS_CRM_SHIFT))
++
++#define ESR_ELx_CP15_64_ISS_SYS_MASK (ESR_ELx_CP15_64_ISS_OP1_MASK | \
++ ESR_ELx_CP15_64_ISS_CRM_MASK | \
++ ESR_ELx_CP15_64_ISS_DIR_MASK)
++
+ #ifndef __ASSEMBLY__
+ #include <asm/types.h>
+
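As a hedged example of how these macros compose, matching a 64-bit CP15 read of CNTVCT (MRRC with op1 = 1, CRm = c14) could look like the sketch below; the _SYS_CNTVCT name and the helper are illustrative, not part of this patch:

    /* Illustrative only: recognise MRRC p15, 1, <Rt>, <Rt2>, c14 (CNTVCT). */
    #define ESR_ELx_CP15_64_ISS_SYS_CNTVCT  (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
                                             ESR_ELx_CP15_64_ISS_DIR_READ)

    static bool is_cntvct_read(unsigned int esr)
    {
            return (esr & ESR_ELx_CP15_64_ISS_SYS_MASK) ==
                   ESR_ELx_CP15_64_ISS_SYS_CNTVCT;
    }
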
diff --git a/patches.suse/arm64-Add-part-number-for-Neoverse-N1.patch b/patches.suse/arm64-Add-part-number-for-Neoverse-N1.patch
new file mode 100644
index 0000000000..71d49d5355
--- /dev/null
+++ b/patches.suse/arm64-Add-part-number-for-Neoverse-N1.patch
@@ -0,0 +1,40 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 15 Apr 2019 13:03:53 +0100
+Subject: arm64: Add part number for Neoverse N1
+
+Git-commit: 0cf57b86859c49381addb3ce47be70aadf5fd2c0
+Patch-mainline: v5.2-rc1
+References: jsc#ECO-561
+
+New CPU, new part number. You know the drill.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/cputype.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
+index 5f1437099b99..2602bae334fb 100644
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -89,6 +89,7 @@
+ #define ARM_CPU_PART_CORTEX_A35 0xD04
+ #define ARM_CPU_PART_CORTEX_A55 0xD05
+ #define ARM_CPU_PART_CORTEX_A76 0xD0B
++#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
+
+ #define APM_CPU_PART_POTENZA 0x000
+
+@@ -118,6 +119,7 @@
+ #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
+ #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
++#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+--
+2.16.4
+
diff --git a/patches.suse/arm64-Add-silicon-errata.txt-entry-for-ARM-erratum-1.patch b/patches.suse/arm64-Add-silicon-errata.txt-entry-for-ARM-erratum-1.patch
new file mode 100644
index 0000000000..d08a47ced0
--- /dev/null
+++ b/patches.suse/arm64-Add-silicon-errata.txt-entry-for-ARM-erratum-1.patch
@@ -0,0 +1,33 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Wed, 10 Oct 2018 17:25:51 +0100
+Subject: arm64: Add silicon-errata.txt entry for ARM erratum 1188873
+
+Git-commit: e03a4e5bb7430f9294c12f02c69eb045d010e942
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Document that we actually work around ARM erratum 1188873
+
+Fixes: 95b861a4a6d9 ("arm64: arch_timer: Add workaround for ARM erratum 1188873")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ Documentation/arm64/silicon-errata.txt | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
+index 3b2f2dd82225..76ccded8b74c 100644
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -56,6 +56,7 @@ stable kernels.
+ | ARM | Cortex-A72 | #853709 | N/A |
+ | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+ | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
++| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+ | ARM | MMU-500 | #841119,#826419 | N/A |
+ | | | | |
+ | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+--
+2.16.4
+
diff --git a/patches.suse/arm64-Add-support-for-new-control-bits-CTR_EL0.DIC-a.patch b/patches.suse/arm64-Add-support-for-new-control-bits-CTR_EL0.DIC-a.patch
new file mode 100644
index 0000000000..9612c33970
--- /dev/null
+++ b/patches.suse/arm64-Add-support-for-new-control-bits-CTR_EL0.DIC-a.patch
@@ -0,0 +1,199 @@
+From: Shanker Donthineni <shankerd@codeaurora.org>
+Date: Wed, 7 Mar 2018 09:00:08 -0600
+Subject: arm64: Add support for new control bits CTR_EL0.DIC and CTR_EL0.IDC
+
+Git-commit: 6ae4b6e0578886eb36cedbf99f04031d93f9e315
+Patch-mainline: v4.17-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+The DCache clean & ICache invalidation requirements for instructions
+to be coherent with data are discoverable through new fields in CTR_EL0.
+The following two control bits DIC and IDC were defined for this
+purpose. No need to perform point of unification cache maintenance
+operations from software on systems where CPU caches are transparent.
+
+This patch optimizes the three functions __flush_cache_user_range(),
+clean_dcache_area_pou() and invalidate_icache_range() if the hardware
+reports CTR_EL0.IDC and/or CTR_EL0.DIC. Basically it skips the two
+instructions 'DC CVAU' and 'IC IVAU', and the associated loop logic
+in order to avoid the unnecessary overhead.
+
+CTR_EL0.DIC: Instruction cache invalidation requirements for
+ instruction to data coherence. This is bit[29].
+ 0: Instruction cache invalidation to the point of unification
+ is required for instruction to data coherence.
+ 1: Instruction cache invalidation to the point of unification
+ is not required for instruction to data coherence.
+
+CTR_EL0.IDC: Data cache clean requirements for instruction to data
+ coherence. This is bit[28].
+ 0: Data cache clean to the point of unification is required for
+ instruction to data coherence, unless CLIDR_EL1.LoC == 0b000
+ or (CLIDR_EL1.LoUIS == 0b000 && CLIDR_EL1.LoUU == 0b000).
+ 1: Data cache clean to the point of unification is not required
+ for instruction to data coherence.
+
+Co-authored-by: Philip Elcan <pelcan@codeaurora.org>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Shanker Donthineni <shankerd@codeaurora.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/cache.h | 4 ++++
+ arch/arm64/include/asm/cacheflush.h | 3 +++
+ arch/arm64/include/asm/cpucaps.h | 4 +++-
+ arch/arm64/kernel/cpufeature.c | 36 ++++++++++++++++++++++++++++++------
+ arch/arm64/mm/cache.S | 21 ++++++++++++++++++++-
+ 5 files changed, 60 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -20,8 +20,12 @@
+
+ #define CTR_L1IP_SHIFT 14
+ #define CTR_L1IP_MASK 3
++#define CTR_DMINLINE_SHIFT 16
++#define CTR_ERG_SHIFT 20
+ #define CTR_CWG_SHIFT 24
+ #define CTR_CWG_MASK 15
++#define CTR_IDC_SHIFT 28
++#define CTR_DIC_SHIFT 29
+
+ #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+--- a/arch/arm64/include/asm/cacheflush.h
++++ b/arch/arm64/include/asm/cacheflush.h
+@@ -131,6 +131,9 @@ extern void flush_dcache_page(struct pag
+
+ static inline void __flush_icache_all(void)
+ {
++ if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
++ return;
++
+ asm("ic ialluis");
+ dsb(ish);
+ }
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -45,7 +45,9 @@
+ #define ARM64_HARDEN_BP_POST_GUEST_EXIT 24
+ #define ARM64_HAS_RAS_EXTN 25
+ #define ARM64_WORKAROUND_843419 29
++#define ARM64_HAS_CACHE_IDC 32
++#define ARM64_HAS_CACHE_DIC 33
+
+-#define ARM64_NCAPS 30
++#define ARM64_NCAPS 34
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -199,12 +199,12 @@ static const struct arm64_ftr_bits ftr_i
+ };
+
+ static const struct arm64_ftr_bits ftr_ctr[] = {
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+ /*
+ * Linux can handle differing I-cache policies. Userspace JITs will
+ * make use of *minLine.
+@@ -852,6 +852,18 @@ static bool has_no_fpsimd(const struct a
+ ID_AA64PFR0_FP_SHIFT) < 0;
+ }
+
++static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
++ int __unused)
++{
++ return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
++}
++
++static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
++ int __unused)
++{
++ return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
++}
++
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+@@ -1077,6 +1089,18 @@ static const struct arm64_cpu_capabiliti
+ .enable = cpu_clear_disr,
+ },
+ #endif /* CONFIG_ARM64_RAS_EXTN */
++ {
++ .desc = "Data cache clean to the PoU not required for I/D coherence",
++ .capability = ARM64_HAS_CACHE_IDC,
++ .def_scope = SCOPE_SYSTEM,
++ .matches = has_cache_idc,
++ },
++ {
++ .desc = "Instruction cache invalidation not required for I/D coherence",
++ .capability = ARM64_HAS_CACHE_DIC,
++ .def_scope = SCOPE_SYSTEM,
++ .matches = has_cache_dic,
++ },
+ {},
+ };
+
+--- a/arch/arm64/mm/cache.S
++++ b/arch/arm64/mm/cache.S
+@@ -50,6 +50,10 @@ ENTRY(flush_icache_range)
+ */
+ ENTRY(__flush_cache_user_range)
+ uaccess_ttbr0_enable x2, x3, x4
++alternative_if ARM64_HAS_CACHE_IDC
++ dsb ishst
++ b 7f
++alternative_else_nop_endif
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x4, x0, x3
+@@ -60,8 +64,13 @@ user_alt 9f, "dc cvau, x4", "dc civac,
+ b.lo 1b
+ dsb ish
+
++7:
++alternative_if ARM64_HAS_CACHE_DIC
++ isb
++ b 8f
++alternative_else_nop_endif
+ invalidate_icache_by_line x0, x1, x2, x3, 9f
+- mov x0, #0
++8: mov x0, #0
+ 1:
+ uaccess_ttbr0_disable x1, x2
+ ret
+@@ -80,6 +89,12 @@ ENDPROC(__flush_cache_user_range)
+ * - end - virtual end address of region
+ */
+ ENTRY(invalidate_icache_range)
++alternative_if ARM64_HAS_CACHE_DIC
++ mov x0, xzr
++ isb
++ ret
++alternative_else_nop_endif
++
+ uaccess_ttbr0_enable x2, x3
+
+ invalidate_icache_by_line x0, x1, x2, x3, 2f
+@@ -116,6 +131,10 @@ ENDPIPROC(__flush_dcache_area)
+ * - size - size in question
+ */
+ ENTRY(__clean_dcache_area_pou)
++alternative_if ARM64_HAS_CACHE_IDC
++ dsb ishst
++ ret
++alternative_else_nop_endif
+ dcache_by_line_op cvau, ish, x0, x1, x2, x3
+ ret
+ ENDPROC(__clean_dcache_area_pou)
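
Both capability checks reduce to single-bit tests on the sanitised CTR_EL0 value. A self-contained restatement of that decoding (the sample value is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    #define CTR_IDC_SHIFT   28
    #define CTR_DIC_SHIFT   29

    /* IDC set: no DC CVAU needed for I/D coherence.
     * DIC set: no IC IVAU needed for I/D coherence. */
    static bool ctr_has_idc(uint64_t ctr) { return ctr & (1ull << CTR_IDC_SHIFT); }
    static bool ctr_has_dic(uint64_t ctr) { return ctr & (1ull << CTR_DIC_SHIFT); }

    int main(void)
    {
            uint64_t ctr = (1ull << CTR_IDC_SHIFT) | (1ull << CTR_DIC_SHIFT);
            return !(ctr_has_idc(ctr) && ctr_has_dic(ctr));
    }
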
diff --git a/patches.suse/arm64-Apply-ARM64_ERRATUM_1188873-to-Neoverse-N1.patch b/patches.suse/arm64-Apply-ARM64_ERRATUM_1188873-to-Neoverse-N1.patch
new file mode 100644
index 0000000000..09831f8f5c
--- /dev/null
+++ b/patches.suse/arm64-Apply-ARM64_ERRATUM_1188873-to-Neoverse-N1.patch
@@ -0,0 +1,87 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 15 Apr 2019 13:03:54 +0100
+Subject: arm64: Apply ARM64_ERRATUM_1188873 to Neoverse-N1
+
+Git-commit: 6989303a3b2d864fd8e17d3fa3365d3e9649a598
+Patch-mainline: v5.2-rc1
+References: jsc#ECO-561
+
+Neoverse-N1 is also affected by ARM64_ERRATUM_1188873, so let's
+add it to the list of affected CPUs.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+[will: Update silicon-errata.txt]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ Documentation/arm64/silicon-errata.txt | 1 +
+ arch/arm64/Kconfig | 11 ++++++-----
+ arch/arm64/kernel/cpu_errata.c | 13 +++++++++++--
+ 3 files changed, 18 insertions(+), 7 deletions(-)
+
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -57,6 +57,7 @@ stable kernels.
+ | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+ | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
+ | ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
++| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
+ | ARM | MMU-500 | #841119,#826419 | N/A |
+ | | | | |
+ | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -455,16 +455,17 @@ config ARM64_ERRATUM_1024718
+ If unsure, say Y.
+
+ config ARM64_ERRATUM_1188873
+- bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
++ bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ default y
+ depends on COMPAT
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+- This option adds work arounds for ARM Cortex-A76 erratum 1188873
++ This option adds work arounds for ARM Cortex-A76/Neoverse-N1
++ erratum 1188873
+
+- Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
+- register corruption when accessing the timer registers from
+- AArch32 userspace.
++ Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
++ cause register corruption when accessing the timer registers
++ from AArch32 userspace.
+
+ If unsure, say Y.
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -535,6 +535,16 @@ static const struct arm64_cpu_capabiliti
+
+ #endif
+
++#ifdef CONFIG_ARM64_ERRATUM_1188873
++static const struct midr_range erratum_1188873_list[] = {
++ /* Cortex-A76 r0p0 to r2p0 */
++ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
++ /* Neoverse-N1 r0p0 to r2p0 */
++ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
++ {},
++};
++#endif
++
+ const struct arm64_cpu_capabilities arm64_errata[] = {
+ #if defined(CONFIG_ARM64_ERRATUM_826319) || \
+ defined(CONFIG_ARM64_ERRATUM_827319) || \
+@@ -691,10 +701,9 @@ const struct arm64_cpu_capabilities arm6
+ #endif
+ #ifdef CONFIG_ARM64_ERRATUM_1188873
+ {
+- /* Cortex-A76 r0p0 to r2p0 */
+ .desc = "ARM erratum 1188873",
+ .capability = ARM64_WORKAROUND_1188873,
+- ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
++ ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
+ },
+ #endif
+ {
diff --git a/patches.suse/arm64-Fake-the-IminLine-size-on-systems-affected-by-.patch b/patches.suse/arm64-Fake-the-IminLine-size-on-systems-affected-by-.patch
new file mode 100644
index 0000000000..6f190f4a12
--- /dev/null
+++ b/patches.suse/arm64-Fake-the-IminLine-size-on-systems-affected-by-.patch
@@ -0,0 +1,65 @@
+From: James Morse <james.morse@arm.com>
+Date: Thu, 17 Oct 2019 18:42:59 +0100
+Subject: arm64: Fake the IminLine size on systems affected by Neoverse-N1
+ #1542419
+
+Git-commit: ee9d90be9ddace01b7fb126567e4b539fbe1f82f
+Patch-mainline: Queued
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
+References: jsc#ECO-561,jsc#SLE-10671
+
+Systems affected by Neoverse-N1 #1542419 support DIC, so they do not need
+to perform icache maintenance once new instructions are cleaned to the PoU.
+For the erratum workaround, the kernel hides DIC from user-space, so that
+the unnecessary cache maintenance can be trapped by firmware.
+
+To reduce the number of traps, produce a fake IminLine value based on
+PAGE_SIZE.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ arch/arm64/include/asm/cache.h | 3 ++-
+ arch/arm64/kernel/traps.c | 8 +++++++-
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -22,6 +22,7 @@
+ #define CTR_L1IP_MASK 3
+ #define CTR_DMINLINE_SHIFT 16
+ #define CTR_IMINLINE_SHIFT 0
++#define CTR_IMINLINE_MASK 0xf
+ #define CTR_ERG_SHIFT 20
+ #define CTR_CWG_SHIFT 24
+ #define CTR_CWG_MASK 15
+@@ -29,7 +30,7 @@
+ #define CTR_DIC_SHIFT 29
+
+ #define CTR_CACHE_MINLINE_MASK \
+- (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
++ (0xf << CTR_DMINLINE_SHIFT | CTR_IMINLINE_MASK << CTR_IMINLINE_SHIFT)
+
+ #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -433,9 +433,15 @@ static void ctr_read_handler(unsigned in
+ int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
+
+- if (cpus_have_const_cap(ARM64_WORKAROUND_1542419))
++ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
++ /* Hide DIC so that we can trap the unnecessary maintenance...*/
+ val &= ~BIT(CTR_DIC_SHIFT);
+
++ /* ... and fake IminLine to reduce the number of traps. */
++ val &= ~CTR_IMINLINE_MASK;
++ val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
++ }
++
+ pt_regs_write_reg(regs, rt, val);
+
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
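
The PAGE_SHIFT - 2 encoding works because IminLine counts 4-byte words: a faked field value of PAGE_SHIFT - 2 advertises a line of 4 << (PAGE_SHIFT - 2) = PAGE_SIZE bytes, so affected user space issues one IC instruction per page instead of one per real cache line. A quick sanity check of the arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned int page_shift = 12;                    /* 4K pages */
            unsigned int fake_iminline = page_shift - 2;     /* faked field value: 10 */
            unsigned long line_bytes = 4ul << fake_iminline; /* 4 << 10 = 4096 */

            assert(line_bytes == (1ul << page_shift));       /* one line per page */
            return 0;
    }
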
diff --git a/patches.suse/arm64-Fix-mismatched-cache-line-size-detection.patch b/patches.suse/arm64-Fix-mismatched-cache-line-size-detection.patch
new file mode 100644
index 0000000000..2c0b326ab7
--- /dev/null
+++ b/patches.suse/arm64-Fix-mismatched-cache-line-size-detection.patch
@@ -0,0 +1,88 @@
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Wed, 4 Jul 2018 23:07:45 +0100
+Subject: arm64: Fix mismatched cache line size detection
+
+Git-commit: 4c4a39dd5fe2d13e2d2fa5fceb8ef95d19fc389a
+Patch-mainline: v4.19-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+If there is a mismatch in the I/D min line size, we must
+always use the system wide safe value both in applications
+and in the kernel, while performing cache operations. However,
+we have been checking more bits than just the min line sizes,
+which triggers false negatives. We may need to trap the user
+accesses in such cases, but not necessarily patch the kernel.
+
+This patch fixes the check to do the right thing as advertised.
+A new capability will be added to check mismatches in other
+fields and ensure we trap the CTR accesses.
+
+Fixes: be68a8aaf925 ("arm64: cpufeature: Fix CTR_EL0 field definitions")
+Cc: <stable@vger.kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/cache.h | 4 ++++
+ arch/arm64/kernel/cpu_errata.c | 6 ++++--
+ arch/arm64/kernel/cpufeature.c | 2 +-
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
+index 5df5cfe1c143..5ee5bca8c24b 100644
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -21,12 +21,16 @@
+ #define CTR_L1IP_SHIFT 14
+ #define CTR_L1IP_MASK 3
+ #define CTR_DMINLINE_SHIFT 16
++#define CTR_IMINLINE_SHIFT 0
+ #define CTR_ERG_SHIFT 20
+ #define CTR_CWG_SHIFT 24
+ #define CTR_CWG_MASK 15
+ #define CTR_IDC_SHIFT 28
+ #define CTR_DIC_SHIFT 29
+
++#define CTR_CACHE_MINLINE_MASK \
++ (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
++
+ #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+ #define ICACHE_POLICY_VPIPT 0
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 1d2b6d768efe..5d1fa928ea4b 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -68,9 +68,11 @@ static bool
+ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
+ int scope)
+ {
++ u64 mask = CTR_CACHE_MINLINE_MASK;
++
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+- return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
+- (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
++ return (read_cpuid_cachetype() & mask) !=
++ (arm64_ftr_reg_ctrel0.sys_val & mask);
+ }
+
+ static void
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index f24892a40d2c..25d5cef00333 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -214,7 +214,7 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
+ * If we have differing I-cache policies, report it as the weakest - VIPT.
+ */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
+ ARM64_FTR_END,
+ };
+
+--
+2.16.4
+
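Numerically, CTR_CACHE_MINLINE_MASK is 0x000f000f, covering only DminLine (CTR_EL0[19:16]) and IminLine (CTR_EL0[3:0]). A stand-alone restatement of the narrowed check:

    #include <stdbool.h>
    #include <stdint.h>

    #define CTR_CACHE_MINLINE_MASK  ((0xfu << 16) | (0xfu << 0))  /* DminLine | IminLine */

    /* Mirrors the fixed has_mismatched_cache_line_size(): compare only
     * the two min line size fields against the system-wide safe value. */
    static bool minline_mismatch(uint32_t cpu_ctr, uint32_t sys_ctr)
    {
            return (cpu_ctr & CTR_CACHE_MINLINE_MASK) !=
                   (sys_ctr & CTR_CACHE_MINLINE_MASK);
    }
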
diff --git a/patches.suse/arm64-Fix-silly-typo-in-comment.patch b/patches.suse/arm64-Fix-silly-typo-in-comment.patch
new file mode 100644
index 0000000000..2874612d06
--- /dev/null
+++ b/patches.suse/arm64-Fix-silly-typo-in-comment.patch
@@ -0,0 +1,35 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 15 Jun 2018 11:36:43 +0100
+Subject: arm64: Fix silly typo in comment
+
+Git-commit: ca7f686ac9fe87a9175696a8744e095ab9749c49
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+I was passing through and figured I'd fix this up:
+
+ featuer -> feature
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/cpufeature.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index 1717ba1db35d..9079715794af 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -262,7 +262,7 @@ extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
+ /*
+ * CPU feature detected at boot time based on system-wide value of a
+ * feature. It is safe for a late CPU to have this feature even though
+- * the system hasn't enabled it, although the featuer will not be used
++ * the system hasn't enabled it, although the feature will not be used
+ * by Linux in this case. If the system has enabled this feature already,
+ * then every late CPU must have it.
+ */
+--
+2.16.4
+
diff --git a/patches.suse/arm64-Force-SSBS-on-context-switch.patch b/patches.suse/arm64-Force-SSBS-on-context-switch.patch
new file mode 100644
index 0000000000..8d795e657e
--- /dev/null
+++ b/patches.suse/arm64-Force-SSBS-on-context-switch.patch
@@ -0,0 +1,113 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 22 Jul 2019 14:53:09 +0100
+Subject: arm64: Force SSBS on context switch
+
+Git-commit: cbdf8a189a66001c36007bf0f5c975d0376c5c3a
+Patch-mainline: v5.3-rc2
+References: jsc#ECO-561
+
+On a CPU that doesn't support SSBS, PSTATE[12] is RES0. In a system
+where only some of the CPUs implement SSBS, we end-up losing track of
+the SSBS bit across task migration.
+
+To address this issue, let's force the SSBS bit on context switch.
+
+Fixes: 8f04e8e6e29c ("arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+[will: inverted logic and added comments]
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/processor.h | 14 ++++++++++++--
+ arch/arm64/kernel/process.c | 29 ++++++++++++++++++++++++++++-
+ 2 files changed, 40 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -142,6 +142,16 @@ static inline void start_thread_common(s
+ regs->pc = pc;
+ }
+
++static inline void set_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_SSBS_BIT;
++}
++
++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_AA32_SSBS_BIT;
++}
++
+ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+ {
+@@ -149,7 +159,7 @@ static inline void start_thread(struct p
+ regs->pstate = PSR_MODE_EL0t;
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+- regs->pstate |= PSR_SSBS_BIT;
++ set_ssbs_bit(regs);
+
+ regs->sp = sp;
+ }
+@@ -168,7 +178,7 @@ static inline void compat_start_thread(s
+ #endif
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+- regs->pstate |= PSR_AA32_SSBS_BIT;
++ set_compat_ssbs_bit(regs);
+
+ regs->compat_sp = sp;
+ }
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -354,7 +354,7 @@ int copy_thread(unsigned long clone_flag
+ childregs->pstate |= PSR_UAO_BIT;
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+- childregs->pstate |= PSR_SSBS_BIT;
++ set_ssbs_bit(childregs);
+
+ p->thread.cpu_context.x19 = stack_start;
+ p->thread.cpu_context.x20 = stk_sz;
+@@ -394,6 +394,32 @@ void uao_thread_switch(struct task_struc
+ }
+
+ /*
++ * Force SSBS state on context-switch, since it may be lost after migrating
++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
++ */
++static void ssbs_thread_switch(struct task_struct *next)
++{
++ struct pt_regs *regs = task_pt_regs(next);
++
++ /*
++ * Nothing to do for kernel threads, but 'regs' may be junk
++ * (e.g. idle task) so check the flags and bail early.
++ */
++ if (unlikely(next->flags & PF_KTHREAD))
++ return;
++
++ /* If the mitigation is enabled, then we leave SSBS clear. */
++ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
++ test_tsk_thread_flag(next, TIF_SSBD))
++ return;
++
++ if (compat_user_mode(regs))
++ set_compat_ssbs_bit(regs);
++ else if (user_mode(regs))
++ set_ssbs_bit(regs);
++}
++
++/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+@@ -421,6 +447,7 @@ __notrace_funcgraph struct task_struct *
+ contextidr_thread_switch(next);
+ entry_task_switch(next);
+ uao_thread_switch(next);
++ ssbs_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
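
Per switch, the repair is a small decision on the saved pstate. A self-contained restatement of the logic (the helper name is illustrative; only PSR_SSBS_BIT matches the kernel's value):

    #include <stdbool.h>

    #define PSR_SSBS_BIT    0x00001000UL    /* PSTATE[12] */

    /* Restates ssbs_thread_switch(): with the mitigation off for this
     * task, SSBS must be forced back on in case the bit was lost on a
     * CPU that treats it as RES0. */
    static bool must_restore_ssbs(unsigned long pstate, bool mitigation_on)
    {
            if (mitigation_on)              /* leave SSBS clear */
                    return false;
            return !(pstate & PSR_SSBS_BIT);
    }
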
diff --git a/patches.suse/arm64-Handle-erratum-1418040-as-a-superset-of-erratu.patch b/patches.suse/arm64-Handle-erratum-1418040-as-a-superset-of-erratu.patch
new file mode 100644
index 0000000000..17acc5e5a8
--- /dev/null
+++ b/patches.suse/arm64-Handle-erratum-1418040-as-a-superset-of-erratu.patch
@@ -0,0 +1,134 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 23 May 2019 11:24:50 +0100
+Subject: arm64: Handle erratum 1418040 as a superset of erratum 1188873
+
+Git-commit: a5325089bd05a7b0259cc4038479d36308edbda2
+Patch-mainline: v5.2-rc2
+References: jsc#ECO-561
+
+We already mitigate erratum 1188873 affecting Cortex-A76 and
+Neoverse-N1 r0p0 to r2p0. It turns out that revisions r0p0 to
+r3p1 of the same cores are affected by erratum 1418040, which
+has the same workaround as 1188873.
+
+Let's expand the range of affected revisions to match 1418040,
+and repaint all occurrences of 1188873 to 1418040. Whilst we're
+there, do a bit of reformatting in silicon-errata.txt and drop
+a now unnecessary dependency on ARM_ARCH_TIMER_OOL_WORKAROUND.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ Documentation/arm64/silicon-errata.txt | 8 ++++----
+ arch/arm64/Kconfig | 7 +++----
+ arch/arm64/include/asm/cpucaps.h | 2 +-
+ arch/arm64/kernel/cpu_errata.c | 24 ++++++++++++++----------
+ arch/arm64/kernel/entry.S | 4 ++--
+ 5 files changed, 24 insertions(+), 21 deletions(-)
+
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -56,11 +56,11 @@ stable kernels.
+ | ARM | Cortex-A72 | #853709 | N/A |
+ | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
+ | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
+-| ARM | Cortex-A76 | #1188873 | ARM64_ERRATUM_1188873 |
+-| ARM | Neoverse-N1 | #1188873 | ARM64_ERRATUM_1188873 |
+-| ARM | MMU-500 | #841119,#826419 | N/A |
++| ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 |
++| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
++| ARM | MMU-500 | #841119,826419 | N/A |
+ | | | | |
+-| Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
++| Cavium | ThunderX ITS | #22375,24313 | CAVIUM_ERRATUM_22375 |
+ | Cavium | ThunderX ITS | #23144 | CAVIUM_ERRATUM_23144 |
+ | Cavium | ThunderX GICv3 | #23154 | CAVIUM_ERRATUM_23154 |
+ | Cavium | ThunderX Core | #27456 | CAVIUM_ERRATUM_27456 |
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -454,16 +454,15 @@ config ARM64_ERRATUM_1024718
+
+ If unsure, say Y.
+
+-config ARM64_ERRATUM_1188873
++config ARM64_ERRATUM_1418040
+ bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ default y
+ depends on COMPAT
+- select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+ This option adds work arounds for ARM Cortex-A76/Neoverse-N1
+- erratum 1188873
++ errata 1188873 and 1418040.
+
+- Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
++ Affected Cortex-A76/Neoverse-N1 cores (r0p0 to r3p1) could
+ cause register corruption when accessing the timer registers
+ from AArch32 userspace.
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -49,7 +49,7 @@
+ #define ARM64_HAS_CNP 28
+ #define ARM64_WORKAROUND_843419 29
+ #define ARM64_SSBS 30
+-#define ARM64_WORKAROUND_1188873 31
++#define ARM64_WORKAROUND_1418040 31
+ #define ARM64_HAS_CACHE_IDC 32
+ #define ARM64_HAS_CACHE_DIC 33
+ #define ARM64_MISMATCHED_CACHE_TYPE 34
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -541,12 +541,16 @@ static const struct arm64_cpu_capabiliti
+
+ #endif
+
+-#ifdef CONFIG_ARM64_ERRATUM_1188873
+-static const struct midr_range erratum_1188873_list[] = {
+- /* Cortex-A76 r0p0 to r2p0 */
+- MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
+- /* Neoverse-N1 r0p0 to r2p0 */
+- MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
++#ifdef CONFIG_ARM64_ERRATUM_1418040
++/*
++ * - 1188873 affects r0p0 to r2p0
++ * - 1418040 affects r0p0 to r3p1
++ */
++static const struct midr_range erratum_1418040_list[] = {
++ /* Cortex-A76 r0p0 to r3p1 */
++ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
++ /* Neoverse-N1 r0p0 to r3p1 */
++ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+ {},
+ };
+ #endif
+@@ -712,11 +716,11 @@ const struct arm64_cpu_capabilities arm6
+ .matches = has_ssbd_mitigation,
+ },
+ #endif
+-#ifdef CONFIG_ARM64_ERRATUM_1188873
++#ifdef CONFIG_ARM64_ERRATUM_1418040
+ {
+- .desc = "ARM erratum 1188873",
+- .capability = ARM64_WORKAROUND_1188873,
+- ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
++ .desc = "ARM erratum 1418040",
++ .capability = ARM64_WORKAROUND_1418040,
++ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ },
+ #endif
+ {
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -330,8 +330,8 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
+-#ifdef CONFIG_ARM64_ERRATUM_1188873
+-alternative_if_not ARM64_WORKAROUND_1188873
++#ifdef CONFIG_ARM64_ERRATUM_1418040
++alternative_if_not ARM64_WORKAROUND_1418040
+ b 4f
+ alternative_else_nop_endif
+ /*
diff --git a/patches.suse/arm64-Handle-mismatched-cache-type.patch b/patches.suse/arm64-Handle-mismatched-cache-type.patch
new file mode 100644
index 0000000000..d6fc1821ec
--- /dev/null
+++ b/patches.suse/arm64-Handle-mismatched-cache-type.patch
@@ -0,0 +1,73 @@
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Wed, 4 Jul 2018 23:07:46 +0100
+Subject: arm64: Handle mismatched cache type
+
+Git-commit: 314d53d297980676011e6fd83dac60db4a01dc70
+Patch-mainline: v4.19-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+Track mismatches in the cache type register (CTR_EL0), other
+than the D/I min line sizes, and trap user accesses if there are any.
+
+Fixes: be68a8aaf925 ("arm64: cpufeature: Fix CTR_EL0 field definitions")
+Cc: <stable@vger.kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ arch/arm64/include/asm/cpucaps.h | 3 ++-
+ arch/arm64/kernel/cpu_errata.c | 17 ++++++++++++++---
+ 2 files changed, 16 insertions(+), 4 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -49,7 +49,8 @@
+ #define ARM64_WORKAROUND_843419 29
+ #define ARM64_HAS_CACHE_IDC 32
+ #define ARM64_HAS_CACHE_DIC 33
++#define ARM64_MISMATCHED_CACHE_TYPE 34
+
+-#define ARM64_NCAPS 34
++#define ARM64_NCAPS 35
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -51,11 +51,15 @@ is_affected_midr_range_list(const struct
+ }
+
+ static bool
+-has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
+- int scope)
++has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
++ int scope)
+ {
+ u64 mask = CTR_CACHE_MINLINE_MASK;
+
++ /* Skip matching the min line sizes for cache type check */
++ if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
++ mask ^= arm64_ftr_reg_ctrel0.strict_mask;
++
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return (read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask);
+@@ -617,7 +621,14 @@ const struct arm64_cpu_capabilities arm6
+ {
+ .desc = "Mismatched cache line size",
+ .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
+- .matches = has_mismatched_cache_line_size,
++ .matches = has_mismatched_cache_type,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .cpu_enable = cpu_enable_trap_ctr_access,
++ },
++ {
++ .desc = "Mismatched cache type",
++ .capability = ARM64_MISMATCHED_CACHE_TYPE,
++ .matches = has_mismatched_cache_type,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .cpu_enable = cpu_enable_trap_ctr_access,
+ },
diff --git a/patches.suse/arm64-Introduce-sysreg_clear_set.patch b/patches.suse/arm64-Introduce-sysreg_clear_set.patch
new file mode 100644
index 0000000000..eaeada185c
--- /dev/null
+++ b/patches.suse/arm64-Introduce-sysreg_clear_set.patch
@@ -0,0 +1,60 @@
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 15 Jun 2018 16:47:23 +0100
+Subject: arm64: Introduce sysreg_clear_set()
+
+Git-commit: 6ebdf4db8fa564a150f46d32178af0873eb5abbb
+Patch-mainline: v4.18-rc2
+References: jsc#ECO-561
+
+Currently we have a couple of helpers to manipulate bits in particular
+sysregs:
+
+ * config_sctlr_el1(u32 clear, u32 set)
+
+ * change_cpacr(u64 val, u64 mask)
+
+The parameters of these differ in naming convention, order, and size,
+which is unfortunate. They also differ slightly in behaviour, as
+change_cpacr() skips the sysreg write if the bits are unchanged, which
+is a useful optimization when sysreg writes are expensive.
+
+Before we gain yet another sysreg manipulation function, let's
+unify these with a common helper, providing a consistent order for
+clear/set operands, and the write skipping behaviour from
+change_cpacr(). Code will be migrated to the new helper in subsequent
+patches.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/sysreg.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index 6171178075dc..a8f84812c6e8 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -728,6 +728,17 @@ asm(
+ asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
+ } while (0)
+
++/*
++ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
++ * set mask are set. Other bits are left as-is.
++ */
++#define sysreg_clear_set(sysreg, clear, set) do { \
++ u64 __scs_val = read_sysreg(sysreg); \
++ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
++ if (__scs_new != __scs_val) \
++ write_sysreg(__scs_new, sysreg); \
++} while (0)
++
+ static inline void config_sctlr_el1(u32 clear, u32 set)
+ {
+ u32 val;
+--
+2.16.4
+
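As a usage sketch, config_sctlr_el1() (one of the two helpers this is meant to unify) could later be expressed on top of the new macro; this migration is described above as future work and is not part of this patch:

    /* Sketch of the follow-up migration: the old helper becomes a
     * one-liner and gains the write-skipping behaviour for free. */
    static inline void config_sctlr_el1(u32 clear, u32 set)
    {
            sysreg_clear_set(sctlr_el1, clear, set);
    }
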
diff --git a/patches.suse/arm64-KVM-Add-invalidate_icache_range-helper.patch b/patches.suse/arm64-KVM-Add-invalidate_icache_range-helper.patch
new file mode 100644
index 0000000000..90bd2fde50
--- /dev/null
+++ b/patches.suse/arm64-KVM-Add-invalidate_icache_range-helper.patch
@@ -0,0 +1,142 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:16 +0100
+Subject: arm64: KVM: Add invalidate_icache_range helper
+
+Git-commit: 4fee94736603cd6fd83c1ea1ee0388d1d2dbe11b
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+We currently tightly couple dcache clean with icache invalidation,
+but KVM could do without the initial flush to PoU, as we've
+already flushed things to PoC.
+
+Let's introduce invalidate_icache_range which is limited to
+invalidating the icache from the linear mapping (and thus
+has none of the userspace fault handling complexity), and
+wire it into KVM instead of flush_icache_range.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/assembler.h | 21 +++++++++++++++++++++
+ arch/arm64/include/asm/cacheflush.h | 7 +++++++
+ arch/arm64/include/asm/kvm_mmu.h | 4 ++--
+ arch/arm64/mm/cache.S | 32 ++++++++++++++++++++++----------
+ 4 files changed, 52 insertions(+), 12 deletions(-)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -429,6 +429,27 @@ alternative_endif
+ .endm
+
+ /*
++ * Macro to perform an instruction cache maintenance for the interval
++ * [start, end)
++ *
++ * start, end: virtual addresses describing the region
++ * label: A label to branch to on user fault.
++ * Corrupts: tmp1, tmp2
++ */
++ .macro invalidate_icache_by_line start, end, tmp1, tmp2, label
++ icache_line_size \tmp1, \tmp2
++ sub \tmp2, \tmp1, #1
++ bic \tmp2, \start, \tmp2
++9997:
++USER(\label, ic ivau, \tmp2) // invalidate I line PoU
++ add \tmp2, \tmp2, \tmp1
++ cmp \tmp2, \end
++ b.lo 9997b
++ dsb ish
++ isb
++ .endm
++
++/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+ .macro reset_pmuserenr_el0, tmpreg
+--- a/arch/arm64/include/asm/cacheflush.h
++++ b/arch/arm64/include/asm/cacheflush.h
+@@ -52,6 +52,12 @@
+ * - start - virtual start address
+ * - end - virtual end address
+ *
++ * invalidate_icache_range(start, end)
++ *
++ * Invalidate the I-cache in the region described by start, end.
++ * - start - virtual start address
++ * - end - virtual end address
++ *
+ * __flush_cache_user_range(start, end)
+ *
+ * Ensure coherency between the I-cache and the D-cache in the
+@@ -66,6 +72,7 @@
+ * - size - region size
+ */
+ extern void flush_icache_range(unsigned long start, unsigned long end);
++extern int invalidate_icache_range(unsigned long start, unsigned long end);
+ extern void __flush_dcache_area(void *addr, size_t len);
+ extern void __clean_dcache_area_poc(void *addr, size_t len);
+ extern void __clean_dcache_area_pou(void *addr, size_t len);
+--- a/arch/arm64/include/asm/kvm_mmu.h
++++ b/arch/arm64/include/asm/kvm_mmu.h
+@@ -250,8 +250,8 @@ static inline void __invalidate_icache_g
+ /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
+ void *va = page_address(pfn_to_page(pfn));
+
+- flush_icache_range((unsigned long)va,
+- (unsigned long)va + size);
++ invalidate_icache_range((unsigned long)va,
++ (unsigned long)va + size);
+ }
+ }
+
+--- a/arch/arm64/mm/cache.S
++++ b/arch/arm64/mm/cache.S
+@@ -60,16 +60,7 @@ user_alt 9f, "dc cvau, x4", "dc civac,
+ b.lo 1b
+ dsb ish
+
+- icache_line_size x2, x3
+- sub x3, x2, #1
+- bic x4, x0, x3
+-1:
+-USER(9f, ic ivau, x4 ) // invalidate I line PoU
+- add x4, x4, x2
+- cmp x4, x1
+- b.lo 1b
+- dsb ish
+- isb
++ invalidate_icache_by_line x0, x1, x2, x3, 9f
+ mov x0, #0
+ 1:
+ uaccess_ttbr0_disable x1, x2
+@@ -81,6 +72,27 @@ ENDPROC(flush_icache_range)
+ ENDPROC(__flush_cache_user_range)
+
+ /*
++ * invalidate_icache_range(start,end)
++ *
++ * Ensure that the I cache is invalid within specified region.
++ *
++ * - start - virtual start address of region
++ * - end - virtual end address of region
++ */
++ENTRY(invalidate_icache_range)
++ uaccess_ttbr0_enable x2, x3
++
++ invalidate_icache_by_line x0, x1, x2, x3, 2f
++ mov x0, xzr
++1:
++ uaccess_ttbr0_disable x1
++ ret
++2:
++ mov x0, #-EFAULT
++ b 1b
++ENDPROC(invalidate_icache_range)
++
++/*
+ * __flush_dcache_area(kaddr, size)
+ *
+ * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
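
The resulting call-site shape in KVM, sketched below, is what motivates the helper: the data side has already been cleaned to the PoC, so only the I-side invalidation over the linear mapping remains (names as used in the kvm_mmu.h hunk above):

    /* Sketch: dcache was cleaned to PoC by __clean_dcache_guest_page(),
     * so skip the DC CVAU pass that flush_icache_range() would do and
     * invalidate the icache only. */
    void *va = page_address(pfn_to_page(pfn));

    invalidate_icache_range((unsigned long)va, (unsigned long)va + size);
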
diff --git a/patches.suse/arm64-KVM-PTE-PMD-S2-XN-bit-definition.patch b/patches.suse/arm64-KVM-PTE-PMD-S2-XN-bit-definition.patch
new file mode 100644
index 0000000000..35c0cd47b2
--- /dev/null
+++ b/patches.suse/arm64-KVM-PTE-PMD-S2-XN-bit-definition.patch
@@ -0,0 +1,38 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 23 Oct 2017 17:11:18 +0100
+Subject: arm64: KVM: PTE/PMD S2 XN bit definition
+
+Git-commit: fefb876b9b96fa7e4ed3d906979ea45b4cf07349
+Patch-mainline: v4.16-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+As we're about to make S2 page-tables eXecute Never by default,
+add the required bits for both PMDs and PTEs.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/pgtable-hwdef.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
+index eb0c2bd90de9..af035331fb09 100644
+--- a/arch/arm64/include/asm/pgtable-hwdef.h
++++ b/arch/arm64/include/asm/pgtable-hwdef.h
+@@ -177,9 +177,11 @@
+ */
+ #define PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[2:1] */
+ #define PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
++#define PTE_S2_XN (_AT(pteval_t, 2) << 53) /* XN[1:0] */
+
+ #define PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[2:1] */
+ #define PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
++#define PMD_S2_XN (_AT(pmdval_t, 2) << 53) /* XN[1:0] */
+
+ /*
+ * Memory Attribute override for Stage-2 (MemAttr[3:0])
+--
+2.16.4
+
diff --git a/patches.suse/arm64-Make-ARM64_ERRATUM_1188873-depend-on-COMPAT.patch b/patches.suse/arm64-Make-ARM64_ERRATUM_1188873-depend-on-COMPAT.patch
new file mode 100644
index 0000000000..476a7a920f
--- /dev/null
+++ b/patches.suse/arm64-Make-ARM64_ERRATUM_1188873-depend-on-COMPAT.patch
@@ -0,0 +1,33 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 15 Apr 2019 13:03:52 +0100
+Subject: arm64: Make ARM64_ERRATUM_1188873 depend on COMPAT
+
+Git-commit: c2b5bba3967a000764e9148e6f020d776b7ecd82
+Patch-mainline: v5.2-rc1
+References: jsc#ECO-561
+
+Since ARM64_ERRATUM_1188873 only affects AArch32 EL0, it makes some
+sense that it should depend on COMPAT.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 7e34b9eba5de..560f2a860637 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -477,6 +477,7 @@ config ARM64_ERRATUM_1024718
+ config ARM64_ERRATUM_1188873
+ bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ default y
++ depends on COMPAT
+ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+ This option adds work arounds for ARM Cortex-A76 erratum 1188873
+--
+2.16.4
+
diff --git a/patches.suse/arm64-Restrict-ARM64_ERRATUM_1188873-mitigation-to-A.patch b/patches.suse/arm64-Restrict-ARM64_ERRATUM_1188873-mitigation-to-A.patch
new file mode 100644
index 0000000000..93c4630a4f
--- /dev/null
+++ b/patches.suse/arm64-Restrict-ARM64_ERRATUM_1188873-mitigation-to-A.patch
@@ -0,0 +1,96 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Mon, 15 Apr 2019 13:03:51 +0100
+Subject: arm64: Restrict ARM64_ERRATUM_1188873 mitigation to AArch32
+
+Git-commit: 0f80cad3124f986d0e46c14d46b8da06d87a2bf4
+Patch-mainline: v5.2-rc1
+References: jsc#ECO-561
+
+We currently deal with ARM64_ERRATUM_1188873 by always trapping EL0
+accesses for both instruction sets. Although nothing wrong comes out
+of that, people trying to squeeze the last drop of performance from
+buggy HW find this over the top. Oh well.
+
+Let's change the mitigation by flipping the counter enable bit
+on return to userspace. Non-broken HW gets an extra branch on
+the fast path, which is hopefully not the end of the world.
+The arch timer workaround is also removed.
+
+Acked-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/entry.S | 19 +++++++++++++++++--
+ drivers/clocksource/arm_arch_timer.c | 15 ---------------
+ 2 files changed, 17 insertions(+), 17 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -330,6 +330,21 @@ alternative_if ARM64_WORKAROUND_845719
+ alternative_else_nop_endif
+ #endif
+ 3:
++#ifdef CONFIG_ARM64_ERRATUM_1188873
++alternative_if_not ARM64_WORKAROUND_1188873
++ b 4f
++alternative_else_nop_endif
++ /*
++ * if (x22.mode32 == cntkctl_el1.el0vcten)
++ * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
++ */
++ mrs x1, cntkctl_el1
++ eon x0, x1, x22, lsr #3
++ tbz x0, #1, 4f
++ eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
++ msr cntkctl_el1, x1
++4:
++#endif
+ apply_ssbd 0, 5f, x0, x1
+ 5:
+ .endif
+@@ -357,11 +372,11 @@ alternative_else_nop_endif
+ .if \el == 0
+ alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+ #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+- bne 4f
++ bne 5f
+ msr far_el1, x30
+ tramp_alias x30, tramp_exit_native
+ br x30
+-4:
++5:
+ tramp_alias x30, tramp_exit_compat
+ br x30
+ #endif
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -318,13 +318,6 @@ static u64 notrace arm64_858921_read_cnt
+ }
+ #endif
+
+-#ifdef CONFIG_ARM64_ERRATUM_1188873
+-static u64 notrace arm64_1188873_read_cntvct_el0(void)
+-{
+- return read_sysreg(cntvct_el0);
+-}
+-#endif
+-
+ #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+ DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
+ timer_unstable_counter_workaround);
+@@ -415,14 +408,6 @@ static const struct arch_timer_erratum_w
+ .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
+ },
+ #endif
+-#ifdef CONFIG_ARM64_ERRATUM_1188873
+- {
+- .match_type = ate_match_local_cap_id,
+- .id = (void *)ARM64_WORKAROUND_1188873,
+- .desc = "ARM erratum 1188873",
+- .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
+- },
+-#endif
+ };
+
+ typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
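[Aside: the entry.S hunk above is easier to follow as C. A sketch of the same toggle, on the assumption that it runs on the return-to-user path; the helper name is ours and the real code must stay in assembly there:]

        #include <clocksource/arm_arch_timer.h> /* ARCH_TIMER_USR_VCT_ACCESS_EN */

        /* Keep CNTKCTL_EL1.EL0VCTEN set only when returning to an AArch64
         * task; AArch32 EL0 counter reads then trap and are emulated. */
        static void sketch_update_cntkctl(bool returning_to_aarch32)
        {
                u64 cntkctl = read_sysreg(cntkctl_el1);
                bool el0vcten = cntkctl & ARCH_TIMER_USR_VCT_ACCESS_EN;

                if (returning_to_aarch32 == el0vcten) {
                        cntkctl ^= ARCH_TIMER_USR_VCT_ACCESS_EN; /* flip EL0VCTEN */
                        write_sysreg(cntkctl, cntkctl_el1);
                }
        }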
diff --git a/patches.suse/arm64-arch_timer-Add-workaround-for-ARM-erratum-1188.patch b/patches.suse/arm64-arch_timer-Add-workaround-for-ARM-erratum-1188.patch
new file mode 100644
index 0000000000..5e1d0edfe0
--- /dev/null
+++ b/patches.suse/arm64-arch_timer-Add-workaround-for-ARM-erratum-1188.patch
@@ -0,0 +1,123 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Sep 2018 17:15:34 +0100
+Subject: arm64: arch_timer: Add workaround for ARM erratum 1188873
+
+Git-commit: 95b861a4a6d94f64d5242605569218160ebacdbe
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+When running on Cortex-A76, a timer access from an AArch32 EL0
+task may end up with a corrupted value or register. The workaround for
+this is to trap these accesses at EL1/EL2 and execute them there.
+
+This only affects versions r0p0, r1p0 and r2p0 of the CPU.
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/Kconfig | 12 ++++++++++++
+ arch/arm64/include/asm/cpucaps.h | 1 +
+ arch/arm64/include/asm/cputype.h | 2 ++
+ arch/arm64/kernel/cpu_errata.c | 8 ++++++++
+ drivers/clocksource/arm_arch_timer.c | 15 +++++++++++++++
+ 5 files changed, 38 insertions(+)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -454,6 +454,18 @@ config ARM64_ERRATUM_1024718
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_1188873
++ bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
++ default y
++ help
++ This option adds work arounds for ARM Cortex-A76 erratum 1188873
++
++ Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
++ register corruption when accessing the timer registers from
++ AArch32 userspace.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -49,6 +49,7 @@
+ #define ARM64_HAS_CNP 28
+ #define ARM64_WORKAROUND_843419 29
+ #define ARM64_SSBS 30
++#define ARM64_WORKAROUND_1188873 31
+ #define ARM64_HAS_CACHE_IDC 32
+ #define ARM64_HAS_CACHE_DIC 33
+ #define ARM64_MISMATCHED_CACHE_TYPE 34
+--- a/arch/arm64/include/asm/cputype.h
++++ b/arch/arm64/include/asm/cputype.h
+@@ -85,6 +85,7 @@
+ #define ARM_CPU_PART_CORTEX_A75 0xD0A
+ #define ARM_CPU_PART_CORTEX_A35 0xD04
+ #define ARM_CPU_PART_CORTEX_A55 0xD05
++#define ARM_CPU_PART_CORTEX_A76 0xD0B
+
+ #define APM_CPU_PART_POTENZA 0x000
+
+@@ -105,6 +106,7 @@
+ #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+ #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
+ #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
++#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+ #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+ #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+ #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -702,6 +702,14 @@ const struct arm64_cpu_capabilities arm6
+ .matches = has_ssbd_mitigation,
+ },
+ #endif
++#ifdef CONFIG_ARM64_ERRATUM_1188873
++ {
++ /* Cortex-A76 r0p0 to r2p0 */
++ .desc = "ARM erratum 1188873",
++ .capability = ARM64_WORKAROUND_1188873,
++ ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
++ },
++#endif
+ {
+ }
+ };
+--- a/drivers/clocksource/arm_arch_timer.c
++++ b/drivers/clocksource/arm_arch_timer.c
+@@ -318,6 +318,13 @@ static u64 notrace arm64_858921_read_cnt
+ }
+ #endif
+
++#ifdef CONFIG_ARM64_ERRATUM_1188873
++static u64 notrace arm64_1188873_read_cntvct_el0(void)
++{
++ return read_sysreg(cntvct_el0);
++}
++#endif
++
+ #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
+ DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *,
+ timer_unstable_counter_workaround);
+@@ -408,6 +415,14 @@ static const struct arch_timer_erratum_w
+ .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
+ },
+ #endif
++#ifdef CONFIG_ARM64_ERRATUM_1188873
++ {
++ .match_type = ate_match_local_cap_id,
++ .id = (void *)ARM64_WORKAROUND_1188873,
++ .desc = "ARM erratum 1188873",
++ .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
++ },
++#endif
+ };
+
+ typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
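[Aside: the ERRATA_MIDR_RANGE() entry above matches on MIDR_EL1. A sketch of the equivalent check, assuming the midr_range helpers from the capabilities rework earlier in this series; the function name is ours:]

        /* Is this CPU a Cortex-A76 in the affected window,
         * variant:revision 0:0 (r0p0) through 2:0 (r2p0)? */
        static bool sketch_is_affected_a76(void)
        {
                struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0);

                return is_midr_in_range(read_cpuid_id(), &range);
        }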
diff --git a/patches.suse/arm64-arch_timer-avoid-unused-function-warning.patch b/patches.suse/arm64-arch_timer-avoid-unused-function-warning.patch
new file mode 100644
index 0000000000..15297b2cf0
--- /dev/null
+++ b/patches.suse/arm64-arch_timer-avoid-unused-function-warning.patch
@@ -0,0 +1,43 @@
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Tue, 2 Oct 2018 23:11:44 +0200
+Subject: arm64: arch_timer: avoid unused function warning
+
+Git-commit: 040f340134751d73bd03ee92fabb992946c55b3d
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+arm64_1188873_read_cntvct_el0() is protected by the correct
+CONFIG_ARM64_ERRATUM_1188873 #ifdef, but the only reference to it is
+also inside of a CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND section,
+and causes a warning if that is disabled:
+
+drivers/clocksource/arm_arch_timer.c:323:20: error: 'arm64_1188873_read_cntvct_el0' defined but not used [-Werror=unused-function]
+
+Since the erratum requires that we always apply the workaround
+in the timer driver, select that symbol as we do for SoC
+specific errata.
+
+Fixes: 95b861a4a6d9 ("arm64: arch_timer: Add workaround for ARM erratum 1188873")
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 52985d175e5a..a8ae30fab508 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -484,6 +484,7 @@ config ARM64_ERRATUM_1024718
+ config ARM64_ERRATUM_1188873
+ bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+ default y
++ select ARM_ARCH_TIMER_OOL_WORKAROUND
+ help
+ This option adds work arounds for ARM Cortex-A76 erratum 1188873
+
+--
+2.16.4
+
diff --git a/patches.suse/arm64-compat-Add-CNTFRQ-trap-handler.patch b/patches.suse/arm64-compat-Add-CNTFRQ-trap-handler.patch
new file mode 100644
index 0000000000..3bc8d2402c
--- /dev/null
+++ b/patches.suse/arm64-compat-Add-CNTFRQ-trap-handler.patch
@@ -0,0 +1,64 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Sep 2018 17:15:33 +0100
+Subject: arm64: compat: Add CNTFRQ trap handler
+
+Git-commit: 32a3e635fb0ecc1b197d54f710e76c6481cf19f0
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Just like CNTVCT, we need to handle userspace trapping into the
+kernel if we've decided that the timer wasn't fit for purpose...
+64bit userspace is already dealt with, but we're missing the
+equivalent compat handling.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/esr.h | 3 +++
+ arch/arm64/kernel/traps.c | 13 +++++++++++++
+ 2 files changed, 16 insertions(+)
+
+diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
+index 5548712ce6e5..fb7dfe1b51bb 100644
+--- a/arch/arm64/include/asm/esr.h
++++ b/arch/arm64/include/asm/esr.h
+@@ -318,6 +318,9 @@
+ #define ESR_ELx_CP15_64_ISS_SYS_CNTVCT (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
+ ESR_ELx_CP15_64_ISS_DIR_READ)
+
++#define ESR_ELx_CP15_32_ISS_SYS_CNTFRQ (ESR_ELx_CP15_32_ISS_SYS_VAL(0, 0, 14, 0) |\
++ ESR_ELx_CP15_32_ISS_DIR_READ)
++
+ #ifndef __ASSEMBLY__
+ #include <asm/types.h>
+
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 3602b900ff1c..58134a97928f 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -625,7 +625,20 @@ static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
+ arm64_skip_faulting_instruction(regs, sz);
+ }
+
++static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
++{
++ int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
++
++ pt_regs_write_reg(regs, reg, arch_timer_get_rate());
++ arm64_compat_skip_faulting_instruction(regs, 4);
++}
++
+ static struct sys64_hook cp15_32_hooks[] = {
++ {
++ .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
++ .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
++ .handler = compat_cntfrq_read_handler,
++ },
+ {},
+ };
+
+--
+2.16.4
+
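[Aside: a worked example of the Rt decode performed by the handler above, using a hypothetical ESR ISS value with the other fields elided; the CP15_32 decoding macros come from the decoding-macros patch in this series:]

        /* An AArch32 "mrc p15, 0, r5, c14, c0, 0" traps with ISS.Rt = 5. */
        unsigned int esr_iss = 5 << ESR_ELx_CP15_32_ISS_RT_SHIFT;
        int reg = (esr_iss & ESR_ELx_CP15_32_ISS_RT_MASK) >>
                  ESR_ELx_CP15_32_ISS_RT_SHIFT;
        /* reg == 5: the handler writes the timer frequency to AArch32 r5
         * and then skips the 4-byte instruction. */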
diff --git a/patches.suse/arm64-compat-Add-CNTVCT-trap-handler.patch b/patches.suse/arm64-compat-Add-CNTVCT-trap-handler.patch
new file mode 100644
index 0000000000..c5d56bbb42
--- /dev/null
+++ b/patches.suse/arm64-compat-Add-CNTVCT-trap-handler.patch
@@ -0,0 +1,67 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Sep 2018 17:15:32 +0100
+Subject: arm64: compat: Add CNTVCT trap handler
+
+Git-commit: 50de013d22e4e112d7b0778a0e7d032f16c46778
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Since people seem to make a point in breaking the userspace visible
+counter, we have no choice but to trap the access. We already do this
+for 64bit userspace, but this is lacking for compat. Let's provide
+the required handler.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/esr.h | 3 +++
+ arch/arm64/kernel/traps.c | 16 ++++++++++++++++
+ 2 files changed, 19 insertions(+)
+
+diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
+index 56d32e5557a5..5548712ce6e5 100644
+--- a/arch/arm64/include/asm/esr.h
++++ b/arch/arm64/include/asm/esr.h
+@@ -315,6 +315,9 @@
+ ESR_ELx_CP15_64_ISS_CRM_MASK | \
+ ESR_ELx_CP15_64_ISS_DIR_MASK)
+
++#define ESR_ELx_CP15_64_ISS_SYS_CNTVCT (ESR_ELx_CP15_64_ISS_SYS_VAL(1, 14) | \
++ ESR_ELx_CP15_64_ISS_DIR_READ)
++
+ #ifndef __ASSEMBLY__
+ #include <asm/types.h>
+
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 76ffb9f42aa4..3602b900ff1c 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -629,7 +629,23 @@ static struct sys64_hook cp15_32_hooks[] = {
+ {},
+ };
+
++static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
++{
++ int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
++ int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
++ u64 val = arch_counter_get_cntvct();
++
++ pt_regs_write_reg(regs, rt, lower_32_bits(val));
++ pt_regs_write_reg(regs, rt2, upper_32_bits(val));
++ arm64_compat_skip_faulting_instruction(regs, 4);
++}
++
+ static struct sys64_hook cp15_64_hooks[] = {
++ {
++ .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
++ .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
++ .handler = compat_cntvct_read_handler,
++ },
+ {},
+ };
+
+--
+2.16.4
+
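[Aside: for a 64-bit CP15 read such as CNTVCT, the trapped MRRC names two destination registers; the handler above splits the counter value between them. A worked example of the split, on a hypothetical counter value:]

        u64 val = 0x0000000123456789ULL;        /* hypothetical CNTVCT reading */
        u32 rt_val  = lower_32_bits(val);       /* 0x23456789, written to Rt  */
        u32 rt2_val = upper_32_bits(val);       /* 0x00000001, written to Rt2 */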
diff --git a/patches.suse/arm64-compat-Add-condition-code-checks-and-IT-advanc.patch b/patches.suse/arm64-compat-Add-condition-code-checks-and-IT-advanc.patch
new file mode 100644
index 0000000000..82a4335a4f
--- /dev/null
+++ b/patches.suse/arm64-compat-Add-condition-code-checks-and-IT-advanc.patch
@@ -0,0 +1,123 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Sep 2018 17:15:30 +0100
+Subject: arm64: compat: Add condition code checks and IT advance
+
+Git-commit: 1f1c014035a8084a768e7e902c6f5857995b1220
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Here's a /really nice/ part of the architecture: a CP15 access is
+allowed to trap even if it fails its condition check, and SW must
+handle it. This includes decoding the IT state if this happens in
+am IT block. As a consequence, SW must also deal with advancing
+the IT state machine.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/traps.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 85 insertions(+)
+
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 0e2665936493..95a646c154fe 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -549,8 +549,93 @@ static struct sys64_hook sys64_hooks[] = {
+
+
+ #ifdef CONFIG_COMPAT
++#define PSTATE_IT_1_0_SHIFT 25
++#define PSTATE_IT_1_0_MASK (0x3 << PSTATE_IT_1_0_SHIFT)
++#define PSTATE_IT_7_2_SHIFT 10
++#define PSTATE_IT_7_2_MASK (0x3f << PSTATE_IT_7_2_SHIFT)
++
++static u32 compat_get_it_state(struct pt_regs *regs)
++{
++ u32 it, pstate = regs->pstate;
++
++ it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
++ it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
++
++ return it;
++}
++
++static void compat_set_it_state(struct pt_regs *regs, u32 it)
++{
++ u32 pstate_it;
++
++ pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
++ pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
++
++ regs->pstate &= ~PSR_AA32_IT_MASK;
++ regs->pstate |= pstate_it;
++}
++
++static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
++{
++ int cond;
++
++ /* Only a T32 instruction can trap without CV being set */
++ if (!(esr & ESR_ELx_CV)) {
++ u32 it;
++
++ it = compat_get_it_state(regs);
++ if (!it)
++ return true;
++
++ cond = it >> 4;
++ } else {
++ cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
++ }
++
++ return aarch32_opcode_cond_checks[cond](regs->pstate);
++}
++
++static void advance_itstate(struct pt_regs *regs)
++{
++ u32 it;
++
++ /* ARM mode */
++ if (!(regs->pstate & PSR_AA32_T_BIT) ||
++ !(regs->pstate & PSR_AA32_IT_MASK))
++ return;
++
++ it = compat_get_it_state(regs);
++
++ /*
++ * If this is the last instruction of the block, wipe the IT
++ * state. Otherwise advance it.
++ */
++ if (!(it & 7))
++ it = 0;
++ else
++ it = (it & 0xe0) | ((it << 1) & 0x1f);
++
++ compat_set_it_state(regs, it);
++}
++
++static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
++ unsigned int sz)
++{
++ advance_itstate(regs);
++ arm64_skip_faulting_instruction(regs, sz);
++}
++
+ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
+ {
++ if (!cp15_cond_valid(esr, regs)) {
++ /*
++ * There is no T16 variant of a CP access, so we
++ * always advance PC by 4 bytes.
++ */
++ arm64_compat_skip_faulting_instruction(regs, 4);
++ return;
++ }
++
+ /*
+ * New cp15 instructions may previously have been undefined at
+ * EL0. Fall back to our usual undefined instruction handler
+--
+2.16.4
+
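[Aside: the advance rule "it = (it & 0xe0) | ((it << 1) & 0x1f)" keeps the top three condition bits and shifts the mask left one step, retiring one instruction from the block. A worked example on a hypothetical ITSTATE value:]

        u32 it = 0xca;          /* 0b11001010: sample cond/mask encoding */

        if (!(it & 7))          /* mask exhausted: last instruction of the block */
                it = 0;
        else                    /* otherwise retire one slot */
                it = (it & 0xe0) | ((it << 1) & 0x1f);
        /* it == 0xd4 here: top bits preserved, low five shifted left by one */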
diff --git a/patches.suse/arm64-compat-Add-cp15_32-and-cp15_64-handler-arrays.patch b/patches.suse/arm64-compat-Add-cp15_32-and-cp15_64-handler-arrays.patch
new file mode 100644
index 0000000000..77fefd18cb
--- /dev/null
+++ b/patches.suse/arm64-compat-Add-cp15_32-and-cp15_64-handler-arrays.patch
@@ -0,0 +1,71 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Sep 2018 17:15:31 +0100
+Subject: arm64: compat: Add cp15_32 and cp15_64 handler arrays
+
+Git-commit: 2a8905e18c55d5576d7a53da495b4de0cfcbc459
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+We're now ready to start handling CP15 access. Let's add (empty)
+arrays for both 32 and 64bit accessors, and the code that deals
+with them.
+
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/traps.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 95a646c154fe..76ffb9f42aa4 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -625,8 +625,18 @@ static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
+ arm64_skip_faulting_instruction(regs, sz);
+ }
+
++static struct sys64_hook cp15_32_hooks[] = {
++ {},
++};
++
++static struct sys64_hook cp15_64_hooks[] = {
++ {},
++};
++
+ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
+ {
++ struct sys64_hook *hook, *hook_base;
++
+ if (!cp15_cond_valid(esr, regs)) {
+ /*
+ * There is no T16 variant of a CP access, so we
+@@ -636,6 +646,24 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
+ return;
+ }
+
++ switch (ESR_ELx_EC(esr)) {
++ case ESR_ELx_EC_CP15_32:
++ hook_base = cp15_32_hooks;
++ break;
++ case ESR_ELx_EC_CP15_64:
++ hook_base = cp15_64_hooks;
++ break;
++ default:
++ do_undefinstr(regs);
++ return;
++ }
++
++ for (hook = hook_base; hook->handler; hook++)
++ if ((hook->esr_mask & esr) == hook->esr_val) {
++ hook->handler(esr, regs);
++ return;
++ }
++
+ /*
+ * New cp15 instructions may previously have been undefined at
+ * EL0. Fall back to our usual undefined instruction handler
+--
+2.16.4
+
diff --git a/patches.suse/arm64-compat-Add-separate-CP15-trapping-hook.patch b/patches.suse/arm64-compat-Add-separate-CP15-trapping-hook.patch
new file mode 100644
index 0000000000..14420a9b0e
--- /dev/null
+++ b/patches.suse/arm64-compat-Add-separate-CP15-trapping-hook.patch
@@ -0,0 +1,82 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Thu, 27 Sep 2018 17:15:29 +0100
+Subject: arm64: compat: Add separate CP15 trapping hook
+
+Git-commit: 70c63cdfd6ee615714c5453cff370032587723c2
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Instead of directly generating an UNDEF when trapping a CP15 access,
+let's add a new entry point to that effect (which only generates an
+UNDEF for now).
+
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/entry.S | 15 +++++++++++++--
+ arch/arm64/kernel/traps.c | 13 +++++++++++++
+ 2 files changed, 26 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 8556876c9109..f0a0464d4809 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -697,9 +697,9 @@ el0_sync_compat:
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
+ b.eq el0_undef
+ cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
+- b.eq el0_undef
++ b.eq el0_cp15
+ cmp x24, #ESR_ELx_EC_CP15_64 // CP15 MRRC/MCRR trap
+- b.eq el0_undef
++ b.eq el0_cp15
+ cmp x24, #ESR_ELx_EC_CP14_MR // CP14 MRC/MCR trap
+ b.eq el0_undef
+ cmp x24, #ESR_ELx_EC_CP14_LS // CP14 LDC/STC trap
+@@ -722,6 +722,17 @@ el0_irq_compat:
+ el0_error_compat:
+ kernel_entry 0, 32
+ b el0_error_naked
++
++el0_cp15:
++ /*
++ * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
++ */
++ enable_daif
++ ct_user_exit
++ mov x0, x25
++ mov x1, sp
++ bl do_cp15instr
++ b ret_to_user
+ #endif
+
+ el0_da:
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index 21689c6a985f..0e2665936493 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -547,6 +547,19 @@ static struct sys64_hook sys64_hooks[] = {
+ {},
+ };
+
++
++#ifdef CONFIG_COMPAT
++asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
++{
++ /*
++ * New cp15 instructions may previously have been undefined at
++ * EL0. Fall back to our usual undefined instruction handler
++ * so that we handle these consistently.
++ */
++ do_undefinstr(regs);
++}
++#endif
++
+ asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
+ {
+ struct sys64_hook *hook;
+--
+2.16.4
+
diff --git a/patches.suse/arm64-compat-Workaround-Neoverse-N1-1542419-for-comp.patch b/patches.suse/arm64-compat-Workaround-Neoverse-N1-1542419-for-comp.patch
new file mode 100644
index 0000000000..ee4e8eb7e2
--- /dev/null
+++ b/patches.suse/arm64-compat-Workaround-Neoverse-N1-1542419-for-comp.patch
@@ -0,0 +1,54 @@
+From: James Morse <james.morse@arm.com>
+Date: Thu, 17 Oct 2019 18:43:00 +0100
+Subject: arm64: compat: Workaround Neoverse-N1 #1542419 for compat user-space
+
+Git-commit: 222fc0c8503d98cec3cb2bac2780cdd21a6e31c0
+Patch-mainline: Queued
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
+References: jsc#ECO-561,jsc#SLE-10671
+
+Compat user-space is unable to perform ICIMVAU instructions
+directly; instead it uses a compat syscall. Add the workaround for
+Neoverse-N1 #1542419 to this code path.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ arch/arm64/kernel/sys_compat.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/arch/arm64/kernel/sys_compat.c
++++ b/arch/arm64/kernel/sys_compat.c
+@@ -19,6 +19,7 @@
+ */
+
+ #include <linux/compat.h>
++#include <linux/cpufeature.h>
+ #include <linux/personality.h>
+ #include <linux/sched.h>
+ #include <linux/sched/signal.h>
+@@ -27,6 +28,7 @@
+ #include <linux/uaccess.h>
+
+ #include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
+ #include <asm/unistd.h>
+
+ static long
+@@ -40,6 +42,15 @@ __do_compat_cache_op(unsigned long start
+ if (fatal_signal_pending(current))
+ return 0;
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
++ /*
++ * The workaround requires an inner-shareable tlbi.
++ * We pick the reserved-ASID to minimise the impact.
++ */
++ __tlbi(aside1is, 0);
++ dsb(ish);
++ }
++
+ ret = __flush_cache_user_range(start, start + chunk);
+ if (ret)
+ return ret;
diff --git a/patches.suse/arm64-cpu-Move-errata-and-feature-enable-callbacks-c.patch b/patches.suse/arm64-cpu-Move-errata-and-feature-enable-callbacks-c.patch
new file mode 100644
index 0000000000..6f2ba6bbfe
--- /dev/null
+++ b/patches.suse/arm64-cpu-Move-errata-and-feature-enable-callbacks-c.patch
@@ -0,0 +1,144 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 7 Aug 2018 13:53:41 +0100
+Subject: arm64: cpu: Move errata and feature enable callbacks closer to
+ callers
+
+Git-commit: b8925ee2e12d1cb9a11d6f28b5814f2bfa59dce1
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+The cpu errata and feature enable callbacks are only called via their
+respective arm64_cpu_capabilities structure and therefore shouldn't
+exist in the global namespace.
+
+Move the PAN, RAS and cache maintenance emulation enable callbacks into
+the same files as their corresponding arm64_cpu_capabilities structures,
+making them static in the process.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/processor.h | 4 ----
+ arch/arm64/kernel/cpu_errata.c | 6 ++++++
+ arch/arm64/kernel/cpufeature.c | 28 ++++++++++++++++++++++------
+ arch/arm64/kernel/traps.c | 5 -----
+ arch/arm64/mm/fault.c | 14 --------------
+ 5 files changed, 28 insertions(+), 29 deletions(-)
+
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index f6835374ed9f..2bf6691371c2 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -251,10 +251,6 @@ static inline void spin_lock_prefetch(const void *ptr)
+
+ #endif
+
+-void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused);
+-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused);
+-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused);
+-
+ extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
+ extern void __init minsigstksz_setup(void);
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index c063490d7b51..8900cb0615f8 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -433,6 +433,12 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
++static void __maybe_unused
++cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
++{
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
++}
++
+ #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
+ .matches = is_affected_midr_range, \
+ .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 9aa18a0df0d7..35796ca1db50 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1081,6 +1081,28 @@ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+
++#ifdef CONFIG_ARM64_PAN
++static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
++{
++ /*
++ * We modify PSTATE. This won't work from irq context as the PSTATE
++ * is discarded once we return from the exception.
++ */
++ WARN_ON_ONCE(in_interrupt());
++
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
++ asm(SET_PSTATE_PAN(1));
++}
++#endif /* CONFIG_ARM64_PAN */
++
++#ifdef CONFIG_ARM64_RAS_EXTN
++static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
++{
++ /* Firmware may have left a deferred SError in this register. */
++ write_sysreg_s(0, SYS_DISR_EL1);
++}
++#endif /* CONFIG_ARM64_RAS_EXTN */
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+@@ -1824,9 +1846,3 @@ static int __init enable_mrs_emulation(void)
+ }
+
+ core_initcall(enable_mrs_emulation);
+-
+-void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
+-{
+- /* Firmware may have left a deferred SError in this register. */
+- write_sysreg_s(0, SYS_DISR_EL1);
+-}
+diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
+index b9da093e0341..148de417ed3e 100644
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -412,11 +412,6 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ BUG_ON(!user_mode(regs));
+ }
+
+-void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+-{
+- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+-}
+-
+ #define __user_cache_maint(insn, address, res) \
+ if (address >= user_addr_max()) { \
+ res = -EFAULT; \
+diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
+index 50b30ff30de4..6342f1793c70 100644
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -864,17 +864,3 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
+ return rv;
+ }
+ NOKPROBE_SYMBOL(do_debug_exception);
+-
+-#ifdef CONFIG_ARM64_PAN
+-void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
+-{
+- /*
+- * We modify PSTATE. This won't work from irq context as the PSTATE
+- * is discarded once we return from the exception.
+- */
+- WARN_ON_ONCE(in_interrupt());
+-
+- sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
+- asm(SET_PSTATE_PAN(1));
+-}
+-#endif /* CONFIG_ARM64_PAN */
+--
+2.16.4
+
diff --git a/patches.suse/arm64-cpu_errata-Remove-ARM64_MISMATCHED_CACHE_LINE_.patch b/patches.suse/arm64-cpu_errata-Remove-ARM64_MISMATCHED_CACHE_LINE_.patch
new file mode 100644
index 0000000000..9a5322a8c5
--- /dev/null
+++ b/patches.suse/arm64-cpu_errata-Remove-ARM64_MISMATCHED_CACHE_LINE_.patch
@@ -0,0 +1,73 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 19 Sep 2018 11:41:21 +0100
+Subject: arm64: cpu_errata: Remove ARM64_MISMATCHED_CACHE_LINE_SIZE
+
+Git-commit: 880f7cc47265e7b195781dfa9a0cd62ef78304e3
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+There's no need to treat mismatched cache-line sizes reported by CTR_EL0
+differently to any other mismatched fields that we treat as "STRICT" in
+the cpufeature code. In both cases we need to trap and emulate EL0
+accesses to the register, so drop ARM64_MISMATCHED_CACHE_LINE_SIZE and
+rely on ARM64_MISMATCHED_CACHE_TYPE instead.
+
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+[catalin.marinas@arm.com: move ARM64_HAS_CNP in the empty cpucaps.h slot]
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ arch/arm64/include/asm/assembler.h | 7 +++----
+ arch/arm64/kernel/cpu_errata.c | 15 ++-------------
+ 2 files changed, 5 insertions(+), 17 deletions(-)
+
+--- a/arch/arm64/include/asm/assembler.h
++++ b/arch/arm64/include/asm/assembler.h
+@@ -315,12 +315,11 @@ alternative_endif
+ ldr \rd, [\rn, #MM_CONTEXT_ID]
+ .endm
+ /*
+- * read_ctr - read CTR_EL0. If the system has mismatched
+- * cache line sizes, provide the system wide safe value
+- * from arm64_ftr_reg_ctrel0.sys_val
++ * read_ctr - read CTR_EL0. If the system has mismatched register fields,
++ * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
+ */
+ .macro read_ctr, reg
+-alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
++alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
+ mrs \reg, ctr_el0 // read CTR
+ nop
+ alternative_else
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -54,11 +54,7 @@ static bool
+ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+ int scope)
+ {
+- u64 mask = CTR_CACHE_MINLINE_MASK;
+-
+- /* Skip matching the min line sizes for cache type check */
+- if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
+- mask ^= arm64_ftr_reg_ctrel0.strict_mask;
++ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return (read_cpuid_cachetype() & mask) !=
+@@ -646,14 +642,7 @@ const struct arm64_cpu_capabilities arm6
+ },
+ #endif
+ {
+- .desc = "Mismatched cache line size",
+- .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
+- .matches = has_mismatched_cache_type,
+- .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+- .cpu_enable = cpu_enable_trap_ctr_access,
+- },
+- {
+- .desc = "Mismatched cache type",
++ .desc = "Mismatched cache type (CTR_EL0)",
+ .capability = ARM64_MISMATCHED_CACHE_TYPE,
+ .matches = has_mismatched_cache_type,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
diff --git a/patches.suse/arm64-cpufeature-Convert-hook_lock-to-raw_spin_lock_.patch b/patches.suse/arm64-cpufeature-Convert-hook_lock-to-raw_spin_lock_.patch
new file mode 100644
index 0000000000..d4a8ea5e08
--- /dev/null
+++ b/patches.suse/arm64-cpufeature-Convert-hook_lock-to-raw_spin_lock_.patch
@@ -0,0 +1,51 @@
+From: Julien Grall <julien.grall@arm.com>
+Date: Thu, 30 May 2019 12:30:58 +0100
+Subject: arm64/cpufeature: Convert hook_lock to raw_spin_lock_t in
+ cpu_enable_ssbs()
+
+Git-commit: 27e6e7d63fc2b43334ce79070a727a9ca6e58700
+Patch-mainline: v5.3-rc1
+References: jsc#ECO-561
+
+cpu_enable_ssbs() is called via stop_machine() as part of the cpu_enable
+callback. A spin lock is used to ensure the hook is registered before
+the rest of the callback is executed.
+
+On -RT spin_lock() may sleep. However, all the callees in stop_machine()
+are expected to not sleep. Therefore a raw_spin_lock() is required here.
+
+Given this is already done under stop_machine() and the work done under
+the lock is quite small, the latency should not increase too much.
+
+Signed-off-by: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/cpufeature.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index ca27e08e3d8a..2a7159fda3ce 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1194,14 +1194,14 @@ static struct undef_hook ssbs_emulation_hook = {
+ static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
+ {
+ static bool undef_hook_registered = false;
+- static DEFINE_SPINLOCK(hook_lock);
++ static DEFINE_RAW_SPINLOCK(hook_lock);
+
+- spin_lock(&hook_lock);
++ raw_spin_lock(&hook_lock);
+ if (!undef_hook_registered) {
+ register_undef_hook(&ssbs_emulation_hook);
+ undef_hook_registered = true;
+ }
+- spin_unlock(&hook_lock);
++ raw_spin_unlock(&hook_lock);
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
+--
+2.16.4
+
diff --git a/patches.suse/arm64-cpufeature-Detect-SSBS-and-advertise-to-usersp.patch b/patches.suse/arm64-cpufeature-Detect-SSBS-and-advertise-to-usersp.patch
new file mode 100644
index 0000000000..d88463b82f
--- /dev/null
+++ b/patches.suse/arm64-cpufeature-Detect-SSBS-and-advertise-to-usersp.patch
@@ -0,0 +1,175 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 15 Jun 2018 11:37:34 +0100
+Subject: arm64: cpufeature: Detect SSBS and advertise to userspace
+
+Git-commit: d71be2b6c0e19180b5f80a6d42039cc074a693a2
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Armv8.5 introduces a new PSTATE bit known as Speculative Store Bypass
+Safe (SSBS) which can be used as a mitigation against Spectre variant 4.
+
+Additionally, a CPU may provide instructions to manipulate PSTATE.SSBS
+directly, so that userspace can toggle the SSBS control without trapping
+to the kernel.
+
+This patch probes for the existence of SSBS and advertises the new instructions
+to userspace if they exist.
+
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/cpucaps.h | 1 +
+ arch/arm64/include/asm/sysreg.h | 16 ++++++++++++----
+ arch/arm64/include/uapi/asm/hwcap.h | 1 +
+ arch/arm64/kernel/cpufeature.c | 19 +++++++++++++++++--
+ arch/arm64/kernel/cpuinfo.c | 1 +
+ 5 files changed, 32 insertions(+), 6 deletions(-)
+
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -47,6 +47,7 @@
+ #define ARM64_HW_DBM 26
+ #define ARM64_SSBD 27
+ #define ARM64_WORKAROUND_843419 29
++#define ARM64_SSBS 30
+ #define ARM64_HAS_CACHE_IDC 32
+ #define ARM64_HAS_CACHE_DIC 33
+ #define ARM64_MISMATCHED_CACHE_TYPE 34
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -411,6 +411,7 @@
+ #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
+
+ /* Common SCTLR_ELx flags. */
++#define SCTLR_ELx_DSSBS (1UL << 44)
+ #define SCTLR_ELx_EE (1 << 25)
+ #define SCTLR_ELx_IESB (1 << 21)
+ #define SCTLR_ELx_WXN (1 << 19)
+@@ -431,7 +432,7 @@
+ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
+ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+- (0xffffffffUL << 32))
++ (0xffffefffUL << 32))
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL2 SCTLR_ELx_EE
+@@ -445,7 +446,7 @@
+ #define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1)
+ #define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
+- ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
++ SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+
+ #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+ #error "Inconsistent SCTLR_EL2 set/clear bits"
+@@ -469,7 +470,7 @@
+ (1 << 29))
+ #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
+ (1 << 27) | (1 << 30) | (1 << 31) | \
+- (0xffffffffUL << 32))
++ (0xffffefffUL << 32))
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+@@ -486,7 +487,7 @@
+ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
+ #define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\
+ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
+- SCTLR_EL1_RES0)
++ SCTLR_ELx_DSSBS | SCTLR_EL1_RES0)
+
+ #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+ #error "Inconsistent SCTLR_EL1 set/clear bits"
+@@ -535,6 +536,13 @@
+ #define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
+ #define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
+
++/* id_aa64pfr1 */
++#define ID_AA64PFR1_SSBS_SHIFT 4
++
++#define ID_AA64PFR1_SSBS_PSTATE_NI 0
++#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
++#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
++
+ /* id_aa64mmfr0 */
+ #define ID_AA64MMFR0_TGRAN4_SHIFT 28
+ #define ID_AA64MMFR0_TGRAN64_SHIFT 24
+--- a/arch/arm64/include/uapi/asm/hwcap.h
++++ b/arch/arm64/include/uapi/asm/hwcap.h
+@@ -46,5 +46,6 @@
+ #define HWCAP_USCAT (1 << 25)
+ #define HWCAP_ILRCPC (1 << 26)
+ #define HWCAP_FLAGM (1 << 27)
++#define HWCAP_SSBS (1 << 28)
+
+ #endif /* _UAPI__ASM_HWCAP_H */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -164,6 +164,11 @@ static const struct arm64_ftr_bits ftr_i
+ ARM64_FTR_END,
+ };
+
++static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
++ ARM64_FTR_END,
++};
++
+ static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+@@ -370,7 +375,7 @@ static const struct __ftr_reg_entry {
+
+ /* Op1 = 0, CRn = 0, CRm = 4 */
+ ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
+- ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_raz),
++ ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
+ ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
+
+ /* Op1 = 0, CRn = 0, CRm = 5 */
+@@ -656,7 +661,6 @@ void update_cpu_features(int cpu,
+
+ /*
+ * EL3 is not our concern.
+- * ID_AA64PFR1 is currently RES0.
+ */
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+@@ -1212,6 +1216,16 @@ static const struct arm64_cpu_capabiliti
+ .cpu_enable = cpu_enable_hw_dbm,
+ },
+ #endif
++ {
++ .desc = "Speculative Store Bypassing Safe (SSBS)",
++ .capability = ARM64_SSBS,
++ .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
++ .matches = has_cpuid_feature,
++ .sys_reg = SYS_ID_AA64PFR1_EL1,
++ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
++ .sign = FTR_UNSIGNED,
++ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
++ },
+ {},
+ };
+
+@@ -1256,6 +1270,7 @@ static const struct arm64_cpu_capabiliti
+ #ifdef CONFIG_ARM64_SVE
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
+ #endif
++ HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
+ {},
+ };
+
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -81,6 +81,7 @@ static const char *const hwcap_str[] = {
+ "uscat",
+ "ilrcpc",
+ "flagm",
++ "ssbs",
+ NULL
+ };
+
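[Aside: once HWCAP_SSBS is advertised, userspace can probe for the PSTATE.SSBS manipulation instructions through the auxiliary vector. A minimal userspace sketch; the fallback #define covers older uapi headers:]

        #include <sys/auxv.h>

        #ifndef HWCAP_SSBS
        #define HWCAP_SSBS (1 << 28)
        #endif

        /* Non-zero if this kernel advertises the MSR/MRS SSBS instructions. */
        int have_ssbs_insns(void)
        {
                return !!(getauxval(AT_HWCAP) & HWCAP_SSBS);
        }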
diff --git a/patches.suse/arm64-cpufeature-Fix-handling-of-CTR_EL0.IDC-field.patch b/patches.suse/arm64-cpufeature-Fix-handling-of-CTR_EL0.IDC-field.patch
new file mode 100644
index 0000000000..9aeba8c0d2
--- /dev/null
+++ b/patches.suse/arm64-cpufeature-Fix-handling-of-CTR_EL0.IDC-field.patch
@@ -0,0 +1,191 @@
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 9 Oct 2018 14:47:06 +0100
+Subject: arm64: cpufeature: Fix handling of CTR_EL0.IDC field
+
+Git-commit: 1602df02f33f61fe0de1bbfeba0d1c97c14bff19
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+CTR_EL0.IDC reports the data cache clean requirements for instruction
+to data coherence. However, if the field is 0, we need to check the
+CLIDR_EL1 fields to detect the status of the feature. Currently we
+don't do this; instead we generate a warning and taint the kernel when
+there is a mismatch in the field among the CPUs. Also, userspace
+has no reliable way of inspecting CLIDR_EL1 to determine
+the status.
+
+This patch fixes the problem by checking the CLIDR_EL1 fields when
+(CTR_EL0.IDC == 0) and updates the kernel's copy of the CTR_EL0 for
+the CPU with the actual status of the feature. This would allow the
+sanity check infrastructure to do the proper checking of the fields
+and also allow the CTR_EL0 emulation code to supply the real status
+of the feature.
+
+Now, if a CPU has raw CTR_EL0.IDC == 0 and effective IDC == 1 (with
+overall system wide IDC == 1), we need to expose the real value to
+the user. So, we trap CTR_EL0 access on the CPU which reports incorrect
+CTR_EL0.IDC.
+
+Fixes: commit 6ae4b6e057888 ("arm64: Add support for new control bits CTR_EL0.DIC and CTR_EL0.IDC")
+Cc: Shanker Donthineni <shankerd@codeaurora.org>
+Cc: Philip Elcan <pelcan@codeaurora.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/cache.h | 40 ++++++++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/cpu_errata.c | 27 ++++++++++++++++++++++++---
+ arch/arm64/kernel/cpufeature.c | 15 ++++++++++++++-
+ arch/arm64/kernel/cpuinfo.c | 10 +++++++++-
+ 4 files changed, 87 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/include/asm/cache.h
++++ b/arch/arm64/include/asm/cache.h
+@@ -40,6 +40,15 @@
+ #define L1_CACHE_SHIFT (6)
+ #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
++
++#define CLIDR_LOUU_SHIFT 27
++#define CLIDR_LOC_SHIFT 24
++#define CLIDR_LOUIS_SHIFT 21
++
++#define CLIDR_LOUU(clidr) (((clidr) >> CLIDR_LOUU_SHIFT) & 0x7)
++#define CLIDR_LOC(clidr) (((clidr) >> CLIDR_LOC_SHIFT) & 0x7)
++#define CLIDR_LOUIS(clidr) (((clidr) >> CLIDR_LOUIS_SHIFT) & 0x7)
++
+ /*
+ * Memory returned by kmalloc() may be used for DMA, so we must make
+ * sure that all such allocations are cache aligned. Otherwise,
+@@ -84,6 +93,37 @@ static inline int cache_line_size(void)
+ return cwg ? 4 << cwg : L1_CACHE_BYTES;
+ }
+
++/*
++ * Read the effective value of CTR_EL0.
++ *
++ * According to ARM ARM for ARMv8-A (ARM DDI 0487C.a),
++ * section D10.2.33 "CTR_EL0, Cache Type Register" :
++ *
++ * CTR_EL0.IDC reports the data cache clean requirements for
++ * instruction to data coherence.
++ *
++ * 0 - dcache clean to PoU is required unless :
++ * (CLIDR_EL1.LoC == 0) || (CLIDR_EL1.LoUIS == 0 && CLIDR_EL1.LoUU == 0)
++ * 1 - dcache clean to PoU is not required for i-to-d coherence.
++ *
++ * This routine provides the CTR_EL0 with the IDC field updated to the
++ * effective state.
++ */
++static inline u32 __attribute_const__ read_cpuid_effective_cachetype(void)
++{
++ u32 ctr = read_cpuid_cachetype();
++
++ if (!(ctr & BIT(CTR_IDC_SHIFT))) {
++ u64 clidr = read_sysreg(clidr_el1);
++
++ if (CLIDR_LOC(clidr) == 0 ||
++ (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0))
++ ctr |= BIT(CTR_IDC_SHIFT);
++ }
++
++ return ctr;
++}
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -54,11 +54,32 @@ static bool
+ has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
+ int scope)
+ {
+- u64 mask = arm64_ftr_reg_ctrel0.strict_mask;;
++ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
++ u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
++ u64 ctr_raw, ctr_real;
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+- return (read_cpuid_cachetype() & mask) !=
+- (arm64_ftr_reg_ctrel0.sys_val & mask);
++
++ /*
++ * We want to make sure that all the CPUs in the system expose
++ * a consistent CTR_EL0 to make sure that applications behave
++ * correctly with migration.
++ *
++ * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
++ *
++ * 1) It is safe if the system doesn't support IDC, as CPU anyway
++ * reports IDC = 0, consistent with the rest.
++ *
++ * 2) If the system has IDC, it is still safe as we trap CTR_EL0
++ * access on this CPU via the ARM64_HAS_CACHE_IDC capability.
++ *
++ * So, we need to make sure either the raw CTR_EL0 or the effective
++ * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
++ */
++ ctr_raw = read_cpuid_cachetype() & mask;
++ ctr_real = read_cpuid_effective_cachetype() & mask;
++
++ return (ctr_real != sys) && (ctr_raw != sys);
+ }
+
+ static void
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -873,11 +873,23 @@ static bool has_cache_idc(const struct a
+ if (scope == SCOPE_SYSTEM)
+ ctr = arm64_ftr_reg_ctrel0.sys_val;
+ else
+- ctr = read_cpuid_cachetype();
++ ctr = read_cpuid_effective_cachetype();
+
+ return ctr & BIT(CTR_IDC_SHIFT);
+ }
+
++static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
++{
++ /*
++ * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
++ * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
++ * to the CTR_EL0 on this CPU and emulate it with the real/safe
++ * value.
++ */
++ if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
++}
++
+ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
+ int scope)
+ {
+@@ -1283,6 +1295,7 @@ static const struct arm64_cpu_capabiliti
+ .capability = ARM64_HAS_CACHE_IDC,
+ .type = ARM64_CPUCAP_SYSTEM_FEATURE,
+ .matches = has_cache_idc,
++ .cpu_enable = cpu_emulate_effective_ctr,
+ },
+ {
+ .desc = "Instruction cache invalidation not required for I/D coherence",
+--- a/arch/arm64/kernel/cpuinfo.c
++++ b/arch/arm64/kernel/cpuinfo.c
+@@ -325,7 +325,15 @@ static void cpuinfo_detect_icache_policy
+ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+ {
+ info->reg_cntfrq = arch_timer_get_cntfrq();
+- info->reg_ctr = read_cpuid_cachetype();
++ /*
++ * Use the effective value of CTR_EL0 rather than the raw value
++ * exposed by the CPU. The CTR_EL0.IDC field value must be interpreted
++ * with the CLIDR_EL1 fields to avoid triggering false warnings
++ * when there is a mismatch across the CPUs. Keep track of the
++ * effective value of the CTR_EL0 in our internal records for
++ * accurate sanity checks and feature enablement.
++ */
++ info->reg_ctr = read_cpuid_effective_cachetype();
+ info->reg_dczid = read_cpuid(DCZID_EL0);
+ info->reg_midr = read_cpuid_id();
+ info->reg_revidr = read_cpuid(REVIDR_EL1);
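[Aside: a worked example of the effective-IDC rule implemented by read_cpuid_effective_cachetype() above, with hypothetical CLIDR_EL1 field values; the CLIDR macros come from the cache.h hunk in this patch:]

        /* Raw CTR_EL0.IDC is 0, but CLIDR_EL1 reports LoUIS == 0 and
         * LoUU == 0, so no d-cache clean to PoU is ever required and
         * the effective IDC is 1. */
        u64 clidr = (0UL << CLIDR_LOUU_SHIFT) |
                    (2UL << CLIDR_LOC_SHIFT) |  /* caches exist below PoC */
                    (0UL << CLIDR_LOUIS_SHIFT);
        bool idc = (CLIDR_LOC(clidr) == 0) ||
                   (CLIDR_LOUIS(clidr) == 0 && CLIDR_LOUU(clidr) == 0); /* true */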
diff --git a/patches.suse/arm64-cpufeature-Trap-CTR_EL0-access-only-where-it-i.patch b/patches.suse/arm64-cpufeature-Trap-CTR_EL0-access-only-where-it-i.patch
new file mode 100644
index 0000000000..ff241d443a
--- /dev/null
+++ b/patches.suse/arm64-cpufeature-Trap-CTR_EL0-access-only-where-it-i.patch
@@ -0,0 +1,38 @@
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 9 Oct 2018 14:47:07 +0100
+Subject: arm64: cpufeature: Trap CTR_EL0 access only where it is necessary
+
+Git-commit: 4afe8e79da920bdf6698b01bc668fffc6758f37b
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+When there is a mismatch in the CTR_EL0 field, we trap
+access to CTR from EL0 on all CPUs to expose the safe
+value. However, we could skip trapping on a CPU which
+matches the safe value.
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/cpu_errata.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -85,7 +85,12 @@ has_mismatched_cache_type(const struct a
+ static void
+ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+ {
+- sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
++ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
++
++ /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
++ if ((read_cpuid_cachetype() & mask) !=
++ (arm64_ftr_reg_ctrel0.sys_val & mask))
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+ }
+
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
diff --git a/patches.suse/arm64-cpufeature-ctr-Fix-cpu-capability-check-for-la.patch b/patches.suse/arm64-cpufeature-ctr-Fix-cpu-capability-check-for-la.patch
new file mode 100644
index 0000000000..c254742fb1
--- /dev/null
+++ b/patches.suse/arm64-cpufeature-ctr-Fix-cpu-capability-check-for-la.patch
@@ -0,0 +1,74 @@
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 9 Oct 2018 14:47:05 +0100
+Subject: arm64: cpufeature: ctr: Fix cpu capability check for late CPUs
+
+Git-commit: 8ab66cbe63aeaf9e5970fb4aaef1c660fca59321
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561,jsc#SLE-10671
+
+The matches() routine for a capability must honor the "scope"
+passed to it and return the proper results.
+i.e., when passed SCOPE_LOCAL_CPU, it should check the
+status of the capability on the current CPU. This is used by
+verify_local_cpu_capabilities() on a late secondary CPU to make
+sure that it's compliant with the established system features.
+However, ARM64_HAS_CACHE_{IDC/DIC} always checks the system wide
+registers and this could mean that a late secondary CPU could return
+"true" (since the CPU hasn't updated the system wide registers yet)
+and thus lead the system into an inconsistent state, where
+the system assumes it has the IDC/DIC feature while the new CPU
+doesn't.
+
+Fixes: commit 6ae4b6e0578886eb36 ("arm64: Add support for new control bits CTR_EL0.DIC and CTR_EL0.IDC")
+Cc: Philip Elcan <pelcan@codeaurora.org>
+Cc: Shanker Donthineni <shankerd@codeaurora.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/cpufeature.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 00e7c313f088..ba16bb7762ca 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -854,15 +854,29 @@ static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unus
+ }
+
+ static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
+- int __unused)
++ int scope)
+ {
+- return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_IDC_SHIFT);
++ u64 ctr;
++
++ if (scope == SCOPE_SYSTEM)
++ ctr = arm64_ftr_reg_ctrel0.sys_val;
++ else
++ ctr = read_cpuid_cachetype();
++
++ return ctr & BIT(CTR_IDC_SHIFT);
+ }
+
+ static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
+- int __unused)
++ int scope)
+ {
+- return read_sanitised_ftr_reg(SYS_CTR_EL0) & BIT(CTR_DIC_SHIFT);
++ u64 ctr;
++
++ if (scope == SCOPE_SYSTEM)
++ ctr = arm64_ftr_reg_ctrel0.sys_val;
++ else
++ ctr = read_cpuid_cachetype();
++
++ return ctr & BIT(CTR_DIC_SHIFT);
+ }
+
+ static bool __maybe_unused
+--
+2.16.4
+
diff --git a/patches.suse/arm64-entry-Allow-handling-of-undefined-instructions.patch b/patches.suse/arm64-entry-Allow-handling-of-undefined-instructions.patch
new file mode 100644
index 0000000000..183827f706
--- /dev/null
+++ b/patches.suse/arm64-entry-Allow-handling-of-undefined-instructions.patch
@@ -0,0 +1,58 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 7 Aug 2018 13:43:06 +0100
+Subject: arm64: entry: Allow handling of undefined instructions from EL1
+
+Git-commit: 0bf0f444b2c49241b2b39aa3cf210d7c95ef6c34
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Rather than panic() when taking an undefined instruction exception from
+EL1, allow a hook to be registered in case we want to emulate the
+instruction, like we will for the SSBS PSTATE manipulation instructions.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/entry.S | 2 +-
+ arch/arm64/kernel/traps.c | 11 +++++++----
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -594,7 +594,7 @@ el1_undef:
+ inherit_daif pstate=x23, tmp=x2
+ mov x0, sp
+ bl do_undefinstr
+- ASM_BUG()
++ kernel_exit 1
+ el1_dbg:
+ /*
+ * Debug exception handling
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -272,10 +272,12 @@ static int call_undef_hook(struct pt_reg
+ int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+
+- if (!user_mode(regs))
+- return 1;
+-
+- if (compat_thumb_mode(regs)) {
++ if (!user_mode(regs)) {
++ __le32 instr_le;
++ if (probe_kernel_address((__force __le32 *)pc, instr_le))
++ goto exit;
++ instr = le32_to_cpu(instr_le);
++ } else if (compat_thumb_mode(regs)) {
+ /* 16-bit Thumb instruction */
+ if (get_user(instr, (u16 __user *)pc))
+ goto exit;
+@@ -367,6 +369,7 @@ asmlinkage void __exception do_undefinst
+ return;
+
+ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
++ BUG_ON(!user_mode(regs));
+ }
+
+ void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
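[Aside: with this change, an emulation hook for a kernel-mode undefined instruction can be registered through the existing undef_hook machinery, as the SSBS patches later in this series do. A sketch in which the hook, its encoding and the handler are purely illustrative:]

        static int sketch_emulation_fn(struct pt_regs *regs, u32 instr)
        {
                /* emulate the instruction here, then step past it */
                arm64_skip_faulting_instruction(regs, 4);
                return 0;                       /* 0 == handled */
        }

        static struct undef_hook sketch_hook = {
                .instr_mask     = 0xffffffff,
                .instr_val      = 0x00000000,   /* placeholder encoding */
                .pstate_mask    = PSR_MODE_MASK,
                .pstate_val     = PSR_MODE_EL1h, /* fire for EL1, the new case */
                .fn             = sketch_emulation_fn,
        };

        /* register_undef_hook(&sketch_hook); */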
diff --git a/patches.suse/arm64-errata-Hide-CTR_EL0.DIC-on-systems-affected-by.patch b/patches.suse/arm64-errata-Hide-CTR_EL0.DIC-on-systems-affected-by.patch
new file mode 100644
index 0000000000..31e2f363ab
--- /dev/null
+++ b/patches.suse/arm64-errata-Hide-CTR_EL0.DIC-on-systems-affected-by.patch
@@ -0,0 +1,148 @@
+From: James Morse <james.morse@arm.com>
+Date: Thu, 17 Oct 2019 18:42:58 +0100
+Subject: arm64: errata: Hide CTR_EL0.DIC on systems affected by Neoverse-N1
+ #1542419
+
+Git-commit: 05460849c3b51180d5ada3373d0449aea19075e4
+Patch-mainline: Queued
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
+References: jsc#ECO-561,jsc#SLE-10671
+
+Cores affected by Neoverse-N1 #1542419 could execute a stale instruction
+when a branch is updated to point to freshly generated instructions.
+
+To work around this issue, we need user-space to issue unnecessary
+icache maintenance that we can trap. Start by hiding CTR_EL0.DIC.
+
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ Documentation/arm64/silicon-errata.txt | 1 +
+ arch/arm64/Kconfig | 16 ++++++++++++++++
+ arch/arm64/include/asm/cpucaps.h | 3 ++-
+ arch/arm64/kernel/cpu_errata.c | 32 +++++++++++++++++++++++++++++++-
+ arch/arm64/kernel/traps.c | 3 +++
+ 5 files changed, 53 insertions(+), 2 deletions(-)
+
+--- a/Documentation/arm64/silicon-errata.txt
++++ b/Documentation/arm64/silicon-errata.txt
+@@ -58,6 +58,7 @@ stable kernels.
+ | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
+ | ARM | Cortex-A76 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
++| ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 |
+ | ARM | MMU-500 | #841119,826419 | N/A |
+ | | | | |
+ | Cavium | ThunderX ITS | #22375,24313 | CAVIUM_ERRATUM_22375 |
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -468,6 +468,22 @@ config ARM64_ERRATUM_1418040
+
+ If unsure, say Y.
+
++config ARM64_ERRATUM_1542419
++ bool "Neoverse-N1: workaround mis-ordering of instruction fetches"
++ default y
++ help
++ This option adds a workaround for ARM Neoverse-N1 erratum
++ 1542419.
++
++ Affected Neoverse-N1 cores could execute a stale instruction when
++ modified by another CPU. The workaround depends on a firmware
++ counterpart.
++
++ Workaround the issue by hiding the DIC feature from EL0. This
++ forces user-space to perform cache maintenance.
++
++ If unsure, say Y.
++
+ config CAVIUM_ERRATUM_22375
+ bool "Cavium erratum 22375, 24313"
+ default y
+--- a/arch/arm64/include/asm/cpucaps.h
++++ b/arch/arm64/include/asm/cpucaps.h
+@@ -53,7 +53,8 @@
+ #define ARM64_HAS_CACHE_IDC 32
+ #define ARM64_HAS_CACHE_DIC 33
+ #define ARM64_MISMATCHED_CACHE_TYPE 34
++#define ARM64_WORKAROUND_1542419 35
+
+-#define ARM64_NCAPS 35
++#define ARM64_NCAPS 36
+
+ #endif /* __ASM_CPUCAPS_H */
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -83,13 +83,21 @@ has_mismatched_cache_type(const struct a
+ }
+
+ static void
+-cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
++cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
+ {
+ u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
++ bool enable_uct_trap = false;
+
+ /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
+ if ((read_cpuid_cachetype() & mask) !=
+ (arm64_ftr_reg_ctrel0.sys_val & mask))
++ enable_uct_trap = true;
++
++ /* ... or if the system is affected by an erratum */
++ if (cap->capability == ARM64_WORKAROUND_1542419)
++ enable_uct_trap = true;
++
++ if (enable_uct_trap)
+ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+ }
+
+@@ -527,6 +535,18 @@ multi_entry_cap_cpu_enable(const struct
+ caps->cpu_enable(caps);
+ }
+
++static bool __maybe_unused
++has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
++ int scope)
++{
++ u32 midr = read_cpuid_id();
++ bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
++ const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
++
++ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
++ return is_midr_in_range(midr, &range) && has_dic;
++}
++
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+
+ /*
+@@ -738,6 +758,16 @@ const struct arm64_cpu_capabilities arm6
+ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
+ },
+ #endif
++#ifdef CONFIG_ARM64_ERRATUM_1542419
++ {
++ /* we depend on the firmware portion for correctness */
++ .desc = "ARM erratum 1542419 (kernel portion)",
++ .capability = ARM64_WORKAROUND_1542419,
++ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
++ .matches = has_neoverse_n1_erratum_1542419,
++ .cpu_enable = cpu_enable_trap_ctr_access,
++ },
++#endif
+ {
+ }
+ };
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -433,6 +433,9 @@ static void ctr_read_handler(unsigned in
+ int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
+ unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
+
++ if (cpus_have_const_cap(ARM64_WORKAROUND_1542419))
++ val &= ~BIT(CTR_DIC_SHIFT);
++
+ pt_regs_write_reg(regs, rt, val);
+
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
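
The trap handler change is the heart of the workaround: once SCTLR_EL1.UCT traps EL0's CTR_EL0 reads, the emulated value can be returned with CTR_EL0.DIC masked out, so user-space falls back to explicit IC instructions the kernel can intercept. A small model of that masking step (the "affected" flag below stands in for cpus_have_const_cap(), and the sample CTR value is made up):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1ULL << (n))
#define CTR_DIC_SHIFT	29

static uint64_t emulated_ctr_read(uint64_t hw_val, bool erratum_1542419)
{
	uint64_t val = hw_val;

	/* Hide DIC so user-space issues IC instructions we can trap. */
	if (erratum_1542419)
		val &= ~BIT(CTR_DIC_SHIFT);

	return val;
}

int main(void)
{
	uint64_t ctr = BIT(CTR_DIC_SHIFT) | 0x8444c004;	/* made-up value */

	printf("unaffected: %#llx\n",
	       (unsigned long long)emulated_ctr_read(ctr, false));
	printf("affected:   %#llx\n",
	       (unsigned long long)emulated_ctr_read(ctr, true));
	return 0;
}
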
diff --git a/patches.suse/arm64-fix-SSBS-sanitization.patch b/patches.suse/arm64-fix-SSBS-sanitization.patch
new file mode 100644
index 0000000000..4696c1a65e
--- /dev/null
+++ b/patches.suse/arm64-fix-SSBS-sanitization.patch
@@ -0,0 +1,71 @@
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Fri, 15 Feb 2019 16:34:27 +0000
+Subject: arm64: fix SSBS sanitization
+
+Git-commit: f54dada8274643e3ff4436df0ea124aeedc43cae
+Patch-mainline: v5.0-rc8
+References: jsc#ECO-561
+
+In valid_user_regs() we treat SSBS as a RES0 bit, and consequently it is
+unexpectedly cleared when we restore a sigframe or fiddle with GPRs via
+ptrace.
+
+This patch fixes valid_user_regs() to account for this, updating the
+function to refer to the latest ARM ARM (ARM DDI 0487D.a). For AArch32
+tasks, SSBS appears in bit 23 of SPSR_EL1, matching its position in the
+AArch32-native PSR format, and we don't need to translate it as we have
+to for DIT.
+
+There are no other bit assignments that we need to account for today.
+As the recent documentation describes the DIT bit, we can drop our
+comment regarding DIT.
+
+While removing SSBS from the RES0 masks, existing inconsistent
+whitespace is corrected.
+
+Fixes: d71be2b6c0e19180 ("arm64: cpufeature: Detect SSBS and advertise to userspace")
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/ptrace.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 9dce33b0e260..ddaea0fd2fa4 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs)
+ }
+
+ /*
+- * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a
+- * We also take into account DIT (bit 24), which is not yet documented, and
+- * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be
+- * allocated an EL0 meaning in future.
++ * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
++ * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
++ * not described in ARM DDI 0487D.a.
++ * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
++ * be allocated an EL0 meaning in future.
+ * Userspace cannot use these until they have an architectural meaning.
+ * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
+ * We also reserve IL for the kernel; SS is handled dynamically.
+ */
+ #define SPSR_EL1_AARCH64_RES0_BITS \
+- (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
+- GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5))
++ (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
++ GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
+ #define SPSR_EL1_AARCH32_RES0_BITS \
+- (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20))
++ (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
+
+ static int valid_compat_regs(struct user_pt_regs *regs)
+ {
+--
+2.16.4
+
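The masks above feed a simple invariant: a user-supplied SPSR value is acceptable only if all architecturally reserved bits are clear, so removing SSBS (AArch64 bit 12) from the RES0 mask is what lets sigreturn and ptrace preserve it. A compact model of that check, using a shortened mask for illustration (the real masks also cover bits 63:32):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Bit 12 (SSBS) is deliberately absent from the reserved mask. */
#define SPSR_RES0	(GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | \
			 GENMASK_ULL(5, 5))

static bool valid_user_pstate(uint64_t pstate)
{
	return (pstate & SPSR_RES0) == 0;
}

int main(void)
{
	printf("SSBS set:     %d\n", valid_user_pstate(1ULL << 12)); /* 1 */
	printf("RES0 bit set: %d\n", valid_user_pstate(1ULL << 13)); /* 0 */
	return 0;
}
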
diff --git a/patches.suse/arm64-force_signal_inject-WARN-if-called-from-kernel.patch b/patches.suse/arm64-force_signal_inject-WARN-if-called-from-kernel.patch
new file mode 100644
index 0000000000..b25e41cf66
--- /dev/null
+++ b/patches.suse/arm64-force_signal_inject-WARN-if-called-from-kernel.patch
@@ -0,0 +1,42 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 14 Aug 2018 16:24:54 +0100
+Subject: arm64: force_signal_inject: WARN if called from kernel context
+
+Git-commit: 8a60419d36762a1131c2b29f7bd14371db4df1b5
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+force_signal_inject() is designed to send a fatal signal to userspace,
+so WARN if the current pt_regs indicates a kernel context. This can
+currently happen for the undefined instruction trap, so patch that up
+so that we always BUG() if we didn't have a handler.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/traps.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -315,6 +315,9 @@ void force_signal_inject(int signal, int
+ void __user *pc = (void __user *)instruction_pointer(regs);
+ const char *desc;
+
++ if (WARN_ON(!user_mode(regs)))
++ return;
++
+ switch (signal) {
+ case SIGILL:
+ desc = "undefined instruction";
+@@ -368,8 +371,8 @@ asmlinkage void __exception do_undefinst
+ if (call_undef_hook(regs) == 0)
+ return;
+
+- force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ BUG_ON(!user_mode(regs));
++ force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+ }
+
+ #define __user_cache_maint(insn, address, res) \
diff --git a/patches.suse/arm64-kill-change_cpacr.patch b/patches.suse/arm64-kill-change_cpacr.patch
new file mode 100644
index 0000000000..e95d98060d
--- /dev/null
+++ b/patches.suse/arm64-kill-change_cpacr.patch
@@ -0,0 +1,55 @@
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 11 Jul 2018 14:56:39 +0100
+Subject: arm64: kill change_cpacr()
+
+Git-commit: 8d370933faecec098acb99fbf317cf9dfa9ee995
+Patch-mainline: v4.19-rc1
+References: jsc#ECO-561
+
+Now that we have sysreg_clear_set(), we can use this instead of
+change_cpacr().
+
+Note that the order of the set and clear arguments differs between
+change_cpacr() and sysreg_clear_set(), so these are flipped as part of
+the conversion. Also, sve_user_enable() redundantly clears
+CPACR_EL1_ZEN_EL0EN before setting it; this is removed for clarity.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/fpsimd.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -171,23 +171,14 @@ static void *sve_pffr(struct task_struct
+ sve_ffr_offset(task->thread.sve_vl);
+ }
+
+-static void change_cpacr(u64 val, u64 mask)
+-{
+- u64 cpacr = read_sysreg(CPACR_EL1);
+- u64 new = (cpacr & ~mask) | val;
+-
+- if (new != cpacr)
+- write_sysreg(new, CPACR_EL1);
+-}
+-
+ static void sve_user_disable(void)
+ {
+- change_cpacr(0, CPACR_EL1_ZEN_EL0EN);
++ sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
+ }
+
+ static void sve_user_enable(void)
+ {
+- change_cpacr(CPACR_EL1_ZEN_EL0EN, CPACR_EL1_ZEN_EL0EN);
++ sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
+ }
+
+ /*
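
sysreg_clear_set() is the generic clear-then-set read-modify-write helper that change_cpacr() duplicated, with the write skipped when the value is unchanged; note the flipped argument order (clear first, then set) called out in the commit message. A sketch of the same logic over a plain variable (the real macro reads and writes a system register, and the CPACR_EL1_ZEN_EL0EN bit position below is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define CPACR_EL1_ZEN_EL0EN	(1ULL << 16)	/* assumed bit position */

static uint64_t fake_sysreg;	/* stands in for CPACR_EL1 */

/* Argument order matches sysreg_clear_set(): clear first, then set. */
static void sysreg_clear_set(uint64_t clear, uint64_t set)
{
	uint64_t old = fake_sysreg;
	uint64_t new = (old & ~clear) | set;

	if (new != old)		/* skip the write when nothing changes */
		fake_sysreg = new;
}

int main(void)
{
	sysreg_clear_set(0, CPACR_EL1_ZEN_EL0EN);	/* sve_user_enable()  */
	printf("after enable:  %#llx\n", (unsigned long long)fake_sysreg);

	sysreg_clear_set(CPACR_EL1_ZEN_EL0EN, 0);	/* sve_user_disable() */
	printf("after disable: %#llx\n", (unsigned long long)fake_sysreg);
	return 0;
}
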
diff --git a/patches.suse/arm64-kill-config_sctlr_el1.patch b/patches.suse/arm64-kill-config_sctlr_el1.patch
new file mode 100644
index 0000000000..e47af68831
--- /dev/null
+++ b/patches.suse/arm64-kill-config_sctlr_el1.patch
@@ -0,0 +1,105 @@
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 11 Jul 2018 14:56:38 +0100
+Subject: arm64: kill config_sctlr_el1()
+
+Git-commit: 25be597ada0b49d2748ab520a78a28c1764d69e4
+Patch-mainline: v4.19-rc1
+References: jsc#ECO-561
+
+Now that we have sysreg_clear_set(), we can consistently use this
+instead of config_sctlr_el1().
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Dave Martin <dave.martin@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/sysreg.h | 10 ----------
+ arch/arm64/kernel/armv8_deprecated.c | 8 ++++----
+ arch/arm64/kernel/cpu_errata.c | 3 +--
+ arch/arm64/kernel/traps.c | 2 +-
+ arch/arm64/mm/fault.c | 2 +-
+ 5 files changed, 7 insertions(+), 18 deletions(-)
+
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -735,16 +735,6 @@ asm(
+ write_sysreg(__scs_new, sysreg); \
+ } while (0)
+
+-static inline void config_sctlr_el1(u32 clear, u32 set)
+-{
+- u32 val;
+-
+- val = read_sysreg(sctlr_el1);
+- val &= ~clear;
+- val |= set;
+- write_sysreg(val, sctlr_el1);
+-}
+-
+ #endif
+
+ #endif /* __ASM_SYSREG_H */
+--- a/arch/arm64/kernel/armv8_deprecated.c
++++ b/arch/arm64/kernel/armv8_deprecated.c
+@@ -510,9 +510,9 @@ ret:
+ static int cp15_barrier_set_hw_mode(bool enable)
+ {
+ if (enable)
+- config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
++ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_CP15BEN);
+ else
+- config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_CP15BEN, 0);
+ return 0;
+ }
+
+@@ -547,9 +547,9 @@ static int setend_set_hw_mode(bool enabl
+ return -EINVAL;
+
+ if (enable)
+- config_sctlr_el1(SCTLR_EL1_SED, 0);
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SED, 0);
+ else
+- config_sctlr_el1(0, SCTLR_EL1_SED);
++ sysreg_clear_set(sctlr_el1, 0, SCTLR_EL1_SED);
+ return 0;
+ }
+
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -62,8 +62,7 @@ has_mismatched_cache_line_size(const str
+ static void
+ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
+ {
+- /* Clear SCTLR_EL1.UCT */
+- config_sctlr_el1(SCTLR_EL1_UCT, 0);
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
+ }
+
+ #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+--- a/arch/arm64/kernel/traps.c
++++ b/arch/arm64/kernel/traps.c
+@@ -371,7 +371,7 @@ asmlinkage void __exception do_undefinst
+
+ void cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
+ {
+- config_sctlr_el1(SCTLR_EL1_UCI, 0);
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
+ }
+
+ #define __user_cache_maint(insn, address, res) \
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -824,7 +824,7 @@ void cpu_enable_pan(const struct arm64_c
+ */
+ WARN_ON_ONCE(in_interrupt());
+
+- config_sctlr_el1(SCTLR_EL1_SPAN, 0);
++ sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
+ asm(SET_PSTATE_PAN(1));
+ }
+ #endif /* CONFIG_ARM64_PAN */
diff --git a/patches.suse/arm64-move-SCTLR_EL-1-2-assertions-to-asm-sysreg.h.patch b/patches.suse/arm64-move-SCTLR_EL-1-2-assertions-to-asm-sysreg.h.patch
new file mode 100644
index 0000000000..2abf144b2c
--- /dev/null
+++ b/patches.suse/arm64-move-SCTLR_EL-1-2-assertions-to-asm-sysreg.h.patch
@@ -0,0 +1,100 @@
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 11 Jul 2018 14:56:37 +0100
+Subject: arm64: move SCTLR_EL{1,2} assertions to <asm/sysreg.h>
+
+Git-commit: 1c312e84c2d71da4101754fa6118f703f7473e01
+Patch-mainline: v4.19-rc1
+References: jsc#ECO-561
+
+Currently we assert that the SCTLR_EL{1,2}_{SET,CLEAR} bits are
+self-consistent with an assertion in config_sctlr_el1(). This is a bit
+unusual, since config_sctlr_el1() doesn't make use of these definitions,
+and is far away from the definitions themselves.
+
+We can use the CPP #error directive to have equivalent assertions in
+<asm/sysreg.h>, next to the definitions of the set/clear bits, which is
+a bit clearer and simpler.
+
+At the same time, let's fill in the upper 32 bits for both registers in
+their respective RES0 definitions. This could be a little nicer with
+GENMASK_ULL(63, 32), but this currently lives in <linux/bitops.h>, which
+cannot safely be included from assembly, as <asm/sysreg.h> can.
+
+Note that when the preprocessor evaluates an expression for an #if
+directive, all signed or unsigned values are treated as intmax_t or
+uintmax_t respectively. To avoid ambiguity, we explicitly define the
+mask of all 64 bits.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Dave Martin <dave.martin@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/sysreg.h | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
+index a8f84812c6e8..fefc17dae8ee 100644
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -436,7 +436,8 @@
+ #define SCTLR_EL2_RES0 ((1 << 6) | (1 << 7) | (1 << 8) | (1 << 9) | \
+ (1 << 10) | (1 << 13) | (1 << 14) | (1 << 15) | \
+ (1 << 17) | (1 << 20) | (1 << 24) | (1 << 26) | \
+- (1 << 27) | (1 << 30) | (1 << 31))
++ (1 << 27) | (1 << 30) | (1 << 31) | \
++ (0xffffffffUL << 32))
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL2 SCTLR_ELx_EE
+@@ -452,9 +453,9 @@
+ SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
+ ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
+
+-/* Check all the bits are accounted for */
+-#define SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != ~0)
+-
++#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
++#error "Inconsistent SCTLR_EL2 set/clear bits"
++#endif
+
+ /* SCTLR_EL1 specific flags. */
+ #define SCTLR_EL1_UCI (1 << 26)
+@@ -473,7 +474,8 @@
+ #define SCTLR_EL1_RES1 ((1 << 11) | (1 << 20) | (1 << 22) | (1 << 28) | \
+ (1 << 29))
+ #define SCTLR_EL1_RES0 ((1 << 6) | (1 << 10) | (1 << 13) | (1 << 17) | \
+- (1 << 27) | (1 << 30) | (1 << 31))
++ (1 << 27) | (1 << 30) | (1 << 31) | \
++ (0xffffffffUL << 32))
+
+ #ifdef CONFIG_CPU_BIG_ENDIAN
+ #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+@@ -492,8 +494,9 @@
+ SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
+ SCTLR_EL1_RES0)
+
+-/* Check all the bits are accounted for */
+-#define SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS BUILD_BUG_ON((SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != ~0)
++#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
++#error "Inconsistent SCTLR_EL1 set/clear bits"
++#endif
+
+ /* id_aa64isar0 */
+ #define ID_AA64ISAR0_TS_SHIFT 52
+@@ -743,9 +746,6 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
+ {
+ u32 val;
+
+- SCTLR_EL2_BUILD_BUG_ON_MISSING_BITS;
+- SCTLR_EL1_BUILD_BUG_ON_MISSING_BITS;
+-
+ val = read_sysreg(sctlr_el1);
+ val &= ~clear;
+ val |= set;
+--
+2.16.4
+
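The #if/#error pair works because the SET and CLEAR masks are meant to partition the register: every one of the 64 bits must appear in exactly one mask, so their XOR has to equal the all-ones value, spelled out explicitly to sidestep the intmax_t/uintmax_t promotion noted above. The same idiom in a standalone file, with toy masks rather than the real SCTLR layout:

#include <stdio.h>

/* Toy masks: together they must claim all 64 bits exactly once. */
#define MY_REG_SET	0x00000000ffffffffULL
#define MY_REG_CLEAR	0xffffffff00000000ULL

#if (MY_REG_SET ^ MY_REG_CLEAR) != 0xffffffffffffffff
#error "Inconsistent MY_REG set/clear bits"
#endif

int main(void)
{
	puts("masks are consistent (this file would not compile otherwise)");
	return 0;
}
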
diff --git a/patches.suse/arm64-ssbd-Add-support-for-PSTATE.SSBS-rather-than-t.patch b/patches.suse/arm64-ssbd-Add-support-for-PSTATE.SSBS-rather-than-t.patch
new file mode 100644
index 0000000000..3d0a9f3da3
--- /dev/null
+++ b/patches.suse/arm64-ssbd-Add-support-for-PSTATE.SSBS-rather-than-t.patch
@@ -0,0 +1,292 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 7 Aug 2018 13:47:06 +0100
+Subject: arm64: ssbd: Add support for PSTATE.SSBS rather than trapping to EL3
+
+Git-commit: 8f04e8e6e29c93421a95b61cad62e3918425eac7
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+On CPUs with support for PSTATE.SSBS, the kernel can toggle the SSBD
+state without needing to call into firmware.
+
+This patch hooks into the existing SSBD infrastructure so that SSBS is
+used on CPUs that support it, but it's all made horribly complicated by
+the very real possibility of big/little systems that don't uniformly
+provide the new capability.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/include/asm/processor.h | 7 +++++
+ arch/arm64/include/asm/ptrace.h | 1
+ arch/arm64/include/asm/sysreg.h | 3 ++
+ arch/arm64/include/uapi/asm/ptrace.h | 1
+ arch/arm64/kernel/cpu_errata.c | 26 ++++++++++++++++++--
+ arch/arm64/kernel/cpufeature.c | 45 +++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/process.c | 4 +++
+ arch/arm64/kernel/ssbd.c | 21 ++++++++++++++++
+ 8 files changed, 106 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -147,6 +147,10 @@ static inline void start_thread(struct p
+ {
+ start_thread_common(regs, pc);
+ regs->pstate = PSR_MODE_EL0t;
++
++ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++ regs->pstate |= PSR_SSBS_BIT;
++
+ regs->sp = sp;
+ }
+
+@@ -163,6 +167,9 @@ static inline void compat_start_thread(s
+ regs->pstate |= PSR_AA32_E_BIT;
+ #endif
+
++ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
++ regs->pstate |= PSR_AA32_SSBS_BIT;
++
+ regs->compat_sp = sp;
+ }
+ #endif
+--- a/arch/arm64/include/asm/ptrace.h
++++ b/arch/arm64/include/asm/ptrace.h
+@@ -50,6 +50,7 @@
+ #define PSR_AA32_I_BIT 0x00000080
+ #define PSR_AA32_A_BIT 0x00000100
+ #define PSR_AA32_E_BIT 0x00000200
++#define PSR_AA32_SSBS_BIT 0x00800000
+ #define PSR_AA32_DIT_BIT 0x01000000
+ #define PSR_AA32_Q_BIT 0x08000000
+ #define PSR_AA32_V_BIT 0x10000000
+--- a/arch/arm64/include/asm/sysreg.h
++++ b/arch/arm64/include/asm/sysreg.h
+@@ -86,11 +86,14 @@
+
+ #define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
+ #define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
++#define REG_PSTATE_SSBS_IMM sys_reg(0, 3, 4, 0, 1)
+
+ #define SET_PSTATE_PAN(x) __emit_inst(0xd5000000 | REG_PSTATE_PAN_IMM | \
+ (!!x)<<8 | 0x1f)
+ #define SET_PSTATE_UAO(x) __emit_inst(0xd5000000 | REG_PSTATE_UAO_IMM | \
+ (!!x)<<8 | 0x1f)
++#define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM | \
++ (!!x)<<8 | 0x1f)
+
+ #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+ #define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+--- a/arch/arm64/include/uapi/asm/ptrace.h
++++ b/arch/arm64/include/uapi/asm/ptrace.h
+@@ -45,6 +45,7 @@
+ #define PSR_I_BIT 0x00000080
+ #define PSR_A_BIT 0x00000100
+ #define PSR_D_BIT 0x00000200
++#define PSR_SSBS_BIT 0x00001000
+ #define PSR_PAN_BIT 0x00400000
+ #define PSR_UAO_BIT 0x00800000
+ #define PSR_Q_BIT 0x08000000
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -304,6 +304,14 @@ void __init arm64_enable_wa2_handling(st
+
+ void arm64_set_ssbd_mitigation(bool state)
+ {
++ if (this_cpu_has_cap(ARM64_SSBS)) {
++ if (state)
++ asm volatile(SET_PSTATE_SSBS(0));
++ else
++ asm volatile(SET_PSTATE_SSBS(1));
++ return;
++ }
++
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
+@@ -328,6 +336,11 @@ static bool has_ssbd_mitigation(const st
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+
++ if (this_cpu_has_cap(ARM64_SSBS)) {
++ required = false;
++ goto out_printmsg;
++ }
++
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
+ ssbd_state = ARM64_SSBD_UNKNOWN;
+ return false;
+@@ -376,7 +389,6 @@ static bool has_ssbd_mitigation(const st
+
+ switch (ssbd_state) {
+ case ARM64_SSBD_FORCE_DISABLE:
+- pr_info_once("%s disabled from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(false);
+ required = false;
+ break;
+@@ -389,7 +401,6 @@ static bool has_ssbd_mitigation(const st
+ break;
+
+ case ARM64_SSBD_FORCE_ENABLE:
+- pr_info_once("%s forced from command-line\n", entry->desc);
+ arm64_set_ssbd_mitigation(true);
+ required = true;
+ break;
+@@ -399,6 +410,17 @@ static bool has_ssbd_mitigation(const st
+ break;
+ }
+
++out_printmsg:
++ switch (ssbd_state) {
++ case ARM64_SSBD_FORCE_DISABLE:
++ pr_info_once("%s disabled from command-line\n", entry->desc);
++ break;
++
++ case ARM64_SSBD_FORCE_ENABLE:
++ pr_info_once("%s forced from command-line\n", entry->desc);
++ break;
++ }
++
+ return required;
+ }
+ #endif /* CONFIG_ARM64_SSBD */
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1033,6 +1033,48 @@ static bool runs_at_el2(const struct arm
+ /* static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused) */
+ #endif
+
++#ifdef CONFIG_ARM64_SSBD
++static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
++{
++ if (user_mode(regs))
++ return 1;
++
++ if (instr & BIT(CRm_shift))
++ regs->pstate |= PSR_SSBS_BIT;
++ else
++ regs->pstate &= ~PSR_SSBS_BIT;
++
++ arm64_skip_faulting_instruction(regs, 4);
++ return 0;
++}
++
++static struct undef_hook ssbs_emulation_hook = {
++ .instr_mask = ~(1U << CRm_shift),
++ .instr_val = 0xd500001f | REG_PSTATE_SSBS_IMM,
++ .fn = ssbs_emulation_handler,
++};
++
++static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
++{
++ static bool undef_hook_registered = false;
++ static DEFINE_SPINLOCK(hook_lock);
++
++ spin_lock(&hook_lock);
++ if (!undef_hook_registered) {
++ register_undef_hook(&ssbs_emulation_hook);
++ undef_hook_registered = true;
++ }
++ spin_unlock(&hook_lock);
++
++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
++ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
++ arm64_set_ssbd_mitigation(false);
++ } else {
++ arm64_set_ssbd_mitigation(true);
++ }
++}
++#endif /* CONFIG_ARM64_SSBD */
++
+ static const struct arm64_cpu_capabilities arm64_features[] = {
+ {
+ .desc = "GIC system register CPU interface",
+@@ -1192,6 +1234,7 @@ static const struct arm64_cpu_capabiliti
+ .cpu_enable = cpu_enable_hw_dbm,
+ },
+ #endif
++#ifdef CONFIG_ARM64_SSBD
+ {
+ .desc = "Speculative Store Bypassing Safe (SSBS)",
+ .capability = ARM64_SSBS,
+@@ -1201,7 +1244,9 @@ static const struct arm64_cpu_capabiliti
+ .field_pos = ID_AA64PFR1_SSBS_SHIFT,
+ .sign = FTR_UNSIGNED,
+ .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
++ .cpu_enable = cpu_enable_ssbs,
+ },
++#endif
+ {},
+ };
+
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -352,6 +352,10 @@ int copy_thread(unsigned long clone_flag
+ if (IS_ENABLED(CONFIG_ARM64_UAO) &&
+ cpus_have_const_cap(ARM64_HAS_UAO))
+ childregs->pstate |= PSR_UAO_BIT;
++
++ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
++ childregs->pstate |= PSR_SSBS_BIT;
++
+ p->thread.cpu_context.x19 = stack_start;
+ p->thread.cpu_context.x20 = stk_sz;
+ }
+--- a/arch/arm64/kernel/ssbd.c
++++ b/arch/arm64/kernel/ssbd.c
+@@ -3,13 +3,31 @@
+ * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
+ */
+
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/sched.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/thread_info.h>
+ #include <linux/prctl.h>
+
+ #include <asm/cpufeature.h>
+
++static void ssbd_ssbs_enable(struct task_struct *task)
++{
++ u64 val = is_compat_thread(task_thread_info(task)) ?
++ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++ task_pt_regs(task)->pstate |= val;
++}
++
++static void ssbd_ssbs_disable(struct task_struct *task)
++{
++ u64 val = is_compat_thread(task_thread_info(task)) ?
++ PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;
++
++ task_pt_regs(task)->pstate &= ~val;
++}
++
+ /*
+ * prctl interface for SSBD
+ */
+@@ -45,12 +63,14 @@ static int ssbd_prctl_set(struct task_st
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+ clear_tsk_thread_flag(task, TIF_SSBD);
++ ssbd_ssbs_enable(task);
+ break;
+ case PR_SPEC_DISABLE:
+ if (state == ARM64_SSBD_FORCE_DISABLE)
+ return -EPERM;
+ task_set_spec_ssb_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
++ ssbd_ssbs_disable(task);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ if (state == ARM64_SSBD_FORCE_DISABLE)
+@@ -58,6 +78,7 @@ static int ssbd_prctl_set(struct task_st
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+ set_tsk_thread_flag(task, TIF_SSBD);
++ ssbd_ssbs_disable(task);
+ break;
+ default:
+ return -ERANGE;
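
The mitigation now has two backends: on CPUs with PSTATE.SSBS the kernel flips the PSTATE bit directly (inverted sense: mitigation on means SSBS clear), and only otherwise does it fall back to the SMCCC ARCH_WORKAROUND_2 firmware call. A hedged sketch of that selection, with both backends stubbed out as prints:

#include <stdbool.h>
#include <stdio.h>

/* Stubs for this_cpu_has_cap(ARM64_SSBS) and the two backends. */
static bool cpu_has_ssbs = true;

static void set_pstate_ssbs(int val)
{
	printf("msr SSBS, #%d\n", val);
}

static void smccc_arch_workaround_2(bool state)
{
	printf("firmware call, state=%d\n", state);
}

static void arm64_set_ssbd_mitigation(bool state)
{
	if (cpu_has_ssbs) {
		/* Mitigation on means speculation unsafe, i.e. SSBS clear. */
		set_pstate_ssbs(state ? 0 : 1);
		return;
	}

	smccc_arch_workaround_2(state);
}

int main(void)
{
	arm64_set_ssbd_mitigation(true);	/* enable the mitigation  */
	arm64_set_ssbd_mitigation(false);	/* disable the mitigation */
	return 0;
}
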
diff --git a/patches.suse/arm64-ssbd-Drop-ifdefs-for-PR_SPEC_STORE_BYPASS.patch b/patches.suse/arm64-ssbd-Drop-ifdefs-for-PR_SPEC_STORE_BYPASS.patch
new file mode 100644
index 0000000000..00a2216b92
--- /dev/null
+++ b/patches.suse/arm64-ssbd-Drop-ifdefs-for-PR_SPEC_STORE_BYPASS.patch
@@ -0,0 +1,40 @@
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 15 Jun 2018 11:50:42 +0100
+Subject: arm64: ssbd: Drop #ifdefs for PR_SPEC_STORE_BYPASS
+
+Git-commit: 2d1b2a91d56b19636b740ea70c8399d1df249f20
+Patch-mainline: v4.20-rc1
+References: jsc#ECO-561
+
+Now that we're all merged nicely into mainline, there's no need to check
+to see if PR_SPEC_STORE_BYPASS is defined.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Mian Yousaf Kaukab <ykaukab@suse.de>
+---
+ arch/arm64/kernel/ssbd.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/arch/arm64/kernel/ssbd.c b/arch/arm64/kernel/ssbd.c
+index 3432e5ef9f41..07b12c034ec2 100644
+--- a/arch/arm64/kernel/ssbd.c
++++ b/arch/arm64/kernel/ssbd.c
+@@ -11,9 +11,7 @@
+
+ /*
+ * prctl interface for SSBD
+- * FIXME: Drop the below ifdefery once merged in 4.18.
+ */
+-#ifdef PR_SPEC_STORE_BYPASS
+ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+ int state = arm64_get_ssbd_state();
+@@ -107,4 +105,3 @@ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+ return -ENODEV;
+ }
+ }
+-#endif /* PR_SPEC_STORE_BYPASS */
+--
+2.16.4
+
diff --git a/patches.suse/irqchip-gic-v3-its-Fix-LPI-release-for-Multi-MSI-dev.patch b/patches.suse/irqchip-gic-v3-its-Fix-LPI-release-for-Multi-MSI-dev.patch
new file mode 100644
index 0000000000..3354877e6a
--- /dev/null
+++ b/patches.suse/irqchip-gic-v3-its-Fix-LPI-release-for-Multi-MSI-dev.patch
@@ -0,0 +1,53 @@
+From: Marc Zyngier <maz@kernel.org>
+Date: Thu, 5 Sep 2019 14:56:47 +0100
+Subject: irqchip/gic-v3-its: Fix LPI release for Multi-MSI devices
+
+Git-commit: c9c96e30ecaa0aafa225aa1a5392cb7db17c7a82
+Patch-mainline: v5.4-rc1
+References: jsc#ECO-561
+
+When allocating a range of LPIs for a Multi-MSI capable device,
+this allocation is extended to the closest power of 2.
+
+But on the release path, the interrupts are released one by
+one. This results in not releasing the "extra" range, leaking
+the its_device. Trying to reprobe the device will then fail.
+
+Fix it by releasing the LPIs the same way we allocate them.
+
+Fixes: 8208d1708b88 ("irqchip/gic-v3-its: Align PCI Multi-MSI allocation on their size")
+Reported-by: Jiaxing Luo <luojiaxing@huawei.com>
+Tested-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/f5e948aa-e32f-3f74-ae30-31fee06c2a74@huawei.com
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 9380aa43493d..62e54f1a248b 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2641,14 +2641,13 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+ struct its_node *its = its_dev->its;
+ int i;
+
++ bitmap_release_region(its_dev->event_map.lpi_map,
++ its_get_event_id(irq_domain_get_irq_data(domain, virq)),
++ get_count_order(nr_irqs));
++
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *data = irq_domain_get_irq_data(domain,
+ virq + i);
+- u32 event = its_get_event_id(data);
+-
+- /* Mark interrupt index as unused */
+- clear_bit(event, its_dev->event_map.lpi_map);
+-
+ /* Nuke the entry in the domain */
+ irq_domain_reset_irq_data(data);
+ }
+--
+2.16.4
+
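The fix restores a symmetry rule: Multi-MSI allocations round nr_irqs up to a power of two and take a whole bitmap region of that order, so the release path must free one region of the same order rather than clearing nr_irqs individual bits, or the rounding slack leaks. A toy model of that pairing (count_order() below mimics get_count_order(), and a 64-bit word stands in for the kernel bitmap):

#include <stdint.h>
#include <stdio.h>

/* Smallest order with (1 << order) >= count, like get_count_order(). */
static int count_order(unsigned int count)
{
	int order = 0;

	while ((1U << order) < count)
		order++;

	return order;
}

int main(void)
{
	unsigned int nr_irqs = 3;		/* Multi-MSI request    */
	int order = count_order(nr_irqs);	/* -> 2, i.e. 4 events  */
	uint64_t lpi_map = 0;

	lpi_map |= (1ULL << (1 << order)) - 1;	/* allocate a whole region */
	printf("allocated order %d: map=%#llx\n",
	       order, (unsigned long long)lpi_map);

	lpi_map &= ~((1ULL << (1 << order)) - 1); /* free the SAME region */
	printf("released:           map=%#llx\n",
	       (unsigned long long)lpi_map);
	return 0;
}
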
diff --git a/patches.suse/irqchip-gic-v3-its-Fix-command-queue-pointer-compari.patch b/patches.suse/irqchip-gic-v3-its-Fix-command-queue-pointer-compari.patch
new file mode 100644
index 0000000000..3141d29f41
--- /dev/null
+++ b/patches.suse/irqchip-gic-v3-its-Fix-command-queue-pointer-compari.patch
@@ -0,0 +1,148 @@
+From: Heyi Guo <guoheyi@huawei.com>
+Date: Mon, 13 May 2019 19:42:06 +0800
+Subject: irqchip/gic-v3-its: Fix command queue pointer comparison bug
+
+Git-commit: a050fa5476d418fc16b25abe168b3d38ba11e13c
+Patch-mainline: v5.2-rc7
+References: jsc#ECO-561
+
+When we run several VMs with PCI passthrough and GICv4 enabled, not
+pinning vCPUs, we will occasionally see the warnings below in dmesg:
+
+ITS queue timeout (65440 65504 480)
+ITS cmd its_build_vmovp_cmd failed
+
+The reason for the above issue is that in BUILD_SINGLE_CMD_FUNC:
+1. Post the write command.
+2. Release the lock.
+3. Start to read GITS_CREADR to get the reader pointer.
+4. Compare the reader pointer to the target pointer.
+5. If the reader pointer has not reached the target, sleep 1us and
+try again.
+
+If we have several processors running the above concurrently, other
+CPUs will post write commands while the 1st CPU is waiting for
+completion. So we may hit the issue below:
+
+phase 1:
+---rd_idx-----from_idx-----to_idx--0---------
+
+wait 1us:
+
+phase 2:
+--------------from_idx-----to_idx--0-rd_idx--
+
+That is, rd_idx may fly ahead of to_idx, and if to_idx is near the
+wrap point, rd_idx will wrap around. So the condition below will not
+be met even after 1s:
+
+if (from_idx < to_idx && rd_idx >= to_idx)
+
+There is another theoretical issue. For a slow and busy ITS, the
+initial rd_idx may fall far behind from_idx, as below:
+
+---rd_idx---0--from_idx-----to_idx-----------
+
+This will cause the wait function to exit too early.
+
+Actually, it does not make much sense to use from_idx to judge
+whether to_idx has wrapped, but we need an initial rd_idx taken while
+the lock is still held, and it can be used to judge whether to_idx
+and the current rd_idx have wrapped.
+
+We switch to a method of calculating the delta between two adjacent
+reads and accumulating it, so that we can recover the real rd_idx
+from the wrapped value even when the queue is almost full.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Jason Cooper <jason@lakedaemon.net>
+Signed-off-by: Heyi Guo <guoheyi@huawei.com>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 35 ++++++++++++++++++++++++-----------
+ 1 file changed, 24 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 1e364d3ad9c5..f0523916232d 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -744,32 +744,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
+ }
+
+ static int its_wait_for_range_completion(struct its_node *its,
+- struct its_cmd_block *from,
++ u64 prev_idx,
+ struct its_cmd_block *to)
+ {
+- u64 rd_idx, from_idx, to_idx;
++ u64 rd_idx, to_idx, linear_idx;
+ u32 count = 1000000; /* 1s! */
+
+- from_idx = its_cmd_ptr_to_offset(its, from);
++ /* Linearize to_idx if the command set has wrapped around */
+ to_idx = its_cmd_ptr_to_offset(its, to);
++ if (to_idx < prev_idx)
++ to_idx += ITS_CMD_QUEUE_SZ;
++
++ linear_idx = prev_idx;
+
+ while (1) {
++ s64 delta;
++
+ rd_idx = readl_relaxed(its->base + GITS_CREADR);
+
+- /* Direct case */
+- if (from_idx < to_idx && rd_idx >= to_idx)
+- break;
++ /*
++ * Compute the read pointer progress, taking the
++ * potential wrap-around into account.
++ */
++ delta = rd_idx - prev_idx;
++ if (rd_idx < prev_idx)
++ delta += ITS_CMD_QUEUE_SZ;
+
+- /* Wrapped case */
+- if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
++ linear_idx += delta;
++ if (linear_idx >= to_idx)
+ break;
+
+ count--;
+ if (!count) {
+- pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
+- from_idx, to_idx, rd_idx);
++ pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
++ to_idx, linear_idx);
+ return -1;
+ }
++ prev_idx = rd_idx;
+ cpu_relax();
+ udelay(1);
+ }
+@@ -786,6 +797,7 @@ void name(struct its_node *its, \
+ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
+ synctype *sync_obj; \
+ unsigned long flags; \
++ u64 rd_idx; \
+ \
+ raw_spin_lock_irqsave(&its->lock, flags); \
+ \
+@@ -807,10 +819,11 @@ void name(struct its_node *its, \
+ } \
+ \
+ post: \
++ rd_idx = readl_relaxed(its->base + GITS_CREADR); \
+ next_cmd = its_post_commands(its); \
+ raw_spin_unlock_irqrestore(&its->lock, flags); \
+ \
+- if (its_wait_for_range_completion(its, cmd, next_cmd)) \
++ if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
+ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
+ }
+
+--
+2.16.4
+
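The repaired wait loop stops comparing raw ring offsets and instead accumulates the delta between successive GITS_CREADR reads into a monotonically increasing linear index, which stays ordered across any number of wrap-arounds. A standalone model of one accumulation step (QUEUE_SZ is a toy value, not ITS_CMD_QUEUE_SZ):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SZ	64	/* toy stand-in for ITS_CMD_QUEUE_SZ */

/* Advance the linearised read index from a freshly read raw pointer. */
static uint64_t linearize(uint64_t linear, uint64_t prev, uint64_t rd)
{
	uint64_t delta = rd - prev;

	if (rd < prev)		/* the raw pointer wrapped around */
		delta += QUEUE_SZ;

	return linear + delta;
}

int main(void)
{
	/* rd goes 60 -> 4: wrapped, yet the linear index keeps growing. */
	uint64_t linear = linearize(60, 60, 4);

	printf("linear index: %llu\n", (unsigned long long)linear); /* 68 */
	return 0;
}
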
diff --git a/patches.suse/irqchip-gic-v3-its-Fix-misuse-of-GENMASK-macro.patch b/patches.suse/irqchip-gic-v3-its-Fix-misuse-of-GENMASK-macro.patch
new file mode 100644
index 0000000000..e5323c2dfe
--- /dev/null
+++ b/patches.suse/irqchip-gic-v3-its-Fix-misuse-of-GENMASK-macro.patch
@@ -0,0 +1,35 @@
+From: Joe Perches <joe@perches.com>
+Date: Tue, 9 Jul 2019 22:04:18 -0700
+Subject: irqchip/gic-v3-its: Fix misuse of GENMASK macro
+
+Git-commit: 20faba848752901de23a4d45a1174d64d2069dde
+Patch-mainline: v5.3-rc1
+References: jsc#ECO-561
+
+Arguments are supposed to be ordered high then low.
+
+Signed-off-by: Joe Perches <joe@perches.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Link: https://lkml.kernel.org/r/ab5deb4fc3cd604cb620054770b7d00016d736bc.1562734889.git.joe@perches.com
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ drivers/irqchip/irq-gic-v3-its.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 35500801dc2b..730fbe0e2a9d 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -185,7 +185,7 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
+
+ static struct its_collection *valid_col(struct its_collection *col)
+ {
+- if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
++ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+ return NULL;
+
+ return col;
+--
+2.16.4
+
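GENMASK_ULL(h, l) expands to a mask covering bits h down to l, so the arguments are high-then-low; with (0, 15) the two shifts cancel almost everything and the macro quietly yields 0 rather than the low 16 bits, which is why the WARN_ON_ONCE() above could never fire. A quick demonstration with the usual definition (reproduced here for illustration):

#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	printf("GENMASK_ULL(15, 0) = %#llx\n", GENMASK_ULL(15, 0)); /* 0xffff */
	printf("GENMASK_ULL(0, 15) = %#llx\n", GENMASK_ULL(0, 15)); /* 0x0    */
	return 0;
}
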
diff --git a/series.conf b/series.conf
index 9c529e63a2..ef0d1bf120 100644
--- a/series.conf
+++ b/series.conf
@@ -24278,6 +24278,15 @@
patches.suse/kvm-s390-vsie-use-read_once-to-access-some-scb-fields.patch
patches.suse/KVM-s390-diagnoses-are-instructions-as-well.patch
patches.suse/KVM-s390-add-vcpu-stat-counters-for-many-instruction.patch
+ patches.suse/KVM-arm-arm64-Detangle-kvm_mmu.h-from-kvm_hyp.h.patch
+ patches.suse/KVM-arm-arm64-Split-dcache-icache-flushing.patch
+ patches.suse/arm64-KVM-Add-invalidate_icache_range-helper.patch
+ patches.suse/arm-KVM-Add-optimized-PIPT-icache-flushing.patch
+ patches.suse/arm64-KVM-PTE-PMD-S2-XN-bit-definition.patch
+ patches.suse/KVM-arm-arm64-Limit-icache-invalidation-to-prefetch-.patch
+ patches.suse/KVM-arm-arm64-Only-clean-the-dcache-on-translation-f.patch
+ patches.suse/KVM-arm-arm64-Preserve-Exec-permission-across-R-W-pe.patch
+ patches.suse/KVM-arm-arm64-Drop-vcpu-parameter-from-guest-cache-m.patch
patches.suse/KVM-arm-arm64-Handle-CPU_PM_ENTER_FAILED.patch
patches.suse/x86-kvm-vmx-do-not-use-vm-exit-instruction-length-fo.patch
patches.suse/msft-hv-1588-x86-hyperv-Check-for-required-priviliges-in-hyperv_i.patch
@@ -28105,6 +28114,7 @@
patches.suse/arm64-kernel-don-t-ban-ADRP-to-work-around-Cortex-A5.patch
patches.suse/0015-arm64-errata-add-REVIDR-handling-to-framework.patch
patches.suse/arm64-kernel-enable-A53-erratum-8434319-handling-at-.patch
+ patches.suse/arm64-Add-support-for-new-control-bits-CTR_EL0.DIC-a.patch
patches.suse/ACPI-IORT-Remove-temporary-iort_get_id_mapping_index.patch
patches.suse/arm64-fix-undefined-reference-to-printk.patch
patches.suse/0018-arm64-Expose-Arm-v8.4-features.patch
@@ -34729,6 +34739,7 @@
patches.suse/scsi-xen-scsifront-add-error-handling-for-xenbus_pri.patch
patches.suse/0001-xen-Remove-unnecessary-BUG_ON-from-__unbind_from_irq.patch
patches.suse/KVM-arm-arm64-Drop-resource-size-check-for-GICV-wind.patch
+ patches.suse/arm64-Introduce-sysreg_clear_set.patch
patches.suse/kvm-enforce-error-in-ioctl-for-compat-tasks-when-kvm_compat
patches.suse/0017-arm64-dma-mapping-clear-buffers-allocated-with-FORCE.patch
patches.suse/0001-arm64-kpti-Use-early_param-for-kpti-command-line-opt.patch
@@ -36056,6 +36067,8 @@
patches.suse/docs-zh_CN-fix-location-of-oops-tracing.txt.patch
patches.suse/MAINTAINERS-fix-location-of-ina2xx.txt-device-tree-f.patch
patches.suse/x86-smp-fix-non-smp-broken-build-due-to-redefinition-of-apic_id_is_primary_thread
+ patches.suse/arm64-Fix-mismatched-cache-line-size-detection.patch
+ patches.suse/arm64-Handle-mismatched-cache-type.patch
patches.suse/0003-arm64-add-PSR_AA32_-definitions.patch
patches.suse/0004-arm64-don-t-zero-DIT-on-signal-return.patch
patches.suse/0005-arm64-compat-map-SPSR_ELx-PSR-for-signals.patch
@@ -36064,6 +36077,9 @@
patches.suse/0008-kvm-arm-use-PSR_AA32-definitions.patch
patches.suse/0009-arm64-remove-unused-COMPAT_PSR-definitions.patch
patches.suse/0019-arm64-numa-rework-ACPI-NUMA-initialization.patch
+ patches.suse/arm64-move-SCTLR_EL-1-2-assertions-to-asm-sysreg.h.patch
+ patches.suse/arm64-kill-config_sctlr_el1.patch
+ patches.suse/arm64-kill-change_cpacr.patch
patches.suse/0008-arm64-export-memblock_reserve-d-regions-via-proc-iom.patch
patches.suse/drivers-acpi-add-dependency-of-EFI-for-arm64.patch
patches.suse/efi-arm-preserve-early-mapping-of-UEFI-memory-map-lo.patch
@@ -40051,6 +40067,7 @@
patches.suse/s390-sles15sp1-00-04-16-KVM-s390-Fix-pfmf-and-conditional-skey-emulation.patch
patches.suse/kvm-s390-vsie-copy-wrapping-keys-to-right-place.patch
patches.suse/s390-sles15sp1-00-04-17-KVM-s390-Properly-lock-mm-context-allow_gmap_hpage_1.patch
+ patches.suse/KVM-arm-arm64-Clean-dcache-to-PoC-when-changing-PTE-.patch
patches.suse/kbuild-make-missing-depmod-a-warning-instead-of-an-error.patch
patches.suse/irqchip-gic-v3-its-Cap-lpi_id_bits-to-reduce-memory-.patch
patches.suse/x86-microcode-make-sure-boot_cpu_data-microcode-is-up-to-date
@@ -40698,11 +40715,32 @@
patches.suse/net-fix-pskb_trim_rcsum_slow-with-odd-trim-offset.patch
patches.suse/net-ipv6-Fix-index-counter-for-unicast-addresses-in-.patch
patches.suse/i2c-rcar-cleanup-DMA-for-all-kinds-of-failure.patch
+ patches.suse/arm64-Fix-silly-typo-in-comment.patch
+ patches.suse/arm64-cpufeature-Detect-SSBS-and-advertise-to-usersp.patch
+ patches.suse/arm64-ssbd-Drop-ifdefs-for-PR_SPEC_STORE_BYPASS.patch
+ patches.suse/arm64-entry-Allow-handling-of-undefined-instructions.patch
+ patches.suse/arm64-ssbd-Add-support-for-PSTATE.SSBS-rather-than-t.patch
+ patches.suse/KVM-arm64-Set-SCTLR_EL2.DSSBS-if-SSBD-is-forcefully-.patch
+ patches.suse/arm64-cpu-Move-errata-and-feature-enable-callbacks-c.patch
+ patches.suse/arm64-force_signal_inject-WARN-if-called-from-kernel.patch
patches.suse/0026-arm64-mm-Support-Common-Not-Private-translations.patch
patches.suse/0028-arm64-KVM-Enable-Common-Not-Private-translations.patch
+ patches.suse/arm64-cpu_errata-Remove-ARM64_MISMATCHED_CACHE_LINE_.patch
patches.suse/0006-arm64-lse-remove-fcall-used-x0-flag.patch
+ patches.suse/arm64-Add-decoding-macros-for-CP15_32-and-CP15_64-tr.patch
+ patches.suse/arm64-compat-Add-separate-CP15-trapping-hook.patch
+ patches.suse/arm64-compat-Add-condition-code-checks-and-IT-advanc.patch
+ patches.suse/arm64-compat-Add-cp15_32-and-cp15_64-handler-arrays.patch
+ patches.suse/arm64-compat-Add-CNTVCT-trap-handler.patch
+ patches.suse/arm64-compat-Add-CNTFRQ-trap-handler.patch
+ patches.suse/arm64-arch_timer-Add-workaround-for-ARM-erratum-1188.patch
patches.suse/0008-arm64-numa-Report-correct-memblock-range-for-the-dum.patch
patches.suse/0009-arm64-numa-Unify-common-error-path-in-numa_init.patch
+ patches.suse/arm64-arch_timer-avoid-unused-function-warning.patch
+ patches.suse/arm64-Add-silicon-errata.txt-entry-for-ARM-erratum-1.patch
+ patches.suse/arm64-cpufeature-ctr-Fix-cpu-capability-check-for-la.patch
+ patches.suse/arm64-cpufeature-Fix-handling-of-CTR_EL0.IDC-field.patch
+ patches.suse/arm64-cpufeature-Trap-CTR_EL0-access-only-where-it-i.patch
patches.suse/block-remove-bio_rewind_iter.patch
patches.suse/block-bfq-correctly-charge-and-reset-entity-service-.patch
patches.suse/block-bfq-inject-other-queue-I-O-into-seeky-idle-que.patch
@@ -45672,6 +45710,7 @@
patches.suse/ASoC-topology-free-created-components-in-tplg-load-e.patch
patches.suse/mm-page_alloc-fix-a-division-by-zero-error-when-boosting-watermarks-v2.patch
patches.suse/tmpfs-fix-link-accounting-when-a-tmpfile-is-linked-i.patch
+ patches.suse/arm64-fix-SSBS-sanitization.patch
patches.suse/libceph-handle-an-empty-authorize-reply.patch
patches.suse/ceph-avoid-repeatedly-adding-inode-to-mdsc-snap_flush_list.patch
patches.suse/clk-sunxi-ng-v3s-Fix-TCON-reset-de-assert-bit.patch
@@ -47382,6 +47421,10 @@
patches.suse/s390-pci-provide-support-for-mio-instructions
patches.suse/s390-pci-add-parameter-to-disable-usage-of-mio-instructions
patches.suse/s390-enable-processes-for-mio-instructions
+ patches.suse/arm64-Restrict-ARM64_ERRATUM_1188873-mitigation-to-A.patch
+ patches.suse/arm64-Make-ARM64_ERRATUM_1188873-depend-on-COMPAT.patch
+ patches.suse/arm64-Add-part-number-for-Neoverse-N1.patch
+ patches.suse/arm64-Apply-ARM64_ERRATUM_1188873-to-Neoverse-N1.patch
patches.suse/ACPI-button-reinitialize-button-state-upon-resume.patch
patches.suse/0002-cpufreq-qoriq-add-support-for-lx2160a.patch
patches.suse/cpufreq-kirkwood-fix-possible-object-reference-leak.patch
@@ -48473,6 +48516,7 @@
patches.suse/mmc-sdhci-iproc-Set-NO_HISPD-bit-to-fix-HS50-data-ho.patch
patches.suse/platform-x86-pmc_atom-Add-Lex-3I380D-industrial-PC-t.patch
patches.suse/platform-x86-pmc_atom-Add-several-Beckhoff-Automatio.patch
+ patches.suse/arm64-Handle-erratum-1418040-as-a-superset-of-erratu.patch
patches.suse/bio-fix-improper-use-of-smp_mb__before_atomic.patch
patches.suse/sbitmap-fix-improper-use-of-smp_mb__before_atomic.patch
patches.suse/blk-mq-fix-hang-caused-by-freeze-unfreeze-sequence.patch
@@ -48793,6 +48837,7 @@
patches.suse/mm-soft-offline-return-EBUSY-if-set_hwpoison_free_bu.patch
patches.suse/mm-hugetlb-soft-offline-dissolve_free_huge_page-retu.patch
patches.suse/efi-bgrt-Drop-BGRT-status-field-reserved-bits-check.patch
+ patches.suse/irqchip-gic-v3-its-Fix-command-queue-pointer-compari.patch
patches.suse/x86-microcode-fix-the-microcode-load-on-cpu-hotplug-for-real.patch
patches.suse/x86-speculation-allow-guests-to-use-ssbd-even-if-host-does-not.patch
patches.suse/cpu-speculation-warn-on-unsupported-mitigations-parameter.patch
@@ -48821,6 +48866,7 @@
patches.suse/kvm-lapic-fix-pending-interrupt-in-irr-blocked-by-software-disable-lapic
patches.suse/scsi-target-iblock-fix-overrun-in-write-same-emulation
patches.suse/dmaengine-imx-sdma-remove-BD_INTR-for-channel0.patch
+ patches.suse/arm64-cpufeature-Convert-hook_lock-to-raw_spin_lock_.patch
patches.suse/acpi-arm64-ignore-5.1-FADTs-that-are-reported-as-5.0.patch
patches.suse/s390-jump_label-replace-stop_machine-with-smp_call_f.patch
patches.suse/0001-s390-qdio-handle-PENDING-state-for-QEBSM-devices.patch
@@ -49176,6 +49222,7 @@
patches.suse/nfc-fix-potential-illegal-memory-access.patch
patches.suse/tcp-Reset-bytes_acked-and-bytes_received-when-discon.patch
patches.suse/net-tls-fix-socket-wmem-accounting-on-fallback-with-.patch
+ patches.suse/irqchip-gic-v3-its-Fix-misuse-of-GENMASK-macro.patch
patches.suse/scsi-hpsa-correct-simple-mode
patches.suse/scsi-hpsa-correct-device-resets
patches.suse/scsi-megaraid_sas-IRQ-poll-to-avoid-CPU-hard-lockups.patch
@@ -49575,6 +49622,7 @@
patches.suse/bcache-fix-possible-memory-leak-in-bch_cached_dev_ru.patch
patches.suse/nvme-fix-memory-leak-caused-by-incorrect-subsystem-free.patch
patches.suse/nvme-fix-multipath-crash-when-ANA-is-deactivated.patch
+ patches.suse/arm64-Force-SSBS-on-context-switch.patch
patches.suse/ACPI-IORT-Fix-off-by-one-check-in-iort_dev_find_its_.patch
patches.suse/drm-silence-variable-conn-set-but-not-used.patch
patches.suse/drm-amd-display-Wait-for-backlight-programming-compl.patch
@@ -49938,6 +49986,7 @@
patches.suse/efi-cper-print-AER-info-of-PCIe-fatal-error.patch
patches.suse/platform-x86-pmc_atom-Add-Siemens-SIMATIC-IPC227E-to.patch
patches.suse/alarmtimer-Use-EOPNOTSUPP-instead-of-ENOTSUPP.patch
+ patches.suse/irqchip-gic-v3-its-Fix-LPI-release-for-Multi-MSI-dev.patch
patches.suse/s390-add-support-for-ibm-z15-machines
patches.suse/s390-pci-fix-msi-message-data
patches.suse/blk-mq-introduce-blk_mq_request_completed.patch
@@ -50934,6 +50983,10 @@
patches.suse/iommu-amd-Apply-the-same-IVRS-IOAPIC-workaround-to-A.patch
+ patches.suse/arm64-errata-Hide-CTR_EL0.DIC-on-systems-affected-by.patch
+ patches.suse/arm64-Fake-the-IminLine-size-on-systems-affected-by-.patch
+ patches.suse/arm64-compat-Workaround-Neoverse-N1-1542419-for-comp.patch
+
########################################################
# Filesystem
########################################################