author     Takashi Iwai <tiwai@suse.de>    2018-04-11 07:37:06 +0200
committer  Takashi Iwai <tiwai@suse.de>    2018-04-11 07:37:06 +0200
commit     56553a9dc7b1fed1cb69b429ceccb0794b767328 (patch)
tree       8331cf0002ab6be025a1e4bfbfb4dc48d281d690
parent     e7986b52ce287ab8f5b444c9ed5c8a1ab4b75efc (diff)
parent     3b29e62d6c7ac5b37b4ed43a79ce3d206afda483 (diff)
Merge branch 'users/mbrugger/SLE12-SP3/for-next' into SLE12-SP3
Pull arm64 fixes from Matthias Brugger (bsc#1088050)
-rw-r--r--  patches.arch/0001-arm64-KVM-Increment-PC-after-handling-an-SMC-trap.patch      49
-rw-r--r--  patches.arch/0002-arm-arm64-KVM-Consolidate-the-PSCI-include-files.patch       178
-rw-r--r--  patches.arch/0003-arm-arm64-KVM-Add-PSCI_VERSION-helper.patch                  78
-rw-r--r--  patches.arch/0004-arm-arm64-KVM-Add-smccc-accessors-to-PSCI-code.patch         140
-rw-r--r--  patches.arch/0005-arm-arm64-KVM-Implement-PSCI-1.0-support.patch               113
-rw-r--r--  patches.arch/0006-arm-arm64-KVM-Advertise-SMCCC-v1.1.patch                     131
-rw-r--r--  patches.arch/0007-arm-arm64-KVM-Turn-kvm_psci_version-into-a-static-in.patch   139
-rw-r--r--  patches.arch/0008-arm64-KVM-Report-SMCCC_ARCH_WORKAROUND_1-BP-hardenin.patch   90
-rw-r--r--  patches.arch/0009-arm64-KVM-Add-SMCCC_ARCH_WORKAROUND_1-fast-handling.patch    71
-rw-r--r--  patches.arch/0010-firmware-psci-Expose-PSCI-conduit.patch                      112
-rw-r--r--  patches.arch/0011-firmware-psci-Expose-SMCCC-version-through-psci_ops.patch    101
-rw-r--r--  patches.arch/0012-arm-arm64-smccc-Make-function-identifiers-an-unsigne.patch   48
-rw-r--r--  patches.arch/0013-arm-arm64-smccc-Implement-SMCCC-v1.1-inline-primitiv.patch   170
-rw-r--r--  patches.arch/0014-arm64-Add-ARM_SMCCC_ARCH_WORKAROUND_1-BP-hardening-s.patch   160
-rw-r--r--  patches.arch/0015-arm64-Kill-PSCI_GET_VERSION-as-a-variant-2-workaroun.patch   216
-rw-r--r--  patches.arch/arm64-Relax-ARM_SMCCC_ARCH_WORKAROUND_1-discovery.patch            48
-rw-r--r--  patches.arch/arm64-capabilities-Handle-duplicate-entries-for-a-ca.patch         107
-rw-r--r--  patches.arch/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-e.patch         45
-rw-r--r--  patches.arch/arm64-fix-smccc-compilation.patch                                  54
-rw-r--r--  patches.arch/arm64-mm-fix-thinko-in-non-global-page-table-attribu.patch         55
-rw-r--r--  patches.drivers/net-hns-Fix-ethtool-private-flags.patch                         80
-rw-r--r--  patches.drivers/watchdog-sbsa-use-32-bit-read-for-WCV.patch                     53
-rw-r--r--  series.conf                                                                     27
23 files changed, 2265 insertions, 0 deletions
diff --git a/patches.arch/0001-arm64-KVM-Increment-PC-after-handling-an-SMC-trap.patch b/patches.arch/0001-arm64-KVM-Increment-PC-after-handling-an-SMC-trap.patch
new file mode 100644
index 0000000000..150705b08d
--- /dev/null
+++ b/patches.arch/0001-arm64-KVM-Increment-PC-after-handling-an-SMC-trap.patch
@@ -0,0 +1,49 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:07 +0000
+Subject: arm64: KVM: Increment PC after handling an SMC trap
+Git-commit: f5115e8869e1dfafac0e414b4f1664f3a84a4683
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+When handling an SMC trap, the "preferred return address" is set
+to that of the SMC, and not the next PC (which is a departure from
+the behaviour of an SMC that isn't trapped).
+
+Increment PC in the handler, as the guest is otherwise forever
+stuck...
+
+Cc: stable@vger.kernel.org
+Fixes: acfb3b883f6d ("arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls")
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kvm/handle_exit.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
+index 520b0dad3c62..5493bbefbd0d 100644
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -62,7 +62,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+
+ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ {
++ /*
++ * "If an SMC instruction executed at Non-secure EL1 is
++ * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
++ * Trap exception, not a Secure Monitor Call exception [...]"
++ *
++ * We need to advance the PC after the trap, as it would
++ * otherwise return to the same address...
++ */
+ vcpu_set_reg(vcpu, 0, ~0UL);
++ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ return 1;
+ }
+
+--
+2.16.2
+
diff --git a/patches.arch/0002-arm-arm64-KVM-Consolidate-the-PSCI-include-files.patch b/patches.arch/0002-arm-arm64-KVM-Consolidate-the-PSCI-include-files.patch
new file mode 100644
index 0000000000..3f1ce9c5b2
--- /dev/null
+++ b/patches.arch/0002-arm-arm64-KVM-Consolidate-the-PSCI-include-files.patch
@@ -0,0 +1,178 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:08 +0000
+Subject: arm/arm64: KVM: Consolidate the PSCI include files
+Git-commit: 1a2fb94e6a771ff94f4afa22497a4695187b820c
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+As we're about to update the PSCI support, and because I'm lazy,
+let's move the PSCI include file to include/kvm so that both
+ARM architectures can find it.
+
+Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm/include/asm/kvm_psci.h | 27 ---------------------------
+ arch/arm/kvm/handle_exit.c | 2 +-
+ arch/arm64/include/asm/kvm_psci.h | 27 ---------------------------
+ arch/arm64/kvm/handle_exit.c | 3 ++-
+ include/kvm/arm_psci.h | 27 +++++++++++++++++++++++++++
+ virt/kvm/arm/arm.c | 2 +-
+ virt/kvm/arm/psci.c | 3 ++-
+ 7 files changed, 33 insertions(+), 58 deletions(-)
+ delete mode 100644 arch/arm/include/asm/kvm_psci.h
+ rename arch/arm64/include/asm/kvm_psci.h => include/kvm/arm_psci.h (89%)
+
+--- a/arch/arm/include/asm/kvm_psci.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * Copyright (C) 2012 - ARM Ltd
+- * Author: Marc Zyngier <marc.zyngier@arm.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-#ifndef __ARM_KVM_PSCI_H__
+-#define __ARM_KVM_PSCI_H__
+-
+-#define KVM_ARM_PSCI_0_1 1
+-#define KVM_ARM_PSCI_0_2 2
+-
+-int kvm_psci_version(struct kvm_vcpu *vcpu);
+-int kvm_psci_call(struct kvm_vcpu *vcpu);
+-
+-#endif /* __ARM_KVM_PSCI_H__ */
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -21,7 +21,7 @@
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
+ #include <asm/kvm_mmu.h>
+-#include <asm/kvm_psci.h>
++#include <kvm/arm_psci.h>
+ #include <trace/events/kvm.h>
+
+ #include "trace.h"
+--- a/arch/arm64/include/asm/kvm_psci.h
++++ /dev/null
+@@ -1,27 +0,0 @@
+-/*
+- * Copyright (C) 2012,2013 - ARM Ltd
+- * Author: Marc Zyngier <marc.zyngier@arm.com>
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License version 2 as
+- * published by the Free Software Foundation.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- */
+-
+-#ifndef __ARM64_KVM_PSCI_H__
+-#define __ARM64_KVM_PSCI_H__
+-
+-#define KVM_ARM_PSCI_0_1 1
+-#define KVM_ARM_PSCI_0_2 2
+-
+-int kvm_psci_version(struct kvm_vcpu *vcpu);
+-int kvm_psci_call(struct kvm_vcpu *vcpu);
+-
+-#endif /* __ARM64_KVM_PSCI_H__ */
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -22,12 +22,13 @@
+ #include <linux/kvm.h>
+ #include <linux/kvm_host.h>
+
++#include <kvm/arm_psci.h>
++
+ #include <asm/esr.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_coproc.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_mmu.h>
+-#include <asm/kvm_psci.h>
+
+ #define CREATE_TRACE_POINTS
+ #include "trace.h"
+--- /dev/null
++++ b/include/kvm/arm_psci.h
+@@ -0,0 +1,27 @@
++/*
++ * Copyright (C) 2012,2013 - ARM Ltd
++ * Author: Marc Zyngier <marc.zyngier@arm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ */
++
++#ifndef __KVM_ARM_PSCI_H__
++#define __KVM_ARM_PSCI_H__
++
++#define KVM_ARM_PSCI_0_1 1
++#define KVM_ARM_PSCI_0_2 2
++
++int kvm_psci_version(struct kvm_vcpu *vcpu);
++int kvm_psci_call(struct kvm_vcpu *vcpu);
++
++#endif /* __KVM_ARM_PSCI_H__ */
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -29,6 +29,7 @@
+ #include <linux/kvm.h>
+ #include <trace/events/kvm.h>
+ #include <kvm/arm_pmu.h>
++#include <kvm/arm_psci.h>
+
+ #define CREATE_TRACE_POINTS
+ #include "trace.h"
+@@ -44,7 +45,6 @@
+ #include <asm/kvm_mmu.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
+-#include <asm/kvm_psci.h>
+ #include <asm/sections.h>
+
+ #ifdef REQUIRES_VIRT
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -21,9 +21,10 @@
+
+ #include <asm/cputype.h>
+ #include <asm/kvm_emulate.h>
+-#include <asm/kvm_psci.h>
+ #include <asm/kvm_host.h>
+
++#include <kvm/arm_psci.h>
++
+ #include <uapi/linux/psci.h>
+
+ /*
diff --git a/patches.arch/0003-arm-arm64-KVM-Add-PSCI_VERSION-helper.patch b/patches.arch/0003-arm-arm64-KVM-Add-PSCI_VERSION-helper.patch
new file mode 100644
index 0000000000..a53d8bae4b
--- /dev/null
+++ b/patches.arch/0003-arm-arm64-KVM-Add-PSCI_VERSION-helper.patch
@@ -0,0 +1,78 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:09 +0000
+Subject: arm/arm64: KVM: Add PSCI_VERSION helper
+Git-commit: d0a144f12a7ca8368933eae6583c096c363ec506
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+As we're about to trigger a PSCI version explosion, it doesn't
+hurt to introduce a PSCI_VERSION helper that is going to be
+used everywhere.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ include/kvm/arm_psci.h | 6 ++++--
+ include/uapi/linux/psci.h | 3 +++
+ virt/kvm/arm/psci.c | 4 +---
+ 3 files changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
+index 2042bb909474..5659343580a3 100644
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -18,8 +18,10 @@
+ #ifndef __KVM_ARM_PSCI_H__
+ #define __KVM_ARM_PSCI_H__
+
+-#define KVM_ARM_PSCI_0_1 1
+-#define KVM_ARM_PSCI_0_2 2
++#include <uapi/linux/psci.h>
++
++#define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
++#define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
+
+ int kvm_psci_version(struct kvm_vcpu *vcpu);
+ int kvm_psci_call(struct kvm_vcpu *vcpu);
+diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h
+index 760e52a9640f..b3bcabe380da 100644
+--- a/include/uapi/linux/psci.h
++++ b/include/uapi/linux/psci.h
+@@ -88,6 +88,9 @@
+ (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
+ #define PSCI_VERSION_MINOR(ver) \
+ ((ver) & PSCI_VERSION_MINOR_MASK)
++#define PSCI_VERSION(maj, min) \
++ ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
++ ((min) & PSCI_VERSION_MINOR_MASK))
+
+ /* PSCI features decoding (>=1.0) */
+ #define PSCI_1_0_FEATURES_CPU_SUSPEND_PF_SHIFT 1
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
+index b322e46fd142..999f94d6bb98 100644
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -25,8 +25,6 @@
+
+ #include <kvm/arm_psci.h>
+
+-#include <uapi/linux/psci.h>
+-
+ /*
+ * This is an implementation of the Power State Coordination Interface
+ * as described in ARM document number ARM DEN 0022A.
+@@ -222,7 +220,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ * Bits[31:16] = Major Version = 0
+ * Bits[15:0] = Minor Version = 2
+ */
+- val = 2;
++ val = KVM_ARM_PSCI_0_2;
+ break;
+ case PSCI_0_2_FN_CPU_SUSPEND:
+ case PSCI_0_2_FN64_CPU_SUSPEND:
+--
+2.16.2
+
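A quick aside on the macro introduced above: PSCI_VERSION() packs the major revision into bits [31:16] and the minor revision into bits [15:0], which is why the open-coded "val = 2" can become KVM_ARM_PSCI_0_2 without changing the value returned to the guest. A minimal standalone sketch of the arithmetic (user-space C, with the mask and shift constants written out numerically rather than pulled from uapi/linux/psci.h):

    #include <stdio.h>

    /* Same packing as the PSCI_VERSION() macro added in the hunk above. */
    #define PSCI_VERSION_MAJOR_SHIFT  16
    #define PSCI_VERSION_MAJOR_MASK   0xffff0000U
    #define PSCI_VERSION_MINOR_MASK   0x0000ffffU
    #define PSCI_VERSION(maj, min) \
            ((((maj) << PSCI_VERSION_MAJOR_SHIFT) & PSCI_VERSION_MAJOR_MASK) | \
             ((min) & PSCI_VERSION_MINOR_MASK))

    int main(void)
    {
            printf("PSCI 0.2 -> %#x\n", PSCI_VERSION(0, 2)); /* 0x2: the old hard-coded "val = 2" */
            printf("PSCI 1.0 -> %#x\n", PSCI_VERSION(1, 0)); /* 0x10000 */
            printf("PSCI 1.1 -> %#x\n", PSCI_VERSION(1, 1)); /* 0x10001, the same encoding SMCCC reuses */
            return 0;
    }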
diff --git a/patches.arch/0004-arm-arm64-KVM-Add-smccc-accessors-to-PSCI-code.patch b/patches.arch/0004-arm-arm64-KVM-Add-smccc-accessors-to-PSCI-code.patch
new file mode 100644
index 0000000000..0ebcb54a6b
--- /dev/null
+++ b/patches.arch/0004-arm-arm64-KVM-Add-smccc-accessors-to-PSCI-code.patch
@@ -0,0 +1,140 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:10 +0000
+Subject: arm/arm64: KVM: Add smccc accessors to PSCI code
+Git-commit: 84684fecd7ea381824a96634a027b7719587fb77
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+Instead of open coding the accesses to the various registers,
+let's add explicit SMCCC accessors.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ virt/kvm/arm/psci.c | 52 ++++++++++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 42 insertions(+), 10 deletions(-)
+
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
+index 999f94d6bb98..c41553d35110 100644
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -32,6 +32,38 @@
+
+ #define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
+
++static u32 smccc_get_function(struct kvm_vcpu *vcpu)
++{
++ return vcpu_get_reg(vcpu, 0);
++}
++
++static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
++{
++ return vcpu_get_reg(vcpu, 1);
++}
++
++static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
++{
++ return vcpu_get_reg(vcpu, 2);
++}
++
++static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
++{
++ return vcpu_get_reg(vcpu, 3);
++}
++
++static void smccc_set_retval(struct kvm_vcpu *vcpu,
++ unsigned long a0,
++ unsigned long a1,
++ unsigned long a2,
++ unsigned long a3)
++{
++ vcpu_set_reg(vcpu, 0, a0);
++ vcpu_set_reg(vcpu, 1, a1);
++ vcpu_set_reg(vcpu, 2, a2);
++ vcpu_set_reg(vcpu, 3, a3);
++}
++
+ static unsigned long psci_affinity_mask(unsigned long affinity_level)
+ {
+ if (affinity_level <= 3)
+@@ -77,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ unsigned long context_id;
+ phys_addr_t target_pc;
+
+- cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
++ cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
+ if (vcpu_mode_is_32bit(source_vcpu))
+ cpu_id &= ~((u32) 0);
+
+@@ -96,8 +128,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ return PSCI_RET_INVALID_PARAMS;
+ }
+
+- target_pc = vcpu_get_reg(source_vcpu, 2);
+- context_id = vcpu_get_reg(source_vcpu, 3);
++ target_pc = smccc_get_arg2(source_vcpu);
++ context_id = smccc_get_arg3(source_vcpu);
+
+ kvm_reset_vcpu(vcpu);
+
+@@ -116,7 +148,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ * NOTE: We always update r0 (or x0) because for PSCI v0.1
+ * the general puspose registers are undefined upon CPU_ON.
+ */
+- vcpu_set_reg(vcpu, 0, context_id);
++ smccc_set_retval(vcpu, context_id, 0, 0, 0);
+ vcpu->arch.power_off = false;
+ smp_mb(); /* Make sure the above is visible */
+
+@@ -136,8 +168,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_vcpu *tmp;
+
+- target_affinity = vcpu_get_reg(vcpu, 1);
+- lowest_affinity_level = vcpu_get_reg(vcpu, 2);
++ target_affinity = smccc_get_arg1(vcpu);
++ lowest_affinity_level = smccc_get_arg2(vcpu);
+
+ /* Determine target affinity mask */
+ target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
+@@ -210,7 +242,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+ struct kvm *kvm = vcpu->kvm;
+- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
++ u32 psci_fn = smccc_get_function(vcpu);
+ unsigned long val;
+ int ret = 1;
+
+@@ -277,14 +309,14 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ break;
+ }
+
+- vcpu_set_reg(vcpu, 0, val);
++ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return ret;
+ }
+
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
+ struct kvm *kvm = vcpu->kvm;
+- unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
++ u32 psci_fn = smccc_get_function(vcpu);
+ unsigned long val;
+
+ switch (psci_fn) {
+@@ -302,7 +334,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ break;
+ }
+
+- vcpu_set_reg(vcpu, 0, val);
++ smccc_set_retval(vcpu, val, 0, 0, 0);
+ return 1;
+ }
+
+--
+2.16.2
+
diff --git a/patches.arch/0005-arm-arm64-KVM-Implement-PSCI-1.0-support.patch b/patches.arch/0005-arm-arm64-KVM-Implement-PSCI-1.0-support.patch
new file mode 100644
index 0000000000..c03d12bf4e
--- /dev/null
+++ b/patches.arch/0005-arm-arm64-KVM-Implement-PSCI-1.0-support.patch
@@ -0,0 +1,113 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:11 +0000
+Subject: arm/arm64: KVM: Implement PSCI 1.0 support
+Git-commit: 58e0b2239a4d997094ba63986ef4de29ddc91d87
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+PSCI 1.0 can be trivially implemented by providing the FEATURES
+call on top of PSCI 0.2 and returning 1.0 as the PSCI version.
+
+We happily ignore everything else, as they are either optional or
+are clarifications that do not require any additional change.
+
+PSCI 1.0 is now the default until we decide to add a userspace
+selection API.
+
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ include/kvm/arm_psci.h | 3 +++
+ virt/kvm/arm/psci.c | 45 ++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 47 insertions(+), 1 deletion(-)
+
+diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
+index 5659343580a3..32360432cff5 100644
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -22,6 +22,9 @@
+
+ #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
+ #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2)
++#define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0)
++
++#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+
+ int kvm_psci_version(struct kvm_vcpu *vcpu);
+ int kvm_psci_call(struct kvm_vcpu *vcpu);
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
+index c41553d35110..3e7c63e15f04 100644
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -234,7 +234,7 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
+ int kvm_psci_version(struct kvm_vcpu *vcpu)
+ {
+ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+- return KVM_ARM_PSCI_0_2;
++ return KVM_ARM_PSCI_LATEST;
+
+ return KVM_ARM_PSCI_0_1;
+ }
+@@ -313,6 +313,47 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ return ret;
+ }
+
++static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
++{
++ u32 psci_fn = smccc_get_function(vcpu);
++ u32 feature;
++ unsigned long val;
++ int ret = 1;
++
++ switch(psci_fn) {
++ case PSCI_0_2_FN_PSCI_VERSION:
++ val = KVM_ARM_PSCI_1_0;
++ break;
++ case PSCI_1_0_FN_PSCI_FEATURES:
++ feature = smccc_get_arg1(vcpu);
++ switch(feature) {
++ case PSCI_0_2_FN_PSCI_VERSION:
++ case PSCI_0_2_FN_CPU_SUSPEND:
++ case PSCI_0_2_FN64_CPU_SUSPEND:
++ case PSCI_0_2_FN_CPU_OFF:
++ case PSCI_0_2_FN_CPU_ON:
++ case PSCI_0_2_FN64_CPU_ON:
++ case PSCI_0_2_FN_AFFINITY_INFO:
++ case PSCI_0_2_FN64_AFFINITY_INFO:
++ case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
++ case PSCI_0_2_FN_SYSTEM_OFF:
++ case PSCI_0_2_FN_SYSTEM_RESET:
++ case PSCI_1_0_FN_PSCI_FEATURES:
++ val = 0;
++ break;
++ default:
++ val = PSCI_RET_NOT_SUPPORTED;
++ break;
++ }
++ break;
++ default:
++ return kvm_psci_0_2_call(vcpu);
++ }
++
++ smccc_set_retval(vcpu, val, 0, 0, 0);
++ return ret;
++}
++
+ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ {
+ struct kvm *kvm = vcpu->kvm;
+@@ -355,6 +396,8 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ int kvm_psci_call(struct kvm_vcpu *vcpu)
+ {
+ switch (kvm_psci_version(vcpu)) {
++ case KVM_ARM_PSCI_1_0:
++ return kvm_psci_1_0_call(vcpu);
+ case KVM_ARM_PSCI_0_2:
+ return kvm_psci_0_2_call(vcpu);
+ case KVM_ARM_PSCI_0_1:
+--
+2.16.2
+
diff --git a/patches.arch/0006-arm-arm64-KVM-Advertise-SMCCC-v1.1.patch b/patches.arch/0006-arm-arm64-KVM-Advertise-SMCCC-v1.1.patch
new file mode 100644
index 0000000000..cbd261355a
--- /dev/null
+++ b/patches.arch/0006-arm-arm64-KVM-Advertise-SMCCC-v1.1.patch
@@ -0,0 +1,131 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:12 +0000
+Subject: arm/arm64: KVM: Advertise SMCCC v1.1
+Git-commit: 09e6be12effdb33bf7210c8867bbd213b66a499e
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+The new SMC Calling Convention (v1.1) allows for a reduced overhead
+when calling into the firmware, and provides a new feature discovery
+mechanism.
+
+Make it visible to KVM guests.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm/kvm/handle_exit.c | 2 +-
+ arch/arm64/kvm/handle_exit.c | 2 +-
+ include/kvm/arm_psci.h | 2 +-
+ include/linux/arm-smccc.h | 13 +++++++++++++
+ virt/kvm/arm/psci.c | 24 +++++++++++++++++++++++-
+ 5 files changed, 39 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/kvm/handle_exit.c
++++ b/arch/arm/kvm/handle_exit.c
+@@ -36,7 +36,7 @@ static int handle_hvc(struct kvm_vcpu *v
+ kvm_vcpu_hvc_get_imm(vcpu));
+ vcpu->stat.hvc_exit_stat++;
+
+- ret = kvm_psci_call(vcpu);
++ ret = kvm_hvc_call_handler(vcpu);
+ if (ret < 0) {
+ vcpu_set_reg(vcpu, 0, ~0UL);
+ return 1;
+--- a/arch/arm64/kvm/handle_exit.c
++++ b/arch/arm64/kvm/handle_exit.c
+@@ -43,7 +43,7 @@ static int handle_hvc(struct kvm_vcpu *v
+ kvm_vcpu_hvc_get_imm(vcpu));
+ vcpu->stat.hvc_exit_stat++;
+
+- ret = kvm_psci_call(vcpu);
++ ret = kvm_hvc_call_handler(vcpu);
+ if (ret < 0) {
+ vcpu_set_reg(vcpu, 0, ~0UL);
+ return 1;
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -27,6 +27,6 @@
+ #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+
+ int kvm_psci_version(struct kvm_vcpu *vcpu);
+-int kvm_psci_call(struct kvm_vcpu *vcpu);
++int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+ #endif /* __KVM_ARM_PSCI_H__ */
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -60,6 +60,19 @@
+ #define ARM_SMCCC_OWNER_TRUSTED_OS 50
+ #define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+
++#define ARM_SMCCC_VERSION_1_0 0x10000
++#define ARM_SMCCC_VERSION_1_1 0x10001
++
++#define ARM_SMCCC_VERSION_FUNC_ID \
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
++ ARM_SMCCC_SMC_32, \
++ 0, 0)
++
++#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
++ ARM_SMCCC_SMC_32, \
++ 0, 1)
++
+ /**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3 result values from registers 0 to 3
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -15,6 +15,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/preempt.h>
+ #include <linux/kvm_host.h>
+ #include <linux/wait.h>
+@@ -337,6 +338,7 @@ static int kvm_psci_1_0_call(struct kvm_
+ case PSCI_0_2_FN_SYSTEM_OFF:
+ case PSCI_0_2_FN_SYSTEM_RESET:
+ case PSCI_1_0_FN_PSCI_FEATURES:
++ case ARM_SMCCC_VERSION_FUNC_ID:
+ val = 0;
+ break;
+ default:
+@@ -391,7 +393,7 @@ static int kvm_psci_0_1_call(struct kvm_
+ * Errors:
+ * -EINVAL: Unrecognized PSCI function
+ */
+-int kvm_psci_call(struct kvm_vcpu *vcpu)
++static int kvm_psci_call(struct kvm_vcpu *vcpu)
+ {
+ switch (kvm_psci_version(vcpu)) {
+ case KVM_ARM_PSCI_1_0:
+@@ -404,3 +406,23 @@ int kvm_psci_call(struct kvm_vcpu *vcpu)
+ return -EINVAL;
+ };
+ }
++
++int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
++{
++ u32 func_id = smccc_get_function(vcpu);
++ u32 val = PSCI_RET_NOT_SUPPORTED;
++
++ switch (func_id) {
++ case ARM_SMCCC_VERSION_FUNC_ID:
++ val = ARM_SMCCC_VERSION_1_1;
++ break;
++ case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
++ /* Nothing supported yet */
++ break;
++ default:
++ return kvm_psci_call(vcpu);
++ }
++
++ smccc_set_retval(vcpu, val, 0, 0, 0);
++ return 1;
++}
diff --git a/patches.arch/0007-arm-arm64-KVM-Turn-kvm_psci_version-into-a-static-in.patch b/patches.arch/0007-arm-arm64-KVM-Turn-kvm_psci_version-into-a-static-in.patch
new file mode 100644
index 0000000000..c7455fb9dd
--- /dev/null
+++ b/patches.arch/0007-arm-arm64-KVM-Turn-kvm_psci_version-into-a-static-in.patch
@@ -0,0 +1,139 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:13 +0000
+Subject: arm/arm64: KVM: Turn kvm_psci_version into a static inline
+Git-commit: a4097b351118e821841941a79ec77d3ce3f1c5d9
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+We're about to need kvm_psci_version in HYP too. So let's turn it
+into a static inline, and pass the kvm structure as a second
+parameter (so that HYP can do a kern_hyp_va on it).
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kvm/hyp/switch.c | 20 ++++++++++++--------
+ include/kvm/arm_psci.h | 21 ++++++++++++++++++++-
+ virt/kvm/arm/psci.c | 12 ++----------
+ 3 files changed, 34 insertions(+), 19 deletions(-)
+
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 036e1f3d77a6..408c04d789a5 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -19,6 +19,8 @@
+ #include <linux/jump_label.h>
+ #include <uapi/linux/psci.h>
+
++#include <kvm/arm_psci.h>
++
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_hyp.h>
+@@ -350,14 +352,16 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+
+ if (exit_code == ARM_EXCEPTION_TRAP &&
+ (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32) &&
+- vcpu_get_reg(vcpu, 0) == PSCI_0_2_FN_PSCI_VERSION) {
+- u64 val = PSCI_RET_NOT_SUPPORTED;
+- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+- val = 2;
+-
+- vcpu_set_reg(vcpu, 0, val);
+- goto again;
++ kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) {
++ u32 val = vcpu_get_reg(vcpu, 0);
++
++ if (val == PSCI_0_2_FN_PSCI_VERSION) {
++ val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm));
++ if (unlikely(val == KVM_ARM_PSCI_0_1))
++ val = PSCI_RET_NOT_SUPPORTED;
++ vcpu_set_reg(vcpu, 0, val);
++ goto again;
++ }
+ }
+
+ if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
+index ed1dd8088f1c..e518e4e3dfb5 100644
+--- a/include/kvm/arm_psci.h
++++ b/include/kvm/arm_psci.h
+@@ -18,6 +18,7 @@
+ #ifndef __KVM_ARM_PSCI_H__
+ #define __KVM_ARM_PSCI_H__
+
++#include <linux/kvm_host.h>
+ #include <uapi/linux/psci.h>
+
+ #define KVM_ARM_PSCI_0_1 PSCI_VERSION(0, 1)
+@@ -26,7 +27,25 @@
+
+ #define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_0
+
+-int kvm_psci_version(struct kvm_vcpu *vcpu);
++/*
++ * We need the KVM pointer independently from the vcpu as we can call
++ * this from HYP, and need to apply kern_hyp_va on it...
++ */
++static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
++{
++ /*
++ * Our PSCI implementation stays the same across versions from
++ * v0.2 onward, only adding the few mandatory functions (such
++ * as FEATURES with 1.0) that are required by newer
++ * revisions. It is thus safe to return the latest.
++ */
++ if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
++ return KVM_ARM_PSCI_LATEST;
++
++ return KVM_ARM_PSCI_0_1;
++}
++
++
+ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+ #endif /* __KVM_ARM_PSCI_H__ */
+diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
+index 46a98fee3ef5..e105c1153794 100644
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -123,7 +123,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+ if (!vcpu)
+ return PSCI_RET_INVALID_PARAMS;
+ if (!vcpu->arch.power_off) {
+- if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
++ if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
+ return PSCI_RET_ALREADY_ON;
+ else
+ return PSCI_RET_INVALID_PARAMS;
+@@ -232,14 +232,6 @@ static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
+ kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
+ }
+
+-int kvm_psci_version(struct kvm_vcpu *vcpu)
+-{
+- if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
+- return KVM_ARM_PSCI_LATEST;
+-
+- return KVM_ARM_PSCI_0_1;
+-}
+-
+ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
+ {
+ struct kvm *kvm = vcpu->kvm;
+@@ -397,7 +389,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
+ */
+ static int kvm_psci_call(struct kvm_vcpu *vcpu)
+ {
+- switch (kvm_psci_version(vcpu)) {
++ switch (kvm_psci_version(vcpu, vcpu->kvm)) {
+ case KVM_ARM_PSCI_1_0:
+ return kvm_psci_1_0_call(vcpu);
+ case KVM_ARM_PSCI_0_2:
+--
+2.16.2
+
diff --git a/patches.arch/0008-arm64-KVM-Report-SMCCC_ARCH_WORKAROUND_1-BP-hardenin.patch b/patches.arch/0008-arm64-KVM-Report-SMCCC_ARCH_WORKAROUND_1-BP-hardenin.patch
new file mode 100644
index 0000000000..c7f4b43f74
--- /dev/null
+++ b/patches.arch/0008-arm64-KVM-Report-SMCCC_ARCH_WORKAROUND_1-BP-hardenin.patch
@@ -0,0 +1,90 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:14 +0000
+Subject: arm64: KVM: Report SMCCC_ARCH_WORKAROUND_1 BP hardening support
+Git-commit: 6167ec5c9145cdf493722dfd80a5d48bafc4a18a
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+A new feature of SMCCC 1.1 is that it offers firmware-based CPU
+workarounds. In particular, SMCCC_ARCH_WORKAROUND_1 provides
+BP hardening for CVE-2017-5715.
+
+If the host has some mitigation for this issue, report that
+we deal with it using SMCCC_ARCH_WORKAROUND_1, as we apply the
+host workaround on every guest exit.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm/include/asm/kvm_host.h | 6 ++++++
+ arch/arm64/include/asm/kvm_host.h | 5 +++++
+ include/linux/arm-smccc.h | 5 +++++
+ virt/kvm/arm/psci.c | 9 ++++++++-
+ 4 files changed, 24 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/kvm_host.h
++++ b/arch/arm/include/asm/kvm_host.h
+@@ -306,4 +306,10 @@ static inline int kvm_arm_vcpu_arch_has_
+ return -ENXIO;
+ }
+
++static inline bool kvm_arm_harden_branch_predictor(void)
++{
++ /* No way to detect it yet, pretend it is not there. */
++ return false;
++}
++
+ #endif /* __ARM_KVM_HOST_H__ */
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -389,4 +389,9 @@ static inline void __cpu_init_stage2(voi
+ "PARange is %d bits, unsupported configuration!", parange);
+ }
+
++static inline bool kvm_arm_harden_branch_predictor(void)
++{
++ return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
++}
++
+ #endif /* __ARM64_KVM_HOST_H__ */
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -73,6 +73,11 @@
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
++#define ARM_SMCCC_ARCH_WORKAROUND_1 \
++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
++ ARM_SMCCC_SMC_32, \
++ 0, 0x8000)
++
+ /**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3 result values from registers 0 to 3
+--- a/virt/kvm/arm/psci.c
++++ b/virt/kvm/arm/psci.c
+@@ -403,13 +403,20 @@ int kvm_hvc_call_handler(struct kvm_vcpu
+ {
+ u32 func_id = smccc_get_function(vcpu);
+ u32 val = PSCI_RET_NOT_SUPPORTED;
++ u32 feature;
+
+ switch (func_id) {
+ case ARM_SMCCC_VERSION_FUNC_ID:
+ val = ARM_SMCCC_VERSION_1_1;
+ break;
+ case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
+- /* Nothing supported yet */
++ feature = smccc_get_arg1(vcpu);
++ switch(feature) {
++ case ARM_SMCCC_ARCH_WORKAROUND_1:
++ if (kvm_arm_harden_branch_predictor())
++ val = 0;
++ break;
++ }
+ break;
+ default:
+ return kvm_psci_call(vcpu);
diff --git a/patches.arch/0009-arm64-KVM-Add-SMCCC_ARCH_WORKAROUND_1-fast-handling.patch b/patches.arch/0009-arm64-KVM-Add-SMCCC_ARCH_WORKAROUND_1-fast-handling.patch
new file mode 100644
index 0000000000..3688f5df79
--- /dev/null
+++ b/patches.arch/0009-arm64-KVM-Add-SMCCC_ARCH_WORKAROUND_1-fast-handling.patch
@@ -0,0 +1,71 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:15 +0000
+Subject: arm64: KVM: Add SMCCC_ARCH_WORKAROUND_1 fast handling
+Git-commit: f72af90c3783d924337624659b43e2d36f1b36b4
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+We want SMCCC_ARCH_WORKAROUND_1 to be fast. As fast as possible.
+So let's intercept it as early as we can by testing for the
+function call number as soon as we've identified a HVC call
+coming from the guest.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kvm/hyp/hyp-entry.S | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
+index e4f37b9dd47c..f36464bd57c5 100644
+--- a/arch/arm64/kvm/hyp/hyp-entry.S
++++ b/arch/arm64/kvm/hyp/hyp-entry.S
+@@ -15,6 +15,7 @@
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#include <linux/arm-smccc.h>
+ #include <linux/linkage.h>
+
+ #include <asm/alternative.h>
+@@ -64,10 +65,11 @@ alternative_endif
+ lsr x0, x1, #ESR_ELx_EC_SHIFT
+
+ cmp x0, #ESR_ELx_EC_HVC64
++ ccmp x0, #ESR_ELx_EC_HVC32, #4, ne
+ b.ne el1_trap
+
+- mrs x1, vttbr_el2 // If vttbr is valid, the 64bit guest
+- cbnz x1, el1_trap // called HVC
++ mrs x1, vttbr_el2 // If vttbr is valid, the guest
++ cbnz x1, el1_hvc_guest // called HVC
+
+ /* Here, we're pretty sure the host called HVC. */
+ ldp x0, x1, [sp], #16
+@@ -100,6 +102,20 @@ alternative_endif
+
+ eret
+
++el1_hvc_guest:
++ /*
++ * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
++ * The workaround has already been applied on the host,
++ * so let's quickly get back to the guest. We don't bother
++ * restoring x1, as it can be clobbered anyway.
++ */
++ ldr x1, [sp] // Guest's x0
++ eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
++ cbnz w1, el1_trap
++ mov x0, x1
++ add sp, sp, #16
++ eret
++
+ el1_trap:
+ /*
+ * x0: ESR_EC
+--
+2.16.2
+
diff --git a/patches.arch/0010-firmware-psci-Expose-PSCI-conduit.patch b/patches.arch/0010-firmware-psci-Expose-PSCI-conduit.patch
new file mode 100644
index 0000000000..f30150f197
--- /dev/null
+++ b/patches.arch/0010-firmware-psci-Expose-PSCI-conduit.patch
@@ -0,0 +1,112 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:16 +0000
+Subject: firmware/psci: Expose PSCI conduit
+Git-commit: 09a8d6d48499f93e2abde691f5800081cd858726
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+In order to call into the firmware to apply workarounds, it is
+useful to find out whether we're using HVC or SMC. Let's expose
+this through the psci_ops.
+
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/psci.c | 28 +++++++++++++++++++++++-----
+ include/linux/psci.h | 7 +++++++
+ 2 files changed, 30 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
+index 8b25d31e8401..e9493da2b111 100644
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -59,7 +59,9 @@ bool psci_tos_resident_on(int cpu)
+ return cpu == resident_cpu;
+ }
+
+-struct psci_operations psci_ops;
++struct psci_operations psci_ops = {
++ .conduit = PSCI_CONDUIT_NONE,
++};
+
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+@@ -210,6 +212,22 @@ static unsigned long psci_migrate_info_up_cpu(void)
+ 0, 0, 0);
+ }
+
++static void set_conduit(enum psci_conduit conduit)
++{
++ switch (conduit) {
++ case PSCI_CONDUIT_HVC:
++ invoke_psci_fn = __invoke_psci_fn_hvc;
++ break;
++ case PSCI_CONDUIT_SMC:
++ invoke_psci_fn = __invoke_psci_fn_smc;
++ break;
++ default:
++ WARN(1, "Unexpected PSCI conduit %d\n", conduit);
++ }
++
++ psci_ops.conduit = conduit;
++}
++
+ static int get_set_conduit_method(struct device_node *np)
+ {
+ const char *method;
+@@ -222,9 +240,9 @@ static int get_set_conduit_method(struct device_node *np)
+ }
+
+ if (!strcmp("hvc", method)) {
+- invoke_psci_fn = __invoke_psci_fn_hvc;
++ set_conduit(PSCI_CONDUIT_HVC);
+ } else if (!strcmp("smc", method)) {
+- invoke_psci_fn = __invoke_psci_fn_smc;
++ set_conduit(PSCI_CONDUIT_SMC);
+ } else {
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return -EINVAL;
+@@ -654,9 +672,9 @@ int __init psci_acpi_init(void)
+ pr_info("probing for conduit method from ACPI.\n");
+
+ if (acpi_psci_use_hvc())
+- invoke_psci_fn = __invoke_psci_fn_hvc;
++ set_conduit(PSCI_CONDUIT_HVC);
+ else
+- invoke_psci_fn = __invoke_psci_fn_smc;
++ set_conduit(PSCI_CONDUIT_SMC);
+
+ return psci_probe();
+ }
+diff --git a/include/linux/psci.h b/include/linux/psci.h
+index f724fd8c78e8..f2679e5faa4f 100644
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -25,6 +25,12 @@ bool psci_tos_resident_on(int cpu);
+ int psci_cpu_init_idle(unsigned int cpu);
+ int psci_cpu_suspend_enter(unsigned long index);
+
++enum psci_conduit {
++ PSCI_CONDUIT_NONE,
++ PSCI_CONDUIT_SMC,
++ PSCI_CONDUIT_HVC,
++};
++
+ struct psci_operations {
+ u32 (*get_version)(void);
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+@@ -34,6 +40,7 @@ struct psci_operations {
+ int (*affinity_info)(unsigned long target_affinity,
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
++ enum psci_conduit conduit;
+ };
+
+ extern struct psci_operations psci_ops;
+--
+2.16.2
+
diff --git a/patches.arch/0011-firmware-psci-Expose-SMCCC-version-through-psci_ops.patch b/patches.arch/0011-firmware-psci-Expose-SMCCC-version-through-psci_ops.patch
new file mode 100644
index 0000000000..8741a83801
--- /dev/null
+++ b/patches.arch/0011-firmware-psci-Expose-SMCCC-version-through-psci_ops.patch
@@ -0,0 +1,101 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:17 +0000
+Subject: firmware/psci: Expose SMCCC version through psci_ops
+Git-commit: e78eef554a912ef6c1e0bbf97619dafbeae3339f
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+Since PSCI 1.0 allows the SMCCC version to be (indirectly) probed,
+let's do that at boot time, and expose the version of the calling
+convention as part of the psci_ops structure.
+
+Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/psci.c | 27 +++++++++++++++++++++++++++
+ include/linux/psci.h | 6 ++++++
+ 2 files changed, 33 insertions(+)
+
+diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
+index e9493da2b111..c80ec1d03274 100644
+--- a/drivers/firmware/psci.c
++++ b/drivers/firmware/psci.c
+@@ -61,6 +61,7 @@ bool psci_tos_resident_on(int cpu)
+
+ struct psci_operations psci_ops = {
+ .conduit = PSCI_CONDUIT_NONE,
++ .smccc_version = SMCCC_VERSION_1_0,
+ };
+
+ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
+@@ -511,6 +512,31 @@ static void __init psci_init_migrate(void)
+ pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
+ }
+
++static void __init psci_init_smccc(void)
++{
++ u32 ver = ARM_SMCCC_VERSION_1_0;
++ int feature;
++
++ feature = psci_features(ARM_SMCCC_VERSION_FUNC_ID);
++
++ if (feature != PSCI_RET_NOT_SUPPORTED) {
++ u32 ret;
++ ret = invoke_psci_fn(ARM_SMCCC_VERSION_FUNC_ID, 0, 0, 0);
++ if (ret == ARM_SMCCC_VERSION_1_1) {
++ psci_ops.smccc_version = SMCCC_VERSION_1_1;
++ ver = ret;
++ }
++ }
++
++ /*
++ * Conveniently, the SMCCC and PSCI versions are encoded the
++ * same way. No, this isn't accidental.
++ */
++ pr_info("SMC Calling Convention v%d.%d\n",
++ PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
++
++}
++
+ static void __init psci_0_2_set_functions(void)
+ {
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+@@ -559,6 +585,7 @@ static int __init psci_probe(void)
+ psci_init_migrate();
+
+ if (PSCI_VERSION_MAJOR(ver) >= 1) {
++ psci_init_smccc();
+ psci_init_cpu_suspend();
+ psci_init_system_suspend();
+ }
+diff --git a/include/linux/psci.h b/include/linux/psci.h
+index f2679e5faa4f..8b1b3b5935ab 100644
+--- a/include/linux/psci.h
++++ b/include/linux/psci.h
+@@ -31,6 +31,11 @@ enum psci_conduit {
+ PSCI_CONDUIT_HVC,
+ };
+
++enum smccc_version {
++ SMCCC_VERSION_1_0,
++ SMCCC_VERSION_1_1,
++};
++
+ struct psci_operations {
+ u32 (*get_version)(void);
+ int (*cpu_suspend)(u32 state, unsigned long entry_point);
+@@ -41,6 +46,7 @@ struct psci_operations {
+ unsigned long lowest_affinity_level);
+ int (*migrate_info_type)(void);
+ enum psci_conduit conduit;
++ enum smccc_version smccc_version;
+ };
+
+ extern struct psci_operations psci_ops;
+--
+2.16.2
+
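With the previous patch exposing the conduit and this one exposing the negotiated SMCCC version, a caller has everything needed to decide whether a fast firmware call is available and which instruction to issue it with. A hypothetical sketch of such a check (the helper name is illustrative, not something this series adds; the real consumer is the cpu_errata.c change later in this merge):

    /* Illustrative only: mirrors the checks done by the later BP-hardening code. */
    static bool smccc_1_1_available(void)
    {
            if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
                    return false;           /* firmware only speaks SMCCC 1.0 */

            switch (psci_ops.conduit) {
            case PSCI_CONDUIT_HVC:          /* guest: trap to the hypervisor */
            case PSCI_CONDUIT_SMC:          /* bare metal: trap to EL3 firmware */
                    return true;
            default:                        /* PSCI_CONDUIT_NONE: no PSCI at all */
                    return false;
            }
    }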
diff --git a/patches.arch/0012-arm-arm64-smccc-Make-function-identifiers-an-unsigne.patch b/patches.arch/0012-arm-arm64-smccc-Make-function-identifiers-an-unsigne.patch
new file mode 100644
index 0000000000..745a5dcd27
--- /dev/null
+++ b/patches.arch/0012-arm-arm64-smccc-Make-function-identifiers-an-unsigne.patch
@@ -0,0 +1,48 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:18 +0000
+Subject: arm/arm64: smccc: Make function identifiers an unsigned quantity
+Git-commit: ded4c39e93f3b72968fdb79baba27f3b83dad34c
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+Function identifiers are a 32bit, unsigned quantity. But we never
+tell so to the compiler, resulting in the following:
+
+ 4ac: b26187e0 mov x0, #0xffffffff80000001
+
+We thus rely on the firmware narrowing it for us, which is not
+always a reasonable expectation.
+
+Cc: stable@vger.kernel.org
+Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ include/linux/arm-smccc.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -17,14 +17,16 @@
+ #include <linux/linkage.h>
+ #include <linux/types.h>
+
++#include <uapi/linux/const.h>
++
+ /*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ */
+
+-#define ARM_SMCCC_STD_CALL 0
+-#define ARM_SMCCC_FAST_CALL 1
++#define ARM_SMCCC_STD_CALL _AC(0,U)
++#define ARM_SMCCC_FAST_CALL _AC(1,U)
+ #define ARM_SMCCC_TYPE_SHIFT 31
+
+ #define ARM_SMCCC_SMC_32 0
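The disassembly quoted in the log message is the whole story: ARM_SMCCC_FAST_CALL shifted by ARM_SMCCC_TYPE_SHIFT (31) is evaluated as a signed int, so the resulting function ID is negative and gets sign-extended when widened into a 64-bit register. A standalone sketch of the effect (user-space C with illustrative macro names; _AC(1,U) effectively turns the constant into 1U when compiled as C):

    #include <stdio.h>

    #define TYPE_SHIFT   31
    #define FAST_CALL    1    /* old definition: a plain, signed int */
    #define FAST_CALL_U  1U   /* new definition: unsigned, as after the fix */

    int main(void)
    {
            /*
             * Shifting a signed 1 into the sign bit (itself dubious in ISO C)
             * yields a negative int, which sign-extends on assignment to a
             * 64-bit type: the 0xffffffff80000001 seen in the commit log.
             */
            unsigned long old_id = (FAST_CALL << TYPE_SHIFT) | 1;
            unsigned long new_id = (FAST_CALL_U << TYPE_SHIFT) | 1;

            printf("signed constant  : %#lx\n", old_id); /* 0xffffffff80000001 on LP64 */
            printf("unsigned constant: %#lx\n", new_id); /* 0x80000001 */
            return 0;
    }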
diff --git a/patches.arch/0013-arm-arm64-smccc-Implement-SMCCC-v1.1-inline-primitiv.patch b/patches.arch/0013-arm-arm64-smccc-Implement-SMCCC-v1.1-inline-primitiv.patch
new file mode 100644
index 0000000000..5deaf2ea9f
--- /dev/null
+++ b/patches.arch/0013-arm-arm64-smccc-Implement-SMCCC-v1.1-inline-primitiv.patch
@@ -0,0 +1,170 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:19 +0000
+Subject: arm/arm64: smccc: Implement SMCCC v1.1 inline primitive
+Git-commit: f2d3b2e8759a5833df6f022e42df2d581e6d843c
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+One of the major improvement of SMCCC v1.1 is that it only clobbers
+the first 4 registers, both on 32 and 64bit. This means that it
+becomes very easy to provide an inline version of the SMC call
+primitive, and avoid performing a function call to stash the
+registers that would otherwise be clobbered by SMCCC v1.0.
+
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ include/linux/arm-smccc.h | 141 ++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 141 insertions(+)
+
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -121,4 +121,145 @@ asmlinkage void arm_smccc_hvc(unsigned l
+ unsigned long a5, unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res);
+
++/* SMCCC v1.1 implementation madness follows */
++#ifdef CONFIG_ARM64
++
++#define SMCCC_SMC_INST "smc #0"
++#define SMCCC_HVC_INST "hvc #0"
++
++#elif defined(CONFIG_ARM)
++#include <asm/opcodes-sec.h>
++#include <asm/opcodes-virt.h>
++
++#define SMCCC_SMC_INST __SMC(0)
++#define SMCCC_HVC_INST __HVC(0)
++
++#endif
++
++#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
++
++#define __count_args(...) \
++ ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
++
++#define __constraint_write_0 \
++ "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3)
++#define __constraint_write_1 \
++ "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3)
++#define __constraint_write_2 \
++ "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3)
++#define __constraint_write_3 \
++ "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3)
++#define __constraint_write_4 __constraint_write_3
++#define __constraint_write_5 __constraint_write_4
++#define __constraint_write_6 __constraint_write_5
++#define __constraint_write_7 __constraint_write_6
++
++#define __constraint_read_0
++#define __constraint_read_1
++#define __constraint_read_2
++#define __constraint_read_3
++#define __constraint_read_4 "r" (r4)
++#define __constraint_read_5 __constraint_read_4, "r" (r5)
++#define __constraint_read_6 __constraint_read_5, "r" (r6)
++#define __constraint_read_7 __constraint_read_6, "r" (r7)
++
++#define __declare_arg_0(a0, res) \
++ struct arm_smccc_res *___res = res; \
++ register u32 r0 asm("r0") = a0; \
++ register unsigned long r1 asm("r1"); \
++ register unsigned long r2 asm("r2"); \
++ register unsigned long r3 asm("r3")
++
++#define __declare_arg_1(a0, a1, res) \
++ struct arm_smccc_res *___res = res; \
++ register u32 r0 asm("r0") = a0; \
++ register typeof(a1) r1 asm("r1") = a1; \
++ register unsigned long r2 asm("r2"); \
++ register unsigned long r3 asm("r3")
++
++#define __declare_arg_2(a0, a1, a2, res) \
++ struct arm_smccc_res *___res = res; \
++ register u32 r0 asm("r0") = a0; \
++ register typeof(a1) r1 asm("r1") = a1; \
++ register typeof(a2) r2 asm("r2") = a2; \
++ register unsigned long r3 asm("r3")
++
++#define __declare_arg_3(a0, a1, a2, a3, res) \
++ struct arm_smccc_res *___res = res; \
++ register u32 r0 asm("r0") = a0; \
++ register typeof(a1) r1 asm("r1") = a1; \
++ register typeof(a2) r2 asm("r2") = a2; \
++ register typeof(a3) r3 asm("r3") = a3
++
++#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
++ __declare_arg_3(a0, a1, a2, a3, res); \
++ register typeof(a4) r4 asm("r4") = a4
++
++#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
++ __declare_arg_4(a0, a1, a2, a3, a4, res); \
++ register typeof(a5) r5 asm("r5") = a5
++
++#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
++ __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
++ register typeof(a6) r6 asm("r6") = a6
++
++#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
++ __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
++ register typeof(a7) r7 asm("r7") = a7
++
++#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
++#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
++
++#define ___constraints(count) \
++ : __constraint_write_ ## count \
++ : __constraint_read_ ## count \
++ : "memory"
++#define __constraints(count) ___constraints(count)
++
++/*
++ * We have an output list that is not necessarily used, and GCC feels
++ * entitled to optimise the whole sequence away. "volatile" is what
++ * makes it stick.
++ */
++#define __arm_smccc_1_1(inst, ...) \
++ do { \
++ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
++ asm volatile(inst "\n" \
++ __constraints(__count_args(__VA_ARGS__))); \
++ if (___res) \
++ *___res = (typeof(*___res)){r0, r1, r2, r3}; \
++ } while (0)
++
++/*
++ * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro is used to make SMC calls following SMC Calling Convention v1.1.
++ * The content of the supplied param are copied to registers 0 to 7 prior
++ * to the SMC instruction. The return values are updated with the content
++ * from register 0 to 3 on return from the SMC instruction if not NULL.
++ */
++#define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__)
++
++/*
++ * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call
++ *
++ * This is a variadic macro taking one to eight source arguments, and
++ * an optional return structure.
++ *
++ * @a0-a7: arguments passed in registers 0 to 7
++ * @res: result values from registers 0 to 3
++ *
++ * This macro is used to make HVC calls following SMC Calling Convention v1.1.
++ * The content of the supplied param are copied to registers 0 to 7 prior
++ * to the HVC instruction. The return values are updated with the content
++ * from register 0 to 3 on return from the HVC instruction if not NULL.
++ */
++#define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
++
+ #endif /*__LINUX_ARM_SMCCC_H*/
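To make the variadic machinery above concrete: a call site passes a function ID, up to seven arguments and, optionally, a pointer to a struct arm_smccc_res; __count_args() selects the matching __declare_arg_N/__constraint_*_N pair, so the compiler only sees x0-x3 as clobbered plus whichever argument registers the call actually uses. A minimal usage sketch, roughly what the BP-hardening patch later in this merge does for the SMC conduit:

    struct arm_smccc_res res;

    /* Ask the firmware whether ARM_SMCCC_ARCH_WORKAROUND_1 is implemented. */
    arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                      ARM_SMCCC_ARCH_WORKAROUND_1, &res);
    if (res.a0 == 0)
            /* Supported: the mitigation itself is a single register-only fast call. */
            arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);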
diff --git a/patches.arch/0014-arm64-Add-ARM_SMCCC_ARCH_WORKAROUND_1-BP-hardening-s.patch b/patches.arch/0014-arm64-Add-ARM_SMCCC_ARCH_WORKAROUND_1-BP-hardening-s.patch
new file mode 100644
index 0000000000..fb06b27aa9
--- /dev/null
+++ b/patches.arch/0014-arm64-Add-ARM_SMCCC_ARCH_WORKAROUND_1-BP-hardening-s.patch
@@ -0,0 +1,160 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:20 +0000
+Subject: arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support
+Git-commit: b092201e0020614127f495c092e0a12d26a2116e
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+Add the detection and runtime code for ARM_SMCCC_ARCH_WORKAROUND_1.
+It is lovely. Really.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/bpi.S | 20 +++++++++++++
+ arch/arm64/kernel/cpu_errata.c | 68 +++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 87 insertions(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
+index 76225c2611ea..fdeed629f2c6 100644
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -17,6 +17,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/arm-smccc.h>
+
+ .macro ventry target
+ .rept 31
+@@ -85,3 +86,22 @@ ENTRY(__qcom_hyp_sanitize_link_stack_start)
+ .endr
+ ldp x29, x30, [sp], #16
+ ENTRY(__qcom_hyp_sanitize_link_stack_end)
++
++.macro smccc_workaround_1 inst
++ sub sp, sp, #(8 * 4)
++ stp x2, x3, [sp, #(8 * 0)]
++ stp x0, x1, [sp, #(8 * 2)]
++ mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
++ \inst #0
++ ldp x2, x3, [sp, #(8 * 0)]
++ ldp x0, x1, [sp, #(8 * 2)]
++ add sp, sp, #(8 * 4)
++.endm
++
++ENTRY(__smccc_workaround_1_smc_start)
++ smccc_workaround_1 smc
++ENTRY(__smccc_workaround_1_smc_end)
++
++ENTRY(__smccc_workaround_1_hvc_start)
++ smccc_workaround_1 hvc
++ENTRY(__smccc_workaround_1_hvc_end)
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index ed6881882231..9e77809a3b23 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -70,6 +70,10 @@ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+ extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+ extern char __qcom_hyp_sanitize_link_stack_start[];
+ extern char __qcom_hyp_sanitize_link_stack_end[];
++extern char __smccc_workaround_1_smc_start[];
++extern char __smccc_workaround_1_smc_end[];
++extern char __smccc_workaround_1_hvc_start[];
++extern char __smccc_workaround_1_hvc_end[];
+
+ static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
+ const char *hyp_vecs_end)
+@@ -116,6 +120,10 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ #define __psci_hyp_bp_inval_end NULL
+ #define __qcom_hyp_sanitize_link_stack_start NULL
+ #define __qcom_hyp_sanitize_link_stack_end NULL
++#define __smccc_workaround_1_smc_start NULL
++#define __smccc_workaround_1_smc_end NULL
++#define __smccc_workaround_1_hvc_start NULL
++#define __smccc_workaround_1_hvc_end NULL
+
+ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ const char *hyp_vecs_start,
+@@ -142,17 +150,75 @@ static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
+ __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
+ }
+
++#include <uapi/linux/psci.h>
++#include <linux/arm-smccc.h>
+ #include <linux/psci.h>
+
++static void call_smc_arch_workaround_1(void)
++{
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static void call_hvc_arch_workaround_1(void)
++{
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
++}
++
++static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
++{
++ bp_hardening_cb_t cb;
++ void *smccc_start, *smccc_end;
++ struct arm_smccc_res res;
++
++ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
++ return false;
++
++ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
++ return false;
++
++ switch (psci_ops.conduit) {
++ case PSCI_CONDUIT_HVC:
++ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++ if (res.a0)
++ return false;
++ cb = call_hvc_arch_workaround_1;
++ smccc_start = __smccc_workaround_1_hvc_start;
++ smccc_end = __smccc_workaround_1_hvc_end;
++ break;
++
++ case PSCI_CONDUIT_SMC:
++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
++ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
++ if (res.a0)
++ return false;
++ cb = call_smc_arch_workaround_1;
++ smccc_start = __smccc_workaround_1_smc_start;
++ smccc_end = __smccc_workaround_1_smc_end;
++ break;
++
++ default:
++ return false;
++ }
++
++ install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
++
++ return true;
++}
++
+ static int enable_psci_bp_hardening(void *data)
+ {
+ const struct arm64_cpu_capabilities *entry = data;
+
+- if (psci_ops.get_version)
++ if (psci_ops.get_version) {
++ if (check_smccc_arch_workaround_1(entry))
++ return 0;
++
+ install_bp_hardening_cb(entry,
+ (bp_hardening_cb_t)psci_ops.get_version,
+ __psci_hyp_bp_inval_start,
+ __psci_hyp_bp_inval_end);
++ }
+
+ return 0;
+ }
+--
+2.16.2
+
diff --git a/patches.arch/0015-arm64-Kill-PSCI_GET_VERSION-as-a-variant-2-workaroun.patch b/patches.arch/0015-arm64-Kill-PSCI_GET_VERSION-as-a-variant-2-workaroun.patch
new file mode 100644
index 0000000000..d58eedf50a
--- /dev/null
+++ b/patches.arch/0015-arm64-Kill-PSCI_GET_VERSION-as-a-variant-2-workaroun.patch
@@ -0,0 +1,216 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 6 Feb 2018 17:56:21 +0000
+Subject: arm64: Kill PSCI_GET_VERSION as a variant-2 workaround
+Git-commit: 3a0a397ff5ff8b56ca9f7908b75dee6bf0b5fabb
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+Now that we've standardised on SMCCC v1.1 to perform the branch
+prediction invalidation, let's drop the previous band-aid.
+If vendors haven't updated their firmware to do SMCCC 1.1, they
+haven't updated PSCI either, so we don't lose anything.
+
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/bpi.S | 24 ----------------------
+ arch/arm64/kernel/cpu_errata.c | 45 ++++++++++++------------------------------
+ arch/arm64/kvm/hyp/switch.c | 14 -------------
+ 3 files changed, 13 insertions(+), 70 deletions(-)
+
+diff --git a/arch/arm64/kernel/bpi.S b/arch/arm64/kernel/bpi.S
+index fdeed629f2c6..e5de33513b5d 100644
+--- a/arch/arm64/kernel/bpi.S
++++ b/arch/arm64/kernel/bpi.S
+@@ -54,30 +54,6 @@ ENTRY(__bp_harden_hyp_vecs_start)
+ vectors __kvm_hyp_vector
+ .endr
+ ENTRY(__bp_harden_hyp_vecs_end)
+-ENTRY(__psci_hyp_bp_inval_start)
+- sub sp, sp, #(8 * 18)
+- stp x16, x17, [sp, #(16 * 0)]
+- stp x14, x15, [sp, #(16 * 1)]
+- stp x12, x13, [sp, #(16 * 2)]
+- stp x10, x11, [sp, #(16 * 3)]
+- stp x8, x9, [sp, #(16 * 4)]
+- stp x6, x7, [sp, #(16 * 5)]
+- stp x4, x5, [sp, #(16 * 6)]
+- stp x2, x3, [sp, #(16 * 7)]
+- stp x0, x1, [sp, #(16 * 8)]
+- mov x0, #0x84000000
+- smc #0
+- ldp x16, x17, [sp, #(16 * 0)]
+- ldp x14, x15, [sp, #(16 * 1)]
+- ldp x12, x13, [sp, #(16 * 2)]
+- ldp x10, x11, [sp, #(16 * 3)]
+- ldp x8, x9, [sp, #(16 * 4)]
+- ldp x6, x7, [sp, #(16 * 5)]
+- ldp x4, x5, [sp, #(16 * 6)]
+- ldp x2, x3, [sp, #(16 * 7)]
+- ldp x0, x1, [sp, #(16 * 8)]
+- add sp, sp, #(8 * 18)
+-ENTRY(__psci_hyp_bp_inval_end)
+
+ ENTRY(__qcom_hyp_sanitize_link_stack_start)
+ stp x29, x30, [sp, #-16]!
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 9e77809a3b23..07823595b7f0 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -67,7 +67,6 @@ static int cpu_enable_trap_ctr_access(void *__unused)
+ DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
+
+ #ifdef CONFIG_KVM
+-extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
+ extern char __qcom_hyp_sanitize_link_stack_start[];
+ extern char __qcom_hyp_sanitize_link_stack_end[];
+ extern char __smccc_workaround_1_smc_start[];
+@@ -116,8 +115,6 @@ static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
+ spin_unlock(&bp_lock);
+ }
+ #else
+-#define __psci_hyp_bp_inval_start NULL
+-#define __psci_hyp_bp_inval_end NULL
+ #define __qcom_hyp_sanitize_link_stack_start NULL
+ #define __qcom_hyp_sanitize_link_stack_end NULL
+ #define __smccc_workaround_1_smc_start NULL
+@@ -164,24 +161,25 @@ static void call_hvc_arch_workaround_1(void)
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
+ }
+
+-static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
++static int enable_smccc_arch_workaround_1(void *data)
+ {
++ const struct arm64_cpu_capabilities *entry = data;
+ bp_hardening_cb_t cb;
+ void *smccc_start, *smccc_end;
+ struct arm_smccc_res res;
+
+ if (!entry->matches(entry, SCOPE_LOCAL_CPU))
+- return false;
++ return 0;
+
+ if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
+- return false;
++ return 0;
+
+ switch (psci_ops.conduit) {
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+- return false;
++ return 0;
+ cb = call_hvc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_hvc_start;
+ smccc_end = __smccc_workaround_1_hvc_end;
+@@ -191,35 +189,18 @@ static bool check_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *e
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+ if (res.a0)
+- return false;
++ return 0;
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+ smccc_end = __smccc_workaround_1_smc_end;
+ break;
+
+ default:
+- return false;
++ return 0;
+ }
+
+ install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
+
+- return true;
+-}
+-
+-static int enable_psci_bp_hardening(void *data)
+-{
+- const struct arm64_cpu_capabilities *entry = data;
+-
+- if (psci_ops.get_version) {
+- if (check_smccc_arch_workaround_1(entry))
+- return 0;
+-
+- install_bp_hardening_cb(entry,
+- (bp_hardening_cb_t)psci_ops.get_version,
+- __psci_hyp_bp_inval_start,
+- __psci_hyp_bp_inval_end);
+- }
+-
+ return 0;
+ }
+
+@@ -399,22 +380,22 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
+- .enable = enable_psci_bp_hardening,
++ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
+- .enable = enable_psci_bp_hardening,
++ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
+- .enable = enable_psci_bp_hardening,
++ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
+- .enable = enable_psci_bp_hardening,
++ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+@@ -428,12 +409,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+- .enable = enable_psci_bp_hardening,
++ .enable = enable_smccc_arch_workaround_1,
+ },
+ {
+ .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+- .enable = enable_psci_bp_hardening,
++ .enable = enable_smccc_arch_workaround_1,
+ },
+ #endif
+ {
+diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
+index 408c04d789a5..cac6a0500162 100644
+--- a/arch/arm64/kvm/hyp/switch.c
++++ b/arch/arm64/kvm/hyp/switch.c
+@@ -350,20 +350,6 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+ if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+ goto again;
+
+- if (exit_code == ARM_EXCEPTION_TRAP &&
+- (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC64 ||
+- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_HVC32)) {
+- u32 val = vcpu_get_reg(vcpu, 0);
+-
+- if (val == PSCI_0_2_FN_PSCI_VERSION) {
+- val = kvm_psci_version(vcpu, kern_hyp_va(vcpu->kvm));
+- if (unlikely(val == KVM_ARM_PSCI_0_1))
+- val = PSCI_RET_NOT_SUPPORTED;
+- vcpu_set_reg(vcpu, 0, val);
+- goto again;
+- }
+- }
+-
+ if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+ exit_code == ARM_EXCEPTION_TRAP) {
+ bool valid;
+--
+2.16.2
+
diff --git a/patches.arch/arm64-Relax-ARM_SMCCC_ARCH_WORKAROUND_1-discovery.patch b/patches.arch/arm64-Relax-ARM_SMCCC_ARCH_WORKAROUND_1-discovery.patch
new file mode 100644
index 0000000000..73efde9fb2
--- /dev/null
+++ b/patches.arch/arm64-Relax-ARM_SMCCC_ARCH_WORKAROUND_1-discovery.patch
@@ -0,0 +1,48 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Fri, 9 Mar 2018 15:40:50 +0000
+Subject: arm64: Relax ARM_SMCCC_ARCH_WORKAROUND_1 discovery
+Git-commit: e21da1c992007594d391e7b301779cf30f438691
+Patch-mainline: v4.16-rc5
+References: bsc#1068032
+
+A recent update to the ARM SMCCC ARCH_WORKAROUND_1 specification
+allows firmware to return a non-zero, positive value to describe
+that although the mitigation is implemented at the higher exception
+level, the CPU on which the call is made is not affected.
+
+Let's relax the check on the return value from ARCH_WORKAROUND_1
+so that we only error out if the returned value is negative.
+
+Fixes: b092201e0020 ("arm64: Add ARM_SMCCC_ARCH_WORKAROUND_1 BP hardening support")
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/cpu_errata.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
+index 52f15cd896e1..b5a28336c077 100644
+--- a/arch/arm64/kernel/cpu_errata.c
++++ b/arch/arm64/kernel/cpu_errata.c
+@@ -178,7 +178,7 @@ static int enable_smccc_arch_workaround_1(void *data)
+ case PSCI_CONDUIT_HVC:
+ arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if (res.a0)
++ if ((int)res.a0 < 0)
+ return 0;
+ cb = call_hvc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_hvc_start;
+@@ -188,7 +188,7 @@ static int enable_smccc_arch_workaround_1(void *data)
+ case PSCI_CONDUIT_SMC:
+ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
+ ARM_SMCCC_ARCH_WORKAROUND_1, &res);
+- if (res.a0)
++ if ((int)res.a0 < 0)
+ return 0;
+ cb = call_smc_arch_workaround_1;
+ smccc_start = __smccc_workaround_1_smc_start;
+--
+2.16.2
+
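As a rough illustration of the return-value check changed above, the standalone sketch below uses made-up EXAMPLE_* constants (not kernel definitions) to show why casting the SMCCC result to a signed int keeps negative error codes fatal while letting the newly allowed positive "not affected" values through:

    #include <stdio.h>

    #define EXAMPLE_RET_SUCCESS         0    /* mitigation implemented and required      */
    #define EXAMPLE_RET_NOT_SUPPORTED  (-1)  /* firmware does not implement the call     */
    #define EXAMPLE_RET_NOT_AFFECTED    1    /* hypothetical positive "CPU not affected" */

    /* Mirrors the patched check: only a negative a0 means "do not install". */
    static int should_install(unsigned long a0)
    {
        return (int)a0 >= 0;
    }

    int main(void)
    {
        printf("a0 =  0 -> install: %d\n", should_install(EXAMPLE_RET_SUCCESS));
        printf("a0 = -1 -> install: %d\n", should_install((unsigned long)EXAMPLE_RET_NOT_SUPPORTED));
        printf("a0 =  1 -> install: %d\n", should_install(EXAMPLE_RET_NOT_AFFECTED));
        return 0;
    }
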
diff --git a/patches.arch/arm64-capabilities-Handle-duplicate-entries-for-a-ca.patch b/patches.arch/arm64-capabilities-Handle-duplicate-entries-for-a-ca.patch
new file mode 100644
index 0000000000..8783f2ebe3
--- /dev/null
+++ b/patches.arch/arm64-capabilities-Handle-duplicate-entries-for-a-ca.patch
@@ -0,0 +1,107 @@
+From: Suzuki K Poulose <suzuki.poulose@arm.com>
+Date: Tue, 9 Jan 2018 16:12:18 +0000
+Subject: arm64: capabilities: Handle duplicate entries for a capability
+Git-commit: 67948af41f2e6818edeeba5182811c704d484949
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+Sometimes a single capability could be listed multiple times with
+differing matches(), e.g, CPU errata for different MIDR versions.
+This breaks verify_local_cpu_feature() and this_cpu_has_cap() as
+we stop checking for a capability on a CPU with the first
+entry in the given table, which is not sufficient. Make sure we
+run the checks for all entries of the same capability. We do
+this by fixing __this_cpu_has_cap() to run through all the
+entries in the given table for a match and reuse it for
+verify_local_cpu_feature().
+
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Will Deacon <will.deacon@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/cpufeature.c | 44 ++++++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 21 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 9ef84d0def9a..9f4491d16cfb 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1137,6 +1137,26 @@ static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
+ cap_set_elf_hwcap(hwcaps);
+ }
+
++/*
++ * Check if the current CPU has a given feature capability.
++ * Should be called from non-preemptible context.
++ */
++static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
++ unsigned int cap)
++{
++ const struct arm64_cpu_capabilities *caps;
++
++ if (WARN_ON(preemptible()))
++ return false;
++
++ for (caps = cap_array; caps->desc; caps++)
++ if (caps->capability == cap &&
++ caps->matches &&
++ caps->matches(caps, SCOPE_LOCAL_CPU))
++ return true;
++ return false;
++}
++
+ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
+ const char *info)
+ {
+@@ -1200,8 +1220,9 @@ verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
+ }
+
+ static void
+-verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
++verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
+ {
++ const struct arm64_cpu_capabilities *caps = caps_list;
+ for (; caps->matches; caps++) {
+ if (!cpus_have_cap(caps->capability))
+ continue;
+@@ -1209,7 +1230,7 @@ verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
+ * If the new CPU misses an advertised feature, we cannot proceed
+ * further, park the cpu.
+ */
+- if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
++ if (!__this_cpu_has_cap(caps_list, caps->capability)) {
+ pr_crit("CPU%d: missing feature: %s\n",
+ smp_processor_id(), caps->desc);
+ cpu_die_early();
+@@ -1291,25 +1312,6 @@ static void __init mark_const_caps_ready(void)
+ static_branch_enable(&arm64_const_caps_ready);
+ }
+
+-/*
+- * Check if the current CPU has a given feature capability.
+- * Should be called from non-preemptible context.
+- */
+-static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+- unsigned int cap)
+-{
+- const struct arm64_cpu_capabilities *caps;
+-
+- if (WARN_ON(preemptible()))
+- return false;
+-
+- for (caps = cap_array; caps->desc; caps++)
+- if (caps->capability == cap && caps->matches)
+- return caps->matches(caps, SCOPE_LOCAL_CPU);
+-
+- return false;
+-}
+-
+ extern const struct arm64_cpu_capabilities arm64_errata[];
+
+ bool this_cpu_has_cap(unsigned int cap)
+--
+2.16.2
+
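To make the duplicate-entry problem concrete, the self-contained sketch below uses an invented cap_entry table (not the kernel's arm64_cpu_capabilities machinery) in which the same capability id appears twice with different matches() hooks; the lookup has to keep walking past the first, non-matching entry instead of giving up:

    #include <stdbool.h>
    #include <stdio.h>

    struct cap_entry {
        const char *desc;                           /* NULL terminates the table */
        unsigned int capability;
        bool (*matches)(const struct cap_entry *);
    };

    static bool never(const struct cap_entry *e)  { (void)e; return false; }
    static bool always(const struct cap_entry *e) { (void)e; return true;  }

    static bool this_cpu_has_cap(const struct cap_entry *table, unsigned int cap)
    {
        const struct cap_entry *e;

        for (e = table; e->desc; e++)
            if (e->capability == cap && e->matches && e->matches(e))
                return true;        /* keep scanning after a non-matching entry */
        return false;
    }

    int main(void)
    {
        const struct cap_entry table[] = {
            { "erratum, MIDR range A", 7, never  },  /* does not match this CPU */
            { "erratum, MIDR range B", 7, always },  /* same cap id, does match */
            { NULL, 0, NULL },
        };

        printf("cap 7 present: %d\n", this_cpu_has_cap(table, 7));  /* prints 1 */
        return 0;
    }
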
diff --git a/patches.arch/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-e.patch b/patches.arch/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-e.patch
new file mode 100644
index 0000000000..a048343579
--- /dev/null
+++ b/patches.arch/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-e.patch
@@ -0,0 +1,45 @@
+From: James Morse <james.morse@arm.com>
+Date: Mon, 15 Jan 2018 19:38:54 +0000
+Subject: arm64: cpufeature: __this_cpu_has_cap() shouldn't stop early
+Git-commit: edf298cfce47ab7279d03b5203ae2ef3a58e49db
+Patch-mainline: v4.16-rc1
+References: bsc#1068032
+
+this_cpu_has_cap() tests caps->desc not caps->matches, so it stops
+walking the list when it finds a 'silent' feature, instead of
+walking to the end of the list.
+
+Prior to v4.6's 644c2ae198412 ("arm64: cpufeature: Test 'matches' pointer
+to find the end of the list") we always tested desc to find the end of
+a capability list. This was changed for dubious things like PAN_NOT_UAO.
+v4.7's e3661b128e53e ("arm64: Allow a capability to be checked on
+single CPU") added this_cpu_has_cap() using the old desc style test.
+
+CC: Suzuki K Poulose <suzuki.poulose@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/cpufeature.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index a11311397430..630a40ec1332 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1149,9 +1149,8 @@ static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
+ if (WARN_ON(preemptible()))
+ return false;
+
+- for (caps = cap_array; caps->desc; caps++)
++ for (caps = cap_array; caps->matches; caps++)
+ if (caps->capability == cap &&
+- caps->matches &&
+ caps->matches(caps, SCOPE_LOCAL_CPU))
+ return true;
+ return false;
+--
+2.16.2
+
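A companion sketch of the terminator change itself, again with invented names rather than the kernel's: a "silent" entry has desc == NULL but a valid matches() hook, so the walk has to end on matches() or it stops early and misses everything after the silent entry:

    #include <stdbool.h>
    #include <stdio.h>

    struct cap_entry {
        const char *desc;                            /* NULL for silent features  */
        unsigned int capability;
        bool (*matches)(const struct cap_entry *);   /* NULL terminates the table */
    };

    static bool yes(const struct cap_entry *e) { (void)e; return true; }

    static bool this_cpu_has_cap(const struct cap_entry *table, unsigned int cap)
    {
        const struct cap_entry *e;

        /* Terminating on ->desc instead would stop at the silent entry below
         * and capability 9 would never be found. */
        for (e = table; e->matches; e++)
            if (e->capability == cap && e->matches(e))
                return true;
        return false;
    }

    int main(void)
    {
        const struct cap_entry table[] = {
            { NULL,      8, yes  },   /* silent feature: no description string */
            { "visible", 9, yes  },
            { NULL,      0, NULL },   /* real end of the table */
        };

        printf("cap 9 present: %d\n", this_cpu_has_cap(table, 9));  /* prints 1 */
        return 0;
    }
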
diff --git a/patches.arch/arm64-fix-smccc-compilation.patch b/patches.arch/arm64-fix-smccc-compilation.patch
new file mode 100644
index 0000000000..7193308dac
--- /dev/null
+++ b/patches.arch/arm64-fix-smccc-compilation.patch
@@ -0,0 +1,54 @@
+From: Matthias Brugger <mbrugger@suse.com>
+Date: Tue, 20 Mar 2018 13:56:21 +0000
+Subject: arm64: fix smccc compilation
+Patch-mainline: Never, fixes local compilation issue
+References: bsc#1068032
+
+ARM_SMCCC_CALL_VAL is an assembly macro. Fix compilation of
+arm-smccc.h by setting the ifdefs as in mainline.
+
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/smccc-call.S | 1 +
+ include/linux/arm-smccc.h | 9 ++++++---
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+--- a/arch/arm64/kernel/smccc-call.S
++++ b/arch/arm64/kernel/smccc-call.S
+@@ -12,6 +12,7 @@
+ *
+ */
+ #include <linux/linkage.h>
++#include <linux/arm-smccc.h>
+ #include <asm/asm-offsets.h>
+
+ .macro SMCCC instr
+--- a/include/linux/arm-smccc.h
++++ b/include/linux/arm-smccc.h
+@@ -14,9 +14,6 @@
+ #ifndef __LINUX_ARM_SMCCC_H
+ #define __LINUX_ARM_SMCCC_H
+
+-#include <linux/linkage.h>
+-#include <linux/types.h>
+-
+ #include <uapi/linux/const.h>
+
+ /*
+@@ -80,6 +77,11 @@
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
++#ifndef __ASSEMBLY__
++
++#include <linux/linkage.h>
++#include <linux/types.h>
++
+ /**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3 result values from registers 0 to 3
+@@ -262,4 +264,5 @@ asmlinkage void arm_smccc_hvc(unsigned l
+ */
+ #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__)
+
++#endif /*__ASSEMBLY__*/
+ #endif /*__LINUX_ARM_SMCCC_H*/
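The guard pattern the patch adopts can be summarised with a hypothetical header of its own (names and the constant value are illustrative, not the real arm-smccc.h): numeric macros stay outside the guard so .S files can include the header, while struct definitions and prototypes remain C-only. The kernel build defines __ASSEMBLY__ when assembling, so the assembler never sees the C declarations:

    #ifndef EXAMPLE_SMCCC_H
    #define EXAMPLE_SMCCC_H

    /* Plain numeric macro: usable from both C and assembly sources. */
    #define EXAMPLE_FEATURES_FUNC_ID    0x80000001

    #ifndef __ASSEMBLY__    /* only defined when assembling .S files */

    /* C-only material: this would not assemble. */
    struct example_res {
        unsigned long a0, a1, a2, a3;
    };

    void example_smccc_call(unsigned long fn_id, struct example_res *res);

    #endif /* __ASSEMBLY__ */
    #endif /* EXAMPLE_SMCCC_H */
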
diff --git a/patches.arch/arm64-mm-fix-thinko-in-non-global-page-table-attribu.patch b/patches.arch/arm64-mm-fix-thinko-in-non-global-page-table-attribu.patch
new file mode 100644
index 0000000000..472606992c
--- /dev/null
+++ b/patches.arch/arm64-mm-fix-thinko-in-non-global-page-table-attribu.patch
@@ -0,0 +1,55 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Fri, 23 Feb 2018 18:04:48 +0000
+Subject: arm64: mm: fix thinko in non-global page table attribute check
+Git-commit: 753e8abc36b2c966caea075db0c845563c8a19bf
+Patch-mainline: v4.16-rc5
+References: bsc#1088050
+
+The routine pgattr_change_is_safe() was extended in commit 4e6020565596
+("arm64: mm: Permit transitioning from Global to Non-Global without BBM")
+to permit changing the nG attribute from not set to set, but did so in a
+way that inadvertently disallows such changes if other permitted attribute
+changes take place at the same time. So update the code to take this into
+account.
+
+Fixes: 4e6020565596 ("arm64: mm: Permit transitioning from Global to ...")
+Cc: <stable@vger.kernel.org> # 4.14.x-
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/mm/mmu.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 84a019f55022..8c704f1e53c2 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -108,7 +108,7 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
+ * The following mapping attributes may be updated in live
+ * kernel mappings without the need for break-before-make.
+ */
+- static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;
++ static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
+
+ /* creating or taking down mappings is always safe */
+ if (old == 0 || new == 0)
+@@ -118,9 +118,9 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
+ if ((old | new) & PTE_CONT)
+ return false;
+
+- /* Transitioning from Global to Non-Global is safe */
+- if (((old ^ new) == PTE_NG) && (new & PTE_NG))
+- return true;
++ /* Transitioning from Non-Global to Global is unsafe */
++ if (old & ~new & PTE_NG)
++ return false;
+
+ return ((old ^ new) & ~mask) == 0;
+ }
+--
+2.16.2
+
diff --git a/patches.drivers/net-hns-Fix-ethtool-private-flags.patch b/patches.drivers/net-hns-Fix-ethtool-private-flags.patch
new file mode 100644
index 0000000000..04abc93757
--- /dev/null
+++ b/patches.drivers/net-hns-Fix-ethtool-private-flags.patch
@@ -0,0 +1,80 @@
+From: Matthias Brugger <matthias.bgg@gmail.com>
+Date: Thu, 15 Mar 2018 17:54:20 +0100
+Subject: net: hns: Fix ethtool private flags
+Git-commit: d61d263c8d82db7c4404a29ebc29674b1c0c05c9
+Patch-mainline: v4.16-rc7
+References: bsc#1085511
+
+The driver implementation returns support for private flags, while
+no private flags are present. When asked for the number of private
+flags it returns the number of statistic flag names.
+
+Fix this by returning EOPNOTSUPP for not implemented ethtool flags.
+
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2 +-
+ drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 2 +-
+ drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 2 +-
+ drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4 +++-
+ 4 files changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+index 86944bc3b273..74bd260ca02a 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+@@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
+
+ static int hns_gmac_get_sset_count(int stringset)
+ {
+- if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
++ if (stringset == ETH_SS_STATS)
+ return ARRAY_SIZE(g_gmac_stats_string);
+
+ return 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+index b62816c1574e..93e71e27401b 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
+
+ int hns_ppe_get_sset_count(int stringset)
+ {
+- if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
++ if (stringset == ETH_SS_STATS)
+ return ETH_PPE_STATIC_NUM;
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+index 6f3570cfb501..e2e28532e4dc 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+@@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
+ */
+ int hns_rcb_get_ring_sset_count(int stringset)
+ {
+- if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
++ if (stringset == ETH_SS_STATS)
+ return HNS_RING_STATIC_REG_NUM;
+
+ return 0;
+diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+index 7ea7f8a4aa2a..2e14a3ae1d8b 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+@@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
+ cnt--;
+
+ return cnt;
+- } else {
++ } else if (stringset == ETH_SS_STATS) {
+ return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
++ } else {
++ return -EOPNOTSUPP;
+ }
+ }
+
+--
+2.16.2
+
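A minimal userspace sketch of the intended get_sset_count() behaviour, with invented names and counts rather than the hns driver's: report a count only for the string sets the driver actually implements, and -EOPNOTSUPP for everything else, so ethtool no longer sees phantom private flags:

    #include <errno.h>
    #include <stdio.h>

    enum example_stringset { SS_TEST, SS_STATS, SS_PRIV_FLAGS };

    #define EXAMPLE_STATS_COUNT 42     /* hypothetical number of statistics */

    static int example_get_sset_count(enum example_stringset set)
    {
        switch (set) {
        case SS_STATS:
            return EXAMPLE_STATS_COUNT;   /* statistics strings exist        */
        default:
            return -EOPNOTSUPP;           /* no private flags, no self tests */
        }
    }

    int main(void)
    {
        printf("stats:      %d\n", example_get_sset_count(SS_STATS));
        printf("priv flags: %d\n", example_get_sset_count(SS_PRIV_FLAGS));
        return 0;
    }
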
diff --git a/patches.drivers/watchdog-sbsa-use-32-bit-read-for-WCV.patch b/patches.drivers/watchdog-sbsa-use-32-bit-read-for-WCV.patch
new file mode 100644
index 0000000000..12dd4129e3
--- /dev/null
+++ b/patches.drivers/watchdog-sbsa-use-32-bit-read-for-WCV.patch
@@ -0,0 +1,53 @@
+From: Jayachandran C <jnair@caviumnetworks.com>
+Date: Wed, 28 Feb 2018 02:52:20 -0800
+Subject: watchdog: sbsa: use 32-bit read for WCV
+Git-commit: 93ac3deb7c220cbcec032a967220a1f109d58431
+Patch-mainline: v4.16-rc5
+References: bsc#1085679
+
+According to SBSA spec v3.1 section 5.3:
+ All registers are 32 bits in size and should be accessed using
+ 32-bit reads and writes. If an access size other than 32 bits
+ is used then the results are IMPLEMENTATION DEFINED.
+ [...]
+ The Generic Watchdog is little-endian
+
+The current code uses readq to read the watchdog compare register
+which does a 64-bit access. This fails on ThunderX2 which does not
+implement 64-bit access to this register.
+
+Fix this by using lo_hi_readq() that does two 32-bit reads.
+
+Signed-off-by: Jayachandran C <jnair@caviumnetworks.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Wim Van Sebroeck <wim@iguana.be>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/watchdog/sbsa_gwdt.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
+index 316c2eb122d2..e8bd9887c566 100644
+--- a/drivers/watchdog/sbsa_gwdt.c
++++ b/drivers/watchdog/sbsa_gwdt.c
+@@ -50,6 +50,7 @@
+ */
+
+ #include <linux/io.h>
++#include <linux/io-64-nonatomic-lo-hi.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+@@ -159,7 +160,7 @@ static unsigned int sbsa_gwdt_get_timeleft(struct watchdog_device *wdd)
+ !(readl(gwdt->control_base + SBSA_GWDT_WCS) & SBSA_GWDT_WCS_WS0))
+ timeleft += readl(gwdt->control_base + SBSA_GWDT_WOR);
+
+- timeleft += readq(gwdt->control_base + SBSA_GWDT_WCV) -
++ timeleft += lo_hi_readq(gwdt->control_base + SBSA_GWDT_WCV) -
+ arch_counter_get_cntvct();
+
+ do_div(timeleft, gwdt->clk);
+--
+2.16.2
+
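As a rough model of what the two 32-bit accesses amount to (simplified, with a plain-memory stand-in for readl() and assuming a little-endian host, matching the SBSA register layout): read the low word at offset 0, the high word at offset 4, and combine them into one 64-bit value:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for readl(): here it simply loads 32 bits from memory. */
    static uint32_t example_readl(const volatile void *addr)
    {
        return *(const volatile uint32_t *)addr;
    }

    static uint64_t example_lo_hi_readq(const volatile void *addr)
    {
        uint32_t lo = example_readl(addr);                                /* offset 0 */
        uint32_t hi = example_readl((const volatile uint8_t *)addr + 4);  /* offset 4 */

        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        uint64_t wcv = 0x1122334455667788ULL;   /* pretend watchdog compare value */

        printf("read back: 0x%llx\n",
               (unsigned long long)example_lo_hi_readq(&wcv));   /* same value on LE */
        return 0;
    }
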
diff --git a/series.conf b/series.conf
index 842f6d91ca..cc977ba5e8 100644
--- a/series.conf
+++ b/series.conf
@@ -14910,6 +14910,9 @@
patches.fixes/netfilter-ebtables-fix-erroneous-reject-of-last-rule.patch
patches.fixes/Revert-e1000e-Separate-signaling-for-link-check-link.patch
patches.fixes/e1000e-Fix-link-check-race-condition.patch
+ patches.drivers/net-hns-Fix-ethtool-private-flags.patch
+
+ # davem/net-next
patches.drivers/ibmvnic-Keep-track-of-supplementary-TX-descriptors.patch
patches.drivers/ibmvnic-Rename-active-queue-count-variables.patch
patches.drivers/ibmvnic-Move-active-sub-crq-count-settings.patch
@@ -21135,6 +21138,7 @@
patches.fixes/PCI-MSI-Ignore-affinity-if-pre-post-vector-count-is-.patch
patches.fixes/PCI-msi-fix-the-pci_alloc_irq_vectors_affinity-stub.patch
patches.drivers/0001-mmc-mmc-correct-the-logic-for-setting-HS400ES-signal.patch
+ patches.drivers/watchdog-sbsa-use-32-bit-read-for-WCV.patch
# bsc#1035479 Missing CN88 patches
patches.drivers/0001-iommu-arm-smmu-Print-message-when-Cavium-erratum-277.patch
@@ -22197,6 +22201,29 @@
patches.kabi/kabi-protect-struct-ethtool_link_settings.patch
+ # arm64 SMCCC v1.1
+ patches.arch/0001-arm64-KVM-Increment-PC-after-handling-an-SMC-trap.patch
+ patches.arch/0002-arm-arm64-KVM-Consolidate-the-PSCI-include-files.patch
+ patches.arch/0003-arm-arm64-KVM-Add-PSCI_VERSION-helper.patch
+ patches.arch/0004-arm-arm64-KVM-Add-smccc-accessors-to-PSCI-code.patch
+ patches.arch/0005-arm-arm64-KVM-Implement-PSCI-1.0-support.patch
+ patches.arch/0006-arm-arm64-KVM-Advertise-SMCCC-v1.1.patch
+ patches.arch/0007-arm-arm64-KVM-Turn-kvm_psci_version-into-a-static-in.patch
+ patches.arch/0008-arm64-KVM-Report-SMCCC_ARCH_WORKAROUND_1-BP-hardenin.patch
+ patches.arch/0009-arm64-KVM-Add-SMCCC_ARCH_WORKAROUND_1-fast-handling.patch
+ patches.arch/0010-firmware-psci-Expose-PSCI-conduit.patch
+ patches.arch/0011-firmware-psci-Expose-SMCCC-version-through-psci_ops.patch
+ patches.arch/0012-arm-arm64-smccc-Make-function-identifiers-an-unsigne.patch
+ patches.arch/0013-arm-arm64-smccc-Implement-SMCCC-v1.1-inline-primitiv.patch
+ patches.arch/0014-arm64-Add-ARM_SMCCC_ARCH_WORKAROUND_1-BP-hardening-s.patch
+ patches.arch/0015-arm64-Kill-PSCI_GET_VERSION-as-a-variant-2-workaroun.patch
+ patches.arch/arm64-fix-smccc-compilation.patch
+
+ patches.arch/arm64-capabilities-Handle-duplicate-entries-for-a-ca.patch
+ patches.arch/arm64-cpufeature-__this_cpu_has_cap-shouldn-t-stop-e.patch
+ patches.arch/arm64-Relax-ARM_SMCCC_ARCH_WORKAROUND_1-discovery.patch
+ patches.arch/arm64-mm-fix-thinko-in-non-global-page-table-attribu.patch
+
########################################################
# kGraft
########################################################