author     Takashi Iwai <tiwai@suse.de>    2018-08-14 11:18:29 +0200
committer  Takashi Iwai <tiwai@suse.de>    2018-08-14 11:18:29 +0200
commit     4e090cccf03b8bc44df284541592fb087136f4ac (patch)
tree       977800c0e0da5babbf0ab0b428805626727914fb
parent     a015d477cc4ba5c84e5ae6055a3ec370c49ca548 (diff)
parent     6bff971011eb57652b9cf5be604e2caee82607c5 (diff)

Merge branch 'SLE12-SP3_EMBARGO' into openSUSE-42.3_EMBARGO (rpm-4.4.143-65)
-rw-r--r--  config/x86_64/debug | 1
-rw-r--r--  config/x86_64/default | 1
-rw-r--r--  kabi/severities | 1
-rw-r--r--  patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch | 48
-rw-r--r--  patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch | 81
-rw-r--r--  patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch | 175
-rw-r--r--  patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch | 113
-rw-r--r--  patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch | 131
-rw-r--r--  patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch | 208
-rw-r--r--  patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch | 68
-rw-r--r--  patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch | 54
-rw-r--r--  patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch | 78
-rw-r--r--  patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch | 71
-rw-r--r--  patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch | 89
-rw-r--r--  patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch | 370
-rw-r--r--  patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch | 622
-rw-r--r--  patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch | 49
-rw-r--r--  patches.arch/01-sched-smt-update-sched_smt_present-at-runtime.patch | 93
-rw-r--r--  patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch | 102
-rw-r--r--  patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch | 132
-rw-r--r--  patches.arch/02-x86-smp-provide-topology_is_primary_thread.patch | 126
-rw-r--r--  patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch | 137
-rw-r--r--  patches.arch/03-x86-topology-provide-topology_smt_supported.patch | 49
-rw-r--r--  patches.arch/04-cpu-hotplug-split-do_cpu_down.patch | 60
-rw-r--r--  patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch | 84
-rw-r--r--  patches.arch/04.1-cpu-hotplug-add-sysfs-state-interface.patch | 159
-rw-r--r--  patches.arch/04.2-x86-topology-add-topology_max_smt_threads.patch | 125
-rw-r--r--  patches.arch/04.3-x86-smpboot-do-not-use-smp_num_siblings-in-_max_logical_packages-calculation.patch | 50
-rw-r--r--  patches.arch/05-cpu-hotplug-provide-knobs-to-control-smt.patch | 369
-rw-r--r--  patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch | 151
-rw-r--r--  patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch | 123
-rw-r--r--  patches.arch/06-x86-cpu-remove-the-pointless-cpu-printout.patch | 112
-rw-r--r--  patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch | 85
-rw-r--r--  patches.arch/07-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch | 42
-rw-r--r--  patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch | 84
-rw-r--r--  patches.arch/08-x86-cpu-common-provide-detect_ht_early.patch | 90
-rw-r--r--  patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch | 92
-rw-r--r--  patches.arch/09-x86-cpu-topology-provide-detect_extended_topology_early.patch | 129
-rw-r--r--  patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch | 93
-rw-r--r--  patches.arch/10-x86-cpu-intel-evaluate-smp_num_siblings-early.patch | 46
-rw-r--r--  patches.arch/11-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch | 35
-rw-r--r--  patches.arch/12-x86-cpu-amd-evaluate-smp_num_siblings-early.patch | 47
-rw-r--r--  patches.arch/14-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch | 94
-rw-r--r--  patches.arch/15-cpu-hotplug-boot-HT-siblings-at-least-once.patch | 204
-rw-r--r--  patches.arch/16-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch | 70
-rw-r--r--  patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch | 72
-rw-r--r--  patches.arch/x86-l1tf-02-change-order-of-offset-type.patch | 89
-rw-r--r--  patches.arch/x86-l1tf-03-protect-swap-entries.patch | 78
-rw-r--r--  patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch | 244
-rw-r--r--  patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch | 46
-rw-r--r--  patches.arch/x86-l1tf-06-add-sysfs-report.patch | 235
-rw-r--r--  patches.arch/x86-l1tf-07-limit-swap-file-size.patch | 140
-rw-r--r--  patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch | 280
-rw-r--r--  patches.arch/x86-mm-Simplify-p-g4um-d_page-macros.patch | 99
-rw-r--r--  patches.fixes/inet-frag-enforce-memory-limits-earlier.patch | 58
-rw-r--r--  patches.fixes/ip-discard-ipv4-datagrams-with-overlapping-segments.patch | 144
-rw-r--r--  patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch | 55
-rw-r--r--  patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch | 51
-rw-r--r--  series.conf | 67
59 files changed, 6797 insertions, 4 deletions
diff --git a/config/x86_64/debug b/config/x86_64/debug
index 01b1c3679a..b91734727b 100644
--- a/config/x86_64/debug
+++ b/config/x86_64/debug
@@ -251,6 +251,7 @@ CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
CONFIG_CRASH_CORE=y
CONFIG_KEXEC_CORE=y
+CONFIG_HOTPLUG_SMT=y
CONFIG_OPROFILE=m
# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
CONFIG_HAVE_OPROFILE=y
diff --git a/config/x86_64/default b/config/x86_64/default
index a464fc1ae1..8d4bbd1a90 100644
--- a/config/x86_64/default
+++ b/config/x86_64/default
@@ -251,6 +251,7 @@ CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
CONFIG_CRASH_CORE=y
CONFIG_KEXEC_CORE=y
+CONFIG_HOTPLUG_SMT=y
CONFIG_OPROFILE=m
# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
CONFIG_HAVE_OPROFILE=y
diff --git a/kabi/severities b/kabi/severities
index a24b46cd9c..7a7a79c96d 100644
--- a/kabi/severities
+++ b/kabi/severities
@@ -8,7 +8,6 @@
# symbol is exported from. The special pattern "vmlinux" matches built-in
# symbols. All other patterns match against symbol names.
-kvm_x86_ops FAIL
arch/x86/kvm/* PASS
arch/powerpc/kvm/* PASS
kvmppc_do_h_enter PASS
diff --git a/patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch b/patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch
new file mode 100644
index 0000000000..d47afde476
--- /dev/null
+++ b/patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch
@@ -0,0 +1,48 @@
+From 182a68debec3df71553dea079bad650361372e88 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 16:42:58 -0400
+Subject: [PATCH] x86/cpufeatures: Add detection of L1D cache flush support.
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 11e34e64e4103955fc4568750914c75d65ea87ee upstream
+
+336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
+(IA32_FLUSH_CMD) which is detected by CPUID.7.EDX[28]=1 bit being set.
+
+This new MSR "gives software a way to invalidate structures with finer
+granularity than other architectual methods like WBINVD."
+
+A copy of this document is available at
+ https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+[jkosina@suse.cz: do a scattered bit initialization]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -78,6 +78,7 @@
+ */
+ #define X86_FEATURE_ZEN ( 2*32+ 4) /* "" CPU is AMD family 0x17 (Zen) */
+ #define X86_FEATURE_L1TF_FIX ( 2*32+5) /* "" L1TF workaround used */
++#define X86_FEATURE_FLUSH_L1D ( 2*32+6) /* Flush L1D cache */
+
+ /* Other features, Linux-defined mapping, word 3 */
+ /* This range is used for feature bits which conflict or are synthesized */
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -52,6 +52,7 @@ static const struct cpuid_bit cpuid_bits
+ { X86_FEATURE_PAUSEFILTER, CPUID_EDX,10, 0x8000000a, 0 },
+ { X86_FEATURE_PFTHRESHOLD, CPUID_EDX,12, 0x8000000a, 0 },
+ { X86_FEATURE_AVIC, CPUID_EDX,13, 0x8000000a, 0 },
++ { X86_FEATURE_FLUSH_L1D, CPUID_EDX,28, 0x00000007, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
+
diff --git a/patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch b/patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch
new file mode 100644
index 0000000000..41cf138629
--- /dev/null
+++ b/patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch
@@ -0,0 +1,81 @@
+From 288d152c23dcf3c09da46c5c481903ca10ebfef7 Mon Sep 17 00:00:00 2001
+From: Nicolai Stange <nstange@suse.de>
+Date: Wed, 18 Jul 2018 19:07:38 +0200
+Subject: [PATCH] x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
+Patch-mainline: not yet, under development
+References: bsc#1089343 CVE-2018-3646
+
+The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
+to evict the L1d cache.
+
+However, these pages are never cleared and, in theory, their data could be
+leaked.
+
+More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
+to fewer than 1 << L1D_CACHE_ORDER host physical pages and this would break
+the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.
+
+Fix this by initializing the individual vmx_l1d_flush_pages with a
+different pattern each.
+
+Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
+"flush_pages" to reflect this change.
+
+Fixes: a47dd5f06714 ("x86/KVM/VMX: Add L1D flush algorithm")
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kvm/vmx.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -196,6 +196,7 @@ static void *vmx_l1d_flush_pages;
+ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+ {
+ struct page *page;
++ unsigned int i;
+
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+@@ -228,6 +229,16 @@ static int vmx_setup_l1d_flush(enum vmx_
+ if (!page)
+ return -ENOMEM;
+ vmx_l1d_flush_pages = page_address(page);
++
++ /*
++ * Initialize each page with a different pattern in
++ * order to protect against KSM in the nested
++ * virtualization case.
++ */
++ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
++ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
++ PAGE_SIZE);
++ }
+ }
+
+ l1tf_vmx_mitigation = l1tf;
+@@ -8398,7 +8409,7 @@ static void vmx_l1d_flush(struct kvm_vcp
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+ ".Lpopulate_tlb:\n\t"
+- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $4096, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lpopulate_tlb\n\t"
+@@ -8407,12 +8418,12 @@ static void vmx_l1d_flush(struct kvm_vcp
+ /* Now fill the cache */
+ "xorl %%eax, %%eax\n"
+ ".Lfill_cache:\n"
+- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $64, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lfill_cache\n\t"
+ "lfence\n"
+- :: [empty_zp] "r" (vmx_l1d_flush_pages),
++ :: [flush_pages] "r" (vmx_l1d_flush_pages),
+ [size] "r" (size)
+ : "eax", "ebx", "ecx", "edx");
+ }
diff --git a/patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch b/patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch
new file mode 100644
index 0000000000..2791fcc530
--- /dev/null
+++ b/patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch
@@ -0,0 +1,175 @@
+From 1018fc06496c6c99b41aceb65e7638313b2e0d0f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:16 +0200
+Subject: [PATCH 01/11] x86/litf: Introduce vmx status variable
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 72c6d2db64fa18c996ece8f06e499509e6c9a37e upstream
+
+Store the effective mitigation of VMX in a status variable and use it to
+report the VMX state in the l1tf sysfs file.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/vmx.h | 9 +++++++++
+ arch/x86/kernel/cpu/bugs.c | 34 +++++++++++++++++++++++++++++++++-
+ arch/x86/kvm/vmx.c | 22 +++++++++++-----------
+ 3 files changed, 53 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -571,4 +571,13 @@ enum vm_instruction_error_number {
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
+ };
+
++enum vmx_l1d_flush_state {
++ VMENTER_L1D_FLUSH_AUTO,
++ VMENTER_L1D_FLUSH_NEVER,
++ VMENTER_L1D_FLUSH_COND,
++ VMENTER_L1D_FLUSH_ALWAYS,
++};
++
++extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
++
+ #endif
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -21,6 +21,7 @@
+ #include <asm/processor-flags.h>
+ #include <asm/fpu/internal.h>
+ #include <asm/msr.h>
++#include <asm/vmx.h>
+ #include <asm/paravirt.h>
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+@@ -211,6 +212,11 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
++EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
++#endif
++
+ static void __init l1tf_select_mitigation(void)
+ {
+ u64 half_pa;
+@@ -705,6 +711,32 @@ void x86_spec_ctrl_setup_ap(void)
+
+ #ifdef CONFIG_SYSFS
+
++#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
++
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++static const char *l1tf_vmx_states[] = {
++ [VMENTER_L1D_FLUSH_AUTO] = "auto",
++ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
++ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
++ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++};
++
++static ssize_t l1tf_show_state(char *buf)
++{
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
++ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++
++ return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
++ cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled",
++ l1tf_vmx_states[l1tf_vmx_mitigation]);
++}
++#else
++static ssize_t l1tf_show_state(char *buf)
++{
++ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++}
++#endif
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+@@ -736,7 +768,7 @@ static ssize_t cpu_show_common(struct de
+
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_FIX))
+- return sprintf(buf, "Mitigation: Page Table Inversion\n");
++ return l1tf_show_state(buf);
+ break;
+
+ default:
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -199,19 +199,13 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+
+-/* These MUST be in sync with vmentry_l1d_param order. */
+-enum vmx_l1d_flush_state {
+- VMENTER_L1D_FLUSH_NEVER,
+- VMENTER_L1D_FLUSH_COND,
+- VMENTER_L1D_FLUSH_ALWAYS,
+-};
+-
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
+
+ static const struct {
+ const char *option;
+ enum vmx_l1d_flush_state cmd;
+ } vmentry_l1d_param[] = {
++ {"auto", VMENTER_L1D_FLUSH_AUTO},
+ {"never", VMENTER_L1D_FLUSH_NEVER},
+ {"cond", VMENTER_L1D_FLUSH_COND},
+ {"always", VMENTER_L1D_FLUSH_ALWAYS},
+@@ -12578,8 +12572,12 @@ static int __init vmx_setup_l1d_flush(vo
+ {
+ struct page *page;
+
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return 0;
++
++ l1tf_vmx_mitigation = vmentry_l1d_flush;
++
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- !boot_cpu_has_bug(X86_BUG_L1TF) ||
+ vmx_l1d_use_msr_save_list())
+ return 0;
+
+@@ -12594,12 +12592,14 @@ static int __init vmx_setup_l1d_flush(vo
+ return 0;
+ }
+
+-static void vmx_free_l1d_flush_pages(void)
++static void vmx_cleanup_l1d_flush(void)
+ {
+ if (vmx_l1d_flush_pages) {
+ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ vmx_l1d_flush_pages = NULL;
+ }
++ /* Restore state so sysfs ignores VMX */
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+
+ static int __init vmx_init(void)
+@@ -12613,7 +12613,7 @@ static int __init vmx_init(void)
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r) {
+- vmx_free_l1d_flush_pages();
++ vmx_cleanup_l1d_flush();
+ return r;
+ }
+
+@@ -12634,7 +12634,7 @@ static void __exit vmx_exit(void)
+
+ kvm_exit();
+
+- vmx_free_l1d_flush_pages();
++ vmx_cleanup_l1d_flush();
+ }
+
+ module_init(vmx_init)
diff --git a/patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch b/patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch
new file mode 100644
index 0000000000..6bf74eea54
--- /dev/null
+++ b/patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch
@@ -0,0 +1,113 @@
+From 7ec66a2a22a6fb2e6d1359c23ef223e41ce85517 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:17 +0200
+Subject: [PATCH 02/11] x86/kvm: Drop L1TF MSR list approach
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 2f055947ae5e2741fb2dc5bba1033c417ccf4faa upstream
+
+The VMX module parameter to control the L1D flush should become
+writeable.
+
+The MSR list is set up at VM init per guest VCPU, but the run time
+switching is based on a static key which is global. Toggling the MSR list
+at run time might be feasible, but for now drop this optimization and use
+the regular MSR write to make run-time switching possible.
+
+The default mitigation is the conditional flush anyway, so for extra
+paranoid setups this will add some small overhead, but the extra code
+executed is in the noise compared to the flush itself.
+
+Aside of that the EPT disabled case is not handled correctly at the moment
+and the MSR list magic is in the way for fixing that as well.
+
+If it's really providing a significant advantage, then this needs to be
+revisited after the code is correct and the control is writable.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.516940445@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 43 +++++++------------------------------------
+ 1 file changed, 7 insertions(+), 36 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4892,16 +4892,6 @@ static void ept_set_mmio_spte_mask(void)
+ kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
+ }
+
+-static bool vmx_l1d_use_msr_save_list(void)
+-{
+- if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
+- static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+- !static_cpu_has(X86_FEATURE_FLUSH_L1D))
+- return false;
+-
+- return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+-}
+-
+ #define VMX_XSS_EXIT_BITMAP 0
+ /*
+ * Sets up the vmcs for emulated real mode.
+@@ -5247,12 +5237,6 @@ static void vmx_set_nmi_mask(struct kvm_
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ }
+- /*
+- * If flushing the L1D cache on every VMENTER is enforced and the
+- * MSR is available, use the MSR save list.
+- */
+- if (vmx_l1d_use_msr_save_list())
+- add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
+ }
+
+ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+@@ -8314,26 +8298,14 @@ static void vmx_l1d_flush(struct kvm_vcp
+ bool always;
+
+ /*
+- * This code is only executed when:
+- * - the flush mode is 'cond'
+- * - the flush mode is 'always' and the flush MSR is not
+- * available
+- *
+- * If the CPU has the flush MSR then clear the flush bit because
+- * 'always' mode is handled via the MSR save list.
+- *
+- * If the MSR is not avaibable then act depending on the mitigation
+- * mode: If 'flush always', keep the flush bit set, otherwise clear
+- * it.
++ * This code is only executed when the the flush mode is 'cond' or
++ * 'always'
+ *
+- * The flush bit gets set again either from vcpu_run() or from one
+- * of the unsafe VMEXIT handlers.
++ * If 'flush always', keep the flush bit set, otherwise clear
++ * it. The flush bit gets set again either from vcpu_run() or from
++ * one of the unsafe VMEXIT handlers.
+ */
+- if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
+- always = false;
+- else
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+-
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -11214,8 +11186,7 @@ static int __init vmx_setup_l1d_flush(vo
+
+ l1tf_vmx_mitigation = vmentry_l1d_flush;
+
+- if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- vmx_l1d_use_msr_save_list())
++ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+ return 0;
+
+ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
diff --git a/patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch b/patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch
new file mode 100644
index 0000000000..ac3201cf00
--- /dev/null
+++ b/patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch
@@ -0,0 +1,131 @@
+From e7df88f07e31908829a68d29c6da50250e033ffd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:18 +0200
+Subject: [PATCH 03/11] x86/l1tf: Handle EPT disabled state proper
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit a7b9020b06ec6d7c3f3b0d4ef1a9eba12654f4f7 upstream
+
+If Extended Page Tables (EPT) are disabled or not supported, no L1D
+flushing is required. The setup function can just avoid setting up the L1D
+flush for the EPT=n case.
+
+Invoke it after the hardware setup has be done and enable_ept has the
+correct state and expose the EPT disabled state in the mitigation status as
+well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.612160168@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/vmx.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 9 +++++----
+ arch/x86/kvm/vmx.c | 44 ++++++++++++++++++++++++++------------------
+ 3 files changed, 32 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -505,6 +505,7 @@ enum vmx_l1d_flush_state {
+ VMENTER_L1D_FLUSH_NEVER,
+ VMENTER_L1D_FLUSH_COND,
+ VMENTER_L1D_FLUSH_ALWAYS,
++ VMENTER_L1D_FLUSH_EPT_DISABLED,
+ };
+
+ extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -702,10 +702,11 @@ void x86_spec_ctrl_setup_ap(void)
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+ static const char *l1tf_vmx_states[] = {
+- [VMENTER_L1D_FLUSH_AUTO] = "auto",
+- [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
+- [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
+- [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++ [VMENTER_L1D_FLUSH_AUTO] = "auto",
++ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
++ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
++ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++ [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
+ };
+
+ static ssize_t l1tf_show_state(char *buf)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -11184,6 +11184,11 @@ static int __init vmx_setup_l1d_flush(vo
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
++ if (!enable_ept) {
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
++ return 0;
++ }
++
+ l1tf_vmx_mitigation = vmentry_l1d_flush;
+
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+@@ -11210,18 +11215,35 @@ static void vmx_cleanup_l1d_flush(void)
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+
++
++static void __exit vmx_exit(void)
++{
++#ifdef CONFIG_KEXEC_CORE
++ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
++ synchronize_rcu();
++#endif
++
++ kvm_exit();
++
++ vmx_cleanup_l1d_flush();
++}
++module_exit(vmx_exit)
++
+ static int __init vmx_init(void)
+ {
+ int r;
+
+- r = vmx_setup_l1d_flush();
++ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
++ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+ return r;
+
+- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+- __alignof__(struct vcpu_vmx), THIS_MODULE);
++ /*
++ * Must be called after kvm_init() so enable_ept is properly set up
++ */
++ r = vmx_setup_l1d_flush();
+ if (r) {
+- vmx_cleanup_l1d_flush();
++ vmx_exit();
+ return r;
+ }
+
+@@ -11232,18 +11254,4 @@ static int __init vmx_init(void)
+
+ return 0;
+ }
+-
+-static void __exit vmx_exit(void)
+-{
+-#ifdef CONFIG_KEXEC_CORE
+- RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+- synchronize_rcu();
+-#endif
+-
+- kvm_exit();
+-
+- vmx_cleanup_l1d_flush();
+-}
+-
+ module_init(vmx_init)
+-module_exit(vmx_exit)
diff --git a/patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch b/patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch
new file mode 100644
index 0000000000..ee5e660c8d
--- /dev/null
+++ b/patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch
@@ -0,0 +1,208 @@
+From 97b75b4eb58eeeab1422d73a6f275d6e7fe0bd34 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:19 +0200
+Subject: [PATCH 04/11] x86/kvm: Move l1tf setup function
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 7db92e165ac814487264632ab2624e832f20ae38 upstream
+
+In preparation of allowing run time control for L1D flushing, move the
+setup code to the module parameter handler.
+
+In case of pre module init parsing, just store the value and let vmx_init()
+do the actual setup after running kvm_init() so that enable_ept is having
+the correct state.
+
+During run-time invoke it directly from the parameter setter to prepare for
+run-time control.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.694063239@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 125 +++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 78 insertions(+), 47 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -178,7 +178,8 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+
+-static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
++/* Storage for pre module init parameter parsing */
++static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+
+ static const struct {
+ const char *option;
+@@ -190,33 +191,85 @@ static const struct {
+ {"always", VMENTER_L1D_FLUSH_ALWAYS},
+ };
+
+-static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++#define L1D_CACHE_ORDER 4
++static void *vmx_l1d_flush_pages;
++
++static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+ {
+- unsigned int i;
++ struct page *page;
+
+- if (!s)
+- return -EINVAL;
++ /* If set to 'auto' select 'cond' */
++ if (l1tf == VMENTER_L1D_FLUSH_AUTO)
++ l1tf = VMENTER_L1D_FLUSH_COND;
+
+- for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (!strcmp(s, vmentry_l1d_param[i].option)) {
+- vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
+- return 0;
+- }
++ if (!enable_ept) {
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
++ return 0;
+ }
+
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
++ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++ vmx_l1d_flush_pages = page_address(page);
++ }
++
++ l1tf_vmx_mitigation = l1tf;
++
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
++ static_branch_enable(&vmx_l1d_should_flush);
++ return 0;
++}
++
++static int vmentry_l1d_flush_parse(const char *s)
++{
++ unsigned int i;
++
++ if (s) {
++ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
++ if (!strcmp(s, vmentry_l1d_param[i].option))
++ return vmentry_l1d_param[i].cmd;
++ }
++ }
+ return -EINVAL;
+ }
+
++static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++{
++ int l1tf;
++
++ if (!boot_cpu_has(X86_BUG_L1TF))
++ return 0;
++
++ l1tf = vmentry_l1d_flush_parse(s);
++ if (l1tf < 0)
++ return l1tf;
++
++ /*
++ * Has vmx_init() run already? If not then this is the pre init
++ * parameter parsing. In that case just store the value and let
++ * vmx_init() do the proper setup after enable_ept has been
++ * established.
++ */
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
++ vmentry_l1d_flush_param = l1tf;
++ return 0;
++ }
++
++ return vmx_setup_l1d_flush(l1tf);
++}
++
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+ {
+- return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
++ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+ }
+
+ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+ };
+-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
+
+ #define NR_AUTOLOAD_MSRS 8
+ #define VMCS02_POOL_SIZE 1
+@@ -8305,7 +8358,7 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * it. The flush bit gets set again either from vcpu_run() or from
+ * one of the unsafe VMEXIT handlers.
+ */
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -11177,34 +11230,6 @@ static struct kvm_x86_ops vmx_x86_ops =
+ .setup_mce = vmx_setup_mce,
+ };
+
+-static int __init vmx_setup_l1d_flush(void)
+-{
+- struct page *page;
+-
+- if (!boot_cpu_has_bug(X86_BUG_L1TF))
+- return 0;
+-
+- if (!enable_ept) {
+- l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+- return 0;
+- }
+-
+- l1tf_vmx_mitigation = vmentry_l1d_flush;
+-
+- if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+- return 0;
+-
+- if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+- if (!page)
+- return -ENOMEM;
+- vmx_l1d_flush_pages = page_address(page);
+- }
+-
+- static_branch_enable(&vmx_l1d_should_flush);
+- return 0;
+-}
+-
+ static void vmx_cleanup_l1d_flush(void)
+ {
+ if (vmx_l1d_flush_pages) {
+@@ -11239,12 +11264,18 @@ static int __init vmx_init(void)
+ return r;
+
+ /*
+- * Must be called after kvm_init() so enable_ept is properly set up
+- */
+- r = vmx_setup_l1d_flush();
+- if (r) {
+- vmx_exit();
+- return r;
++ * Must be called after kvm_init() so enable_ept is properly set
++ * up. Hand the parameter mitigation value in which was stored in
++ * the pre module init parser. If no parameter was given, it will
++ * contain 'auto' which will be turned into the default 'cond'
++ * mitigation mode.
++ */
++ if (boot_cpu_has(X86_BUG_L1TF)) {
++ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
++ if (r) {
++ vmx_exit();
++ return r;
++ }
+ }
+
+ #ifdef CONFIG_KEXEC_CORE
diff --git a/patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch b/patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch
new file mode 100644
index 0000000000..4720f491a4
--- /dev/null
+++ b/patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch
@@ -0,0 +1,68 @@
+From 4869e95527728ca067e0111eca899023cd6b3652 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:20 +0200
+Subject: [PATCH 05/11] x86/kvm: Add static key for flush always
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 4c6523ec59fe895ea352a650218a6be0653910b1 upstream
+
+Avoid the conditional in the L1D flush control path.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.790914912@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -177,6 +177,7 @@ module_param(ple_window_max, int, S_IRUG
+ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
+
+ /* Storage for pre module init parameter parsing */
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+@@ -217,8 +218,12 @@ static int vmx_setup_l1d_flush(enum vmx_
+
+ l1tf_vmx_mitigation = l1tf;
+
+- if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+- static_branch_enable(&vmx_l1d_should_flush);
++ if (l1tf == VMENTER_L1D_FLUSH_NEVER)
++ return 0;
++
++ static_branch_enable(&vmx_l1d_should_flush);
++ if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
++ static_branch_enable(&vmx_l1d_flush_always);
+ return 0;
+ }
+
+@@ -8348,7 +8353,6 @@ static void *vmx_l1d_flush_pages;
+ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+- bool always;
+
+ /*
+ * This code is only executed when the the flush mode is 'cond' or
+@@ -8358,8 +8362,10 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * it. The flush bit gets set again either from vcpu_run() or from
+ * one of the unsafe VMEXIT handlers.
+ */
+- always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
+- vcpu->arch.l1tf_flush_l1d = always;
++ if (static_branch_unlikely(&vmx_l1d_flush_always))
++ vcpu->arch.l1tf_flush_l1d = true;
++ else
++ vcpu->arch.l1tf_flush_l1d = false;
+
+ vcpu->stat.l1d_flush++;
+
diff --git a/patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch b/patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch
new file mode 100644
index 0000000000..99048a87c4
--- /dev/null
+++ b/patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch
@@ -0,0 +1,54 @@
+From 7ac0b796e89e597baabffb9a26b173ca557df876 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:21 +0200
+Subject: [PATCH 06/11] x86/kvm: Serialize L1D flush parameter setter
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit dd4bfa739a72508b75760b393d129ed7b431daab upstream
+
+Writes to the parameter files are not serialized at the sysfs core
+level, so local serialization is required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.873642605@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -178,6 +178,7 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
++static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+ /* Storage for pre module init parameter parsing */
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+@@ -242,7 +243,7 @@ static int vmentry_l1d_flush_parse(const
+
+ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+ {
+- int l1tf;
++ int l1tf, ret;
+
+ if (!boot_cpu_has(X86_BUG_L1TF))
+ return 0;
+@@ -262,7 +263,10 @@ static int vmentry_l1d_flush_set(const c
+ return 0;
+ }
+
+- return vmx_setup_l1d_flush(l1tf);
++ mutex_lock(&vmx_l1d_flush_mutex);
++ ret = vmx_setup_l1d_flush(l1tf);
++ mutex_unlock(&vmx_l1d_flush_mutex);
++ return ret;
+ }
+
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
diff --git a/patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch b/patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch
new file mode 100644
index 0000000000..db7e406eed
--- /dev/null
+++ b/patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch
@@ -0,0 +1,78 @@
+From fa2ecdf1e0d193c282bd8bd81d9af6cbd6e9483e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:22 +0200
+Subject: [PATCH 07/11] x86/kvm: Allow runtime control of L1D flush
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 895ae47f9918833c3a880fbccd41e0692b37e7d9 upstream
+
+All mitigation modes can be switched at run time with a static key now:
+
+ - Use sysfs_streq() instead of strcmp() to handle the trailing new line
+ from sysfs writes correctly.
+ - Make the static key management handle multiple invocations properly.
+ - Set the module parameter file to RW
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.954525119@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kvm/vmx.c | 13 ++++++++-----
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -215,7 +215,7 @@ static void x86_amd_ssb_disable(void)
+ }
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+-enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
++enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+ #endif
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -219,12 +219,15 @@ static int vmx_setup_l1d_flush(enum vmx_
+
+ l1tf_vmx_mitigation = l1tf;
+
+- if (l1tf == VMENTER_L1D_FLUSH_NEVER)
+- return 0;
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
++ static_branch_enable(&vmx_l1d_should_flush);
++ else
++ static_branch_disable(&vmx_l1d_should_flush);
+
+- static_branch_enable(&vmx_l1d_should_flush);
+ if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
+ static_branch_enable(&vmx_l1d_flush_always);
++ else
++ static_branch_disable(&vmx_l1d_flush_always);
+ return 0;
+ }
+
+@@ -234,7 +237,7 @@ static int vmentry_l1d_flush_parse(const
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (!strcmp(s, vmentry_l1d_param[i].option))
++ if (sysfs_streq(s, vmentry_l1d_param[i].option))
+ return vmentry_l1d_param[i].cmd;
+ }
+ }
+@@ -278,7 +281,7 @@ static const struct kernel_param_ops vme
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+ };
+-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
+
+ #define NR_AUTOLOAD_MSRS 8
+ #define VMCS02_POOL_SIZE 1
diff --git a/patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch b/patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch
new file mode 100644
index 0000000000..412af066e6
--- /dev/null
+++ b/patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch
@@ -0,0 +1,71 @@
+From 338b2f8743e1c0ee1f4cd1ef8a02499846212733 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 13 Jul 2018 16:23:23 +0200
+Subject: [PATCH 08/11] cpu/hotplug: Expose SMT control init function
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 8e1b706b6e819bed215c0db16345568864660393 upstream
+
+The L1TF mitigation will gain a command line parameter which allows setting
+a combination of hypervisor mitigation and SMT control.
+
+Expose cpu_smt_disable() so the command line parser can tweak SMT settings.
+
+[ tglx: Split out of larger patch and made it preserve an already existing
+ force off state ]
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.039715135@linutronix.de
+---
+ include/linux/cpu.h | 2 ++
+ kernel/cpu.c | 16 +++++++++++++---
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -304,8 +304,10 @@ enum cpuhp_smt_control {
+
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
++extern void cpu_smt_disable(bool force);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
++static inline void cpu_smt_disable(bool force) { }
+ #endif
+
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -471,13 +471,23 @@ EXPORT_SYMBOL(cpu_down);
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+ EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+-static int __init smt_cmdline_disable(char *str)
++void __init cpu_smt_disable(bool force)
+ {
+- cpu_smt_control = CPU_SMT_DISABLED;
+- if (str && !strcmp(str, "force")) {
++ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
++ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return;
++
++ if (force) {
+ pr_info("SMT: Force disabled\n");
+ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ } else {
++ cpu_smt_control = CPU_SMT_DISABLED;
+ }
++}
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_disable(str && !strcmp(str, "force"));
+ return 0;
+ }
+ early_param("nosmt", smt_cmdline_disable);
diff --git a/patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch b/patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch
new file mode 100644
index 0000000000..2ab8cc3979
--- /dev/null
+++ b/patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch
@@ -0,0 +1,89 @@
+From 62e81568e375272dc52a649e841b496dd1c12a43 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:24 +0200
+Subject: [PATCH 09/11] cpu/hotplug: Set CPU_SMT_NOT_SUPPORTED early
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit fee0aede6f4739c87179eca76136f83210953b86 upstream
+
+The CPU_SMT_NOT_SUPPORTED state is set (if the processor does not support
+SMT) when the sysfs SMT control file is initialized.
+
+That was fine so far as this was only required to make the output of the
+control file correct and to prevent writes in that case.
+
+With the upcoming l1tf command line parameter, this needs to be set up
+before the L1TF mitigation selection and command line parsing happens.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.121795971@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kernel/cpu/bugs.c | 6 ++++++
+ include/linux/cpu.h | 2 ++
+ kernel/cpu.c | 13 ++++++++++---
+ 3 files changed, 18 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -58,6 +58,12 @@ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+
++ /*
++ * identify_boot_cpu() initialized SMT support information, let the
++ * core code know.
++ */
++ cpu_smt_check_topology();
++
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -305,9 +305,11 @@ enum cpuhp_smt_control {
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
+ extern void cpu_smt_disable(bool force);
++extern void cpu_smt_check_topology(void);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
+ static inline void cpu_smt_disable(bool force) { }
++static inline void cpu_smt_check_topology(void) { }
+ #endif
+
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -485,6 +485,16 @@ void __init cpu_smt_disable(bool force)
+ }
+ }
+
++/*
++ * The decision whether SMT is supported can only be done after the full
++ * CPU identification. Called from architecture code.
++ */
++void __init cpu_smt_check_topology(void)
++{
++ if (!topology_smt_supported())
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++}
++
+ static int __init smt_cmdline_disable(char *str)
+ {
+ cpu_smt_disable(str && !strcmp(str, "force"));
+@@ -1044,9 +1054,6 @@ static const struct attribute_group cpuh
+
+ static int __init cpu_smt_state_init(void)
+ {
+- if (!topology_smt_supported())
+- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+-
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+ }
diff --git a/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch b/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
new file mode 100644
index 0000000000..b1193f4827
--- /dev/null
+++ b/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
@@ -0,0 +1,370 @@
+From 2edd564f0dbf8f19717786c31776aa5e06b42253 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 13 Jul 2018 16:23:25 +0200
+Subject: [PATCH 10/11] x86/bugs, kvm: Introduce boot-time control of L1TF
+ mitigations
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit d90a7a0ec83fb86622cd7dae23255d3c50a99ec8 upstream
+
+Introduce the 'l1tf=' kernel command line option to allow for boot-time
+switching of mitigation that is used on processors affected by L1TF.
+
+The possible values are:
+
+ full
+ Provides all available mitigations for the L1TF vulnerability. Disables
+ SMT and enables all mitigations in the hypervisors. SMT control via
+ /sys/devices/system/cpu/smt/control is still possible after boot.
+ Hypervisors will issue a warning when the first VM is started in
+ a potentially insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ full,force
+ Same as 'full', but disables SMT control. Implies the 'nosmt=force'
+ command line option. sysfs control of SMT and the hypervisor flush
+ control is disabled.
+
+ flush
+ Leaves SMT enabled and enables the conditional hypervisor mitigation.
+ Hypervisors will issue a warning when the first VM is started in a
+ potentially insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ flush,nosmt
+ Disables SMT and enables the conditional hypervisor mitigation. SMT
+ control via /sys/devices/system/cpu/smt/control is still possible
+ after boot. If SMT is reenabled or flushing disabled at runtime
+ hypervisors will issue a warning.
+
+ flush,nowarn
+ Same as 'flush', but hypervisors will not warn when
+ a VM is started in a potentially insecure configuration.
+
+ off
+ Disables hypervisor mitigations and doesn't emit any warnings.
+
+Default is 'flush'.
+
+Let KVM adhere to these semantics, which means:
+
+ - 'l1tf=full,force' : Perform L1D flushes. No runtime control
+ possible.
+
+ - 'l1tf=full'
+ - 'l1tf=flush'
+ - 'l1tf=flush,nosmt' : Perform L1D flushes and warn on VM start if
+ SMT has been runtime enabled or L1D flushing
+ has been run-time enabled
+
+ - 'l1tf=flush,nowarn' : Perform L1D flushes and no warnings are emitted.
+
+ - 'l1tf=off' : L1D flushes are not performed and no warnings
+ are emitted.
+
+KVM can always override the L1D flushing behavior using its 'vmentry_l1d_flush'
+module parameter except when l1tf=full,force is set.
+
+This makes KVM's private 'nosmt' option redundant, and as it is a bit
+non-systematic anyway (this is something to control globally, not on
+hypervisor level), remove that option.
+
+Add the missing Documentation entry for the l1tf vulnerability sysfs file
+while at it.
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.202758176@linutronix.de
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 4 +
+ Documentation/kernel-parameters.txt | 68 +++++++++++++++++++--
+ arch/x86/include/asm/processor.h | 12 +++
+ arch/x86/kernel/cpu/bugs.c | 45 +++++++++++++
+ arch/x86/kvm/vmx.c | 56 +++++++++++++----
+ 5 files changed, 166 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -871,4 +871,16 @@ bool xen_set_default_idle(void);
+
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
++
++enum l1tf_mitigations {
++ L1TF_MITIGATION_OFF,
++ L1TF_MITIGATION_FLUSH_NOWARN,
++ L1TF_MITIGATION_FLUSH,
++ L1TF_MITIGATION_FLUSH_NOSMT,
++ L1TF_MITIGATION_FULL,
++ L1TF_MITIGATION_FULL_FORCE
++};
++
++extern enum l1tf_mitigations l1tf_mitigation;
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -220,7 +220,11 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++/* Default mitigation for L1TF-affected CPUs */
++enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
++EXPORT_SYMBOL_GPL(l1tf_mitigation);
++
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+ #endif
+@@ -232,6 +236,20 @@ static void __init l1tf_select_mitigatio
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ case L1TF_MITIGATION_FLUSH:
++ break;
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ case L1TF_MITIGATION_FULL:
++ cpu_smt_disable(false);
++ break;
++ case L1TF_MITIGATION_FULL_FORCE:
++ cpu_smt_disable(true);
++ break;
++ }
++
+ #if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+@@ -251,6 +269,33 @@ static void __init l1tf_select_mitigatio
+ setup_force_cpu_cap(X86_FEATURE_L1TF_FIX);
+ }
+
++
++static int __init l1tf_cmdline(char *str)
++{
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return 0;
++
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "off"))
++ l1tf_mitigation = L1TF_MITIGATION_OFF;
++ else if (!strcmp(str, "flush,nowarn"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
++ else if (!strcmp(str, "flush"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH;
++ else if (!strcmp(str, "flush,nosmt"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
++ else if (!strcmp(str, "full"))
++ l1tf_mitigation = L1TF_MITIGATION_FULL;
++ else if (!strcmp(str, "full,force"))
++ l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
++
++ return 0;
++}
++early_param("l1tf", l1tf_cmdline);
++
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -68,9 +68,6 @@ static const struct x86_cpu_id vmx_cpu_i
+ };
+ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
+-static bool __read_mostly nosmt;
+-module_param(nosmt, bool, S_IRUGO);
+-
+ static bool __read_mostly enable_vpid = 1;
+ module_param_named(vpid, enable_vpid, bool, 0444);
+
+@@ -200,15 +197,31 @@ static int vmx_setup_l1d_flush(enum vmx_
+ {
+ struct page *page;
+
+- /* If set to 'auto' select 'cond' */
+- if (l1tf == VMENTER_L1D_FLUSH_AUTO)
+- l1tf = VMENTER_L1D_FLUSH_COND;
+-
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+ return 0;
+ }
+
++ /* If set to auto use the default l1tf mitigation method */
++ if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ l1tf = VMENTER_L1D_FLUSH_NEVER;
++ break;
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ case L1TF_MITIGATION_FLUSH:
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ l1tf = VMENTER_L1D_FLUSH_COND;
++ break;
++ case L1TF_MITIGATION_FULL:
++ case L1TF_MITIGATION_FULL_FORCE:
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
++ break;
++ }
++ } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
++ }
++
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+@@ -9167,16 +9180,33 @@ free_vcpu:
+ return ERR_PTR(err);
+ }
+
+-#define L1TF_MSG "SMT enabled with L1TF CPU bug present. Refer to CVE-2018-3620 for details.\n"
++#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/l1tf.html for details.\n"
++#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/l1tf.html for details.\n"
+
+ static int vmx_vm_init(struct kvm *kvm)
+ {
+- if (boot_cpu_has(X86_BUG_L1TF) && cpu_smt_control == CPU_SMT_ENABLED) {
+- if (nosmt) {
+- pr_err(L1TF_MSG);
+- return -EOPNOTSUPP;
++ if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ /* 'I explicitly don't care' is set */
++ break;
++ case L1TF_MITIGATION_FLUSH:
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ case L1TF_MITIGATION_FULL:
++ /*
++ * Warn upon starting the first VM in a potentially
++ * insecure environment.
++ */
++ if (cpu_smt_control == CPU_SMT_ENABLED)
++ pr_warn_once(L1TF_MSG_SMT);
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
++ pr_warn_once(L1TF_MSG_L1D);
++ break;
++ case L1TF_MITIGATION_FULL_FORCE:
++ /* Flush is enforced */
++ break;
+ }
+- pr_warn(L1TF_MSG);
+ }
+ return 0;
+ }
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -277,6 +277,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
++ /sys/devices/system/cpu/vulnerabilities/l1tf
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+@@ -289,6 +290,9 @@ Description: Information about CPU vulne
+ "Vulnerable" CPU is affected and no mitigation in effect
+ "Mitigation: $M" CPU is affected and mitigation $M is in effect
+
++ Details about the l1tf file can be found in
++ Documentation/l1tf.rst
++
+ What: /sys/devices/system/cpu/smt
+ /sys/devices/system/cpu/smt/active
+ /sys/devices/system/cpu/smt/control
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1862,12 +1862,6 @@ bytes respectively. Such letter suffixes
+ for all guests.
+ Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.
+
+- kvm-intel.nosmt=[KVM,Intel] If the L1TF CPU bug is present (CVE-2018-3620)
+- and the system has SMT (aka Hyper-Threading) enabled then
+- don't allow guests to be created.
+-
+- Default is 0 (allow guests to be created).
+-
+ kvm-intel.ept= [KVM,Intel] Disable extended page tables
+ (virtualized MMU) support on capable Intel chips.
+ Default is 1 (enabled)
+@@ -1909,6 +1903,68 @@ bytes respectively. Such letter suffixes
+ Disables the paravirtualized spinlock slowpath
+ optimizations for KVM.
+
++ l1tf= [X86] Control mitigation of the L1TF vulnerability on
++ affected CPUs
++
++ The kernel PTE inversion protection is unconditionally
++ enabled and cannot be disabled.
++
++ full
++ Provides all available mitigations for the
++ L1TF vulnerability. Disables SMT and
++ enables all mitigations in the
++ hypervisors, i.e. unconditional L1D flush.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ full,force
++ Same as 'full', but disables SMT and L1D
++ flush runtime control. Implies the
++ 'nosmt=force' command line option.
++ (i.e. sysfs control of SMT is disabled.)
++
++ flush
++ Leaves SMT enabled and enables the default
++ hypervisor mitigation, i.e. conditional
++ L1D flush.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ flush,nosmt
++
++ Disables SMT and enables the default
++ hypervisor mitigation.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ flush,nowarn
++ Same as 'flush', but hypervisors will not
++ warn when a VM is started in a potentially
++ insecure configuration.
++
++ off
++ Disables hypervisor mitigations and doesn't
++ emit any warnings.
++
++ Default is 'flush'.
++
++ For details see: Documentation/l1tf.rst
++
+ l2cr= [PPC]
+
+ l3cr= [PPC]
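The vmx_vm_init() change above keys its warnings off the l1tf_mitigation state selected by the "l1tf=" option documented in this hunk. As a minimal illustrative sketch only (the actual command line parser is added by another patch in this series and may differ in detail), an early_param handler mapping the documented keywords onto those states could look like this:

/* Illustrative sketch only: map the documented "l1tf=" keywords. */
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);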
diff --git a/patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch b/patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch
new file mode 100644
index 0000000000..5843bbf2dd
--- /dev/null
+++ b/patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch
@@ -0,0 +1,622 @@
+From b4f6a2228077ea61b5944835cc67aba83cc9e82d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:26 +0200
+Subject: [PATCH 11/11] Documentation: Add section about CPU vulnerabilities
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 3ec8ce5d866ec6a08a9cfab82b62acf4a830b35f upstream
+
+Add documentation for the L1TF vulnerability and the mitigation mechanisms:
+
+ - Explain the problem and risks
+ - Document the mitigation mechanisms
+ - Document the command line controls
+ - Document the sysfs files
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lkml.kernel.org/r/20180713142323.287429944@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ Documentation/l1tf.rst | 591 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 591 insertions(+)
+ create mode 100644 Documentation/l1tf.rst
+
+--- /dev/null
++++ b/Documentation/l1tf.rst
+@@ -0,0 +1,591 @@
++L1TF - L1 Terminal Fault
++========================
++
++L1 Terminal Fault is a hardware vulnerability which allows unprivileged
++speculative access to data which is available in the Level 1 Data Cache
++when the page table entry controlling the virtual address, which is used
++for the access, has the Present bit cleared or other reserved bits set.
++
++Affected processors
++-------------------
++
++This vulnerability affects a wide range of Intel processors. The
++vulnerability is not present on:
++
++ - Processors from AMD, Centaur and other non Intel vendors
++
++ - Older processor models, where the CPU family is < 6
++
++ - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
++    Penwell, Pineview, Silvermont, Airmont, Merrifield)
++
++ - The Intel Core Duo Yonah variants (2006 - 2008)
++
++ - The Intel XEON PHI family
++
++ - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
++ IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
++ by the Meltdown vulnerability either. These CPUs should become
++    available by the end of 2018.
++
++Whether a processor is affected or not can be read out from the L1TF
++vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
++
++Related CVEs
++------------
++
++The following CVE entries are related to the L1TF vulnerability:
++
++ ============= ================= ==============================
++ CVE-2018-3615 L1 Terminal Fault SGX related aspects
++ CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects
++ CVE-2018-3646 L1 Terminal Fault Virtualization related aspects
++ ============= ================= ==============================
++
++Problem
++-------
++
++If an instruction accesses a virtual address for which the relevant page
++table entry (PTE) has the Present bit cleared or other reserved bits set,
++then speculative execution ignores the invalid PTE and loads the referenced
++data if it is present in the Level 1 Data Cache, as if the page referenced
++by the address bits in the PTE was still present and accessible.
++
++While this is a purely speculative mechanism and the instruction will raise
++a page fault when it is retired eventually, the pure act of loading the
++data and making it available to other speculative instructions opens up the
++opportunity for side channel attacks to unprivileged malicious code,
++similar to the Meltdown attack.
++
++While Meltdown breaks the user space to kernel space protection, L1TF
++allows attacking any physical memory address in the system, and the attack
++works across all protection domains. It allows an attack on SGX and also
++works from inside virtual machines because the speculation bypasses the
++extended page table (EPT) protection mechanism.
++
++
++Attack scenarios
++----------------
++
++1. Malicious user space
++^^^^^^^^^^^^^^^^^^^^^^^
++
++ Operating Systems store arbitrary information in the address bits of a
++ PTE which is marked non present. This allows a malicious user space
++ application to attack the physical memory to which these PTEs resolve.
++ In some cases user-space can maliciously influence the information
++ encoded in the address bits of the PTE, thus making attacks more
++ deterministic and more practical.
++
++ The Linux kernel contains a mitigation for this attack vector, PTE
++ inversion, which is permanently enabled and has no performance
++ impact. The kernel ensures that the address bits of PTEs, which are not
++ marked present, never point to cacheable physical memory space.
++
++ A system with an up to date kernel is protected against attacks from
++ malicious user space applications.
++
++2. Malicious guest in a virtual machine
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The fact that L1TF breaks all domain protections allows malicious guest
++ OSes, which can control the PTEs directly, and malicious guest user
++ space applications, which run on an unprotected guest kernel lacking the
++ PTE inversion mitigation for L1TF, to attack physical host memory.
++
++ A special aspect of L1TF in the context of virtualization is symmetric
++ multi threading (SMT). The Intel implementation of SMT is called
++ HyperThreading. The fact that Hyperthreads on the affected processors
++ share the L1 Data Cache (L1D) is important for this. As the flaw allows
++  share the L1 Data Cache (L1D) is important for this. As the flaw only
++  allows attacking data which is present in L1D, a malicious guest running
++ the context which runs on the sibling Hyperthread of the same physical
++ core. This context can be host OS, host user space or a different guest.
++
++ If the processor does not support Extended Page Tables, the attack is
++  only possible when the hypervisor does not sanitize the content of the
++ effective (shadow) page tables.
++
++ While solutions exist to mitigate these attack vectors fully, these
++ mitigations are not enabled by default in the Linux kernel because they
++ can affect performance significantly. The kernel provides several
++ mechanisms which can be utilized to address the problem depending on the
++ deployment scenario. The mitigations, their protection scope and impact
++ are described in the next sections.
++
++  The default mitigations and the rationale for choosing them are explained
++ at the end of this document. See :ref:`default_mitigations`.
++
++.. _l1tf_sys_info:
++
++L1TF system information
++-----------------------
++
++The Linux kernel provides a sysfs interface to enumerate the current L1TF
++status of the system: whether the system is vulnerable, and which
++mitigations are active. The relevant sysfs file is:
++
++/sys/devices/system/cpu/vulnerabilities/l1tf
++
++The possible values in this file are:
++
++ =========================== ===============================
++ 'Not affected' The processor is not vulnerable
++ 'Mitigation: PTE Inversion' The host protection is active
++ =========================== ===============================
++
++If KVM/VMX is enabled and the processor is vulnerable then the following
++information is appended to the 'Mitigation: PTE Inversion' part:
++
++ - SMT status:
++
++ ===================== ================
++ 'VMX: SMT vulnerable' SMT is enabled
++ 'VMX: SMT disabled' SMT is disabled
++ ===================== ================
++
++ - L1D Flush mode:
++
++ ================================ ====================================
++ 'L1D vulnerable' L1D flushing is disabled
++
++ 'L1D conditional cache flushes' L1D flush is conditionally enabled
++
++ 'L1D cache flushes' L1D flush is unconditionally enabled
++ ================================ ====================================
++
++The resulting grade of protection is discussed in the following sections.
++
++
++Host mitigation mechanism
++-------------------------
++
++The kernel is unconditionally protected against L1TF attacks from malicious
++user space running on the host.
++
++
++Guest mitigation mechanisms
++---------------------------
++
++.. _l1d_flush:
++
++1. L1D flush on VMENTER
++^^^^^^^^^^^^^^^^^^^^^^^
++
++ To make sure that a guest cannot attack data which is present in the L1D
++ the hypervisor flushes the L1D before entering the guest.
++
++ Flushing the L1D evicts not only the data which should not be accessed
++ by a potentially malicious guest, it also flushes the guest
++ data. Flushing the L1D has a performance impact as the processor has to
++ bring the flushed guest data back into the L1D. Depending on the
++  frequency of VMEXIT/VMENTER and the type of computations in the guest,
++  performance degradation in the range of 1% to 50% has been observed. For
++  scenarios where guest VMEXIT/VMENTER are rare, the performance impact is
++ minimal. Virtio and mechanisms like posted interrupts are designed to
++ confine the VMEXITs to a bare minimum, but specific configurations and
++ application scenarios might still suffer from a high VMEXIT rate.
++
++ The kernel provides two L1D flush modes:
++ - conditional ('cond')
++ - unconditional ('always')
++
++ The conditional mode avoids L1D flushing after VMEXITs which execute
++  only audited code paths before the corresponding VMENTER. These code
++  paths have been verified not to expose secrets or other
++ interesting data to an attacker, but they can leak information about the
++ address space layout of the hypervisor.
++
++ Unconditional mode flushes L1D on all VMENTER invocations and provides
++ maximum protection. It has a higher overhead than the conditional
++ mode. The overhead cannot be quantified correctly as it depends on the
++ work load scenario and the resulting number of VMEXITs.
++
++ The general recommendation is to enable L1D flush on VMENTER. The kernel
++ defaults to conditional mode on affected processors.
++
++  **Note** that L1D flush does not prevent the SMT problem because the
++ sibling thread will also bring back its data into the L1D which makes it
++ attackable again.
++
++ L1D flush can be controlled by the administrator via the kernel command
++ line and sysfs control files. See :ref:`mitigation_control_command_line`
++ and :ref:`mitigation_control_kvm`.
++
++.. _guest_confinement:
++
++2. Guest VCPU confinement to dedicated physical cores
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ To address the SMT problem, it is possible to make a guest or a group of
++ guests affine to one or more physical cores. The proper mechanism for
++ that is to utilize exclusive cpusets to ensure that no other guest or
++ host tasks can run on these cores.
++
++ If only a single guest or related guests run on sibling SMT threads on
++ the same physical core then they can only attack their own memory and
++ restricted parts of the host memory.
++
++  Host memory is attackable when one of the sibling SMT threads runs in
++ host OS (hypervisor) context and the other in guest context. The amount
++ of valuable information from the host OS context depends on the context
++ which the host OS executes, i.e. interrupts, soft interrupts and kernel
++ threads. The amount of valuable data from these contexts cannot be
++ declared as non-interesting for an attacker without deep inspection of
++ the code.
++
++  **Note** that assigning guests to a fixed set of physical cores affects
++ the ability of the scheduler to do load balancing and might have
++ negative effects on CPU utilization depending on the hosting
++ scenario. Disabling SMT might be a viable alternative for particular
++ scenarios.
++
++ For further information about confining guests to a single or to a group
++ of cores consult the cpusets documentation:
++
++ https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
++
++.. _interrupt_isolation:
++
++3. Interrupt affinity
++^^^^^^^^^^^^^^^^^^^^^
++
++ Interrupts can be made affine to logical CPUs. This is not universally
++ true because there are types of interrupts which are truly per CPU
++  interrupts, e.g. the local timer interrupt. Aside from that, multi queue
++ devices affine their interrupts to single CPUs or groups of CPUs per
++ queue without allowing the administrator to control the affinities.
++
++ Moving the interrupts, which can be affinity controlled, away from CPUs
++ which run untrusted guests, reduces the attack vector space.
++
++  Whether the interrupts which are affine to CPUs, which run untrusted
++ guests, provide interesting data for an attacker depends on the system
++ configuration and the scenarios which run on the system. While for some
++  of the interrupts it can be assumed that they won't expose interesting
++ information beyond exposing hints about the host OS memory layout, there
++ is no way to make general assumptions.
++
++ Interrupt affinity can be controlled by the administrator via the
++ /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
++ available at:
++
++ https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
++
++.. _smt_control:
++
++4. SMT control
++^^^^^^^^^^^^^^
++
++ To prevent the SMT issues of L1TF it might be necessary to disable SMT
++ completely. Disabling SMT can have a significant performance impact, but
++ the impact depends on the hosting scenario and the type of workloads.
++  The impact of disabling SMT also needs to be weighed against the impact
++ of other mitigation solutions like confining guests to dedicated cores.
++
++ The kernel provides a sysfs interface to retrieve the status of SMT and
++ to control it. It also provides a kernel command line interface to
++ control SMT.
++
++ The kernel command line interface consists of the following options:
++
++ =========== ==========================================================
++ nosmt Affects the bring up of the secondary CPUs during boot. The
++ kernel tries to bring all present CPUs online during the
++ boot process. "nosmt" makes sure that from each physical
++ core only one - the so called primary (hyper) thread is
++ activated. Due to a design flaw of Intel processors related
++ to Machine Check Exceptions the non primary siblings have
++ to be brought up at least partially and are then shut down
++ again. "nosmt" can be undone via the sysfs interface.
++
++ nosmt=force Has the same effect as "nosmt" but it does not allow undoing
++ the SMT disable via the sysfs interface.
++ =========== ==========================================================
++
++ The sysfs interface provides two files:
++
++ - /sys/devices/system/cpu/smt/control
++ - /sys/devices/system/cpu/smt/active
++
++ /sys/devices/system/cpu/smt/control:
++
++ This file allows reading out the SMT control state and provides the
++ ability to disable or (re)enable SMT. The possible states are:
++
++ ============== ===================================================
++ on SMT is supported by the CPU and enabled. All
++ logical CPUs can be onlined and offlined without
++ restrictions.
++
++ off SMT is supported by the CPU and disabled. Only
++ the so called primary SMT threads can be onlined
++ and offlined without restrictions. An attempt to
++ online a non-primary sibling is rejected
++ online a non-primary sibling is rejected.
++ forceoff Same as 'off' but the state cannot be controlled.
++ Attempts to write to the control file are rejected.
++
++ notsupported The processor does not support SMT. It's therefore
++ not affected by the SMT implications of L1TF.
++ Attempts to write to the control file are rejected.
++ ============== ===================================================
++
++ The possible states which can be written into this file to control SMT
++ state are:
++
++ - on
++ - off
++ - forceoff
++
++ /sys/devices/system/cpu/smt/active:
++
++ This file reports whether SMT is enabled and active, i.e. if on any
++ physical core two or more sibling threads are online.
++
++ SMT control is also possible at boot time via the l1tf kernel command
++ line parameter in combination with L1D flush control. See
++ :ref:`mitigation_control_command_line`.
++
++5. Disabling EPT
++^^^^^^^^^^^^^^^^
++
++ Disabling EPT for virtual machines provides full mitigation for L1TF even
++ with SMT enabled, because the effective page tables for guests are
++ managed and sanitized by the hypervisor. However, disabling EPT has a
++ significant performance impact, especially when the Meltdown mitigation
++ KPTI is enabled.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
++
++There is ongoing research and development for new mitigation mechanisms to
++address the performance impact of disabling SMT or EPT.
++
++.. _mitigation_control_command_line:
++
++Mitigation control on the kernel command line
++---------------------------------------------
++
++The kernel command line allows controlling the L1TF mitigations at boot
++time with the option "l1tf=". The valid arguments for this option are:
++
++ ============ =============================================================
++ full Provides all available mitigations for the L1TF
++ vulnerability. Disables SMT and enables all mitigations in
++ the hypervisors, i.e. unconditional L1D flushing
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ full,force Same as 'full', but disables SMT and L1D flush runtime
++ control. Implies the 'nosmt=force' command line option.
++ (i.e. sysfs control of SMT is disabled.)
++
++ flush Leaves SMT enabled and enables the default hypervisor
++ mitigation, i.e. conditional L1D flushing
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ flush,nosmt Disables SMT and enables the default hypervisor mitigation,
++ i.e. conditional L1D flushing.
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is
++ started in a potentially insecure configuration.
++
++ off Disables hypervisor mitigations and doesn't emit any
++ warnings.
++ ============ =============================================================
++
++The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
++
++
++.. _mitigation_control_kvm:
++
++Mitigation control for KVM - module parameter
++-------------------------------------------------------------
++
++The KVM hypervisor mitigation mechanism, flushing the L1D cache when
++entering a guest, can be controlled with a module parameter.
++
++The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the
++following arguments:
++
++ ============ ==============================================================
++ always L1D cache flush on every VMENTER.
++
++ cond Flush L1D on VMENTER only when the code between VMEXIT and
++ VMENTER can leak host memory which is considered
++ interesting for an attacker. This still can leak host memory
++ which allows e.g. determining the host's address space layout.
++
++ never Disables the mitigation
++ ============ ==============================================================
++
++The parameter can be provided on the kernel command line, as a module
++parameter when loading the modules and at runtime modified via the sysfs
++file:
++
++/sys/module/kvm_intel/parameters/vmentry_l1d_flush
++
++The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
++line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
++module parameter is ignored and writes to the sysfs file are rejected.
++
++
++Mitigation selection guide
++--------------------------
++
++1. No virtualization in use
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The system is protected by the kernel unconditionally and no further
++ action is required.
++
++2. Virtualization with trusted guests
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ If the guest comes from a trusted source and the guest OS kernel is
++ guaranteed to have the L1TF mitigations in place the system is fully
++ protected against L1TF and no further action is required.
++
++ To avoid the overhead of the default L1D flushing on VMENTER the
++ administrator can disable the flushing via the kernel command line and
++ sysfs control files. See :ref:`mitigation_control_command_line` and
++ :ref:`mitigation_control_kvm`.
++
++
++3. Virtualization with untrusted guests
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++3.1. SMT not supported or disabled
++""""""""""""""""""""""""""""""""""
++
++ If SMT is not supported by the processor or disabled in the BIOS or by
++ the kernel, it's only required to enforce L1D flushing on VMENTER.
++
++ Conditional L1D flushing is the default behaviour and can be tuned. See
++ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
++
++3.2. EPT not supported or disabled
++""""""""""""""""""""""""""""""""""
++
++ If EPT is not supported by the processor or disabled in the hypervisor,
++ the system is fully protected. SMT can stay enabled and L1D flushing on
++ VMENTER is not required.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
++
++3.3. SMT and EPT supported and active
++"""""""""""""""""""""""""""""""""""""
++
++ If SMT and EPT are supported and active then various degrees of
++ mitigations can be employed:
++
++ - L1D flushing on VMENTER:
++
++ L1D flushing on VMENTER is the minimal protection requirement, but it
++ is only potent in combination with other mitigation methods.
++
++ Conditional L1D flushing is the default behaviour and can be tuned. See
++ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
++
++ - Guest confinement:
++
++ Confinement of guests to a single or a group of physical cores which
++ are not running any other processes can reduce the attack surface
++ significantly, but interrupts, soft interrupts and kernel threads can
++ still expose valuable data to a potential attacker. See
++ :ref:`guest_confinement`.
++
++ - Interrupt isolation:
++
++ Isolating the guest CPUs from interrupts can reduce the attack surface
++ further, but still allows a malicious guest to explore a limited amount
++ of host physical memory. This can at least be used to gain knowledge
++ about the host address space layout. The interrupts which have a fixed
++ affinity to the CPUs which run the untrusted guests can, depending on
++ the scenario, still trigger soft interrupts and schedule kernel threads
++ which might expose valuable information. See
++ :ref:`interrupt_isolation`.
++
++The above three mitigation methods combined can provide protection to a
++certain degree, but the risk of the remaining attack surface has to be
++carefully analyzed. For full protection the following methods are
++available:
++
++ - Disabling SMT:
++
++ Disabling SMT and enforcing the L1D flushing provides the maximum
++ amount of protection. This mitigation does not depend on any of the
++ above mitigation methods.
++
++ SMT control and L1D flushing can be tuned by the command line
++ parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run
++ time with the matching sysfs control files. See :ref:`smt_control`,
++ :ref:`mitigation_control_command_line` and
++ :ref:`mitigation_control_kvm`.
++
++ - Disabling EPT:
++
++ Disabling EPT provides the maximum amount of protection as well. It does
++ not depend on any of the above mitigation methods. SMT can stay
++ enabled and L1D flushing is not required, but the performance impact is
++ significant.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
++ parameter.
++
++
++.. _default_mitigations:
++
++Default mitigations
++-------------------
++
++ The kernel default mitigations for vulnerable processors are:
++
++ - PTE inversion to protect against malicious user space. This is done
++ unconditionally and cannot be controlled.
++
++ - L1D conditional flushing on VMENTER when EPT is enabled for
++ a guest.
++
++ The kernel does not by default enforce the disabling of SMT, which leaves
++ SMT systems vulnerable when running untrusted guests with EPT enabled.
++
++ The rationale for this choice is:
++
++ - Force disabling SMT can break existing setups, especially with
++ unattended updates.
++
++ - If regular users run untrusted guests on their machine, then L1TF is
++ just an add-on to other malware which might be embedded in an untrusted
++ guest, e.g. spam-bots or attacks on the local network.
++
++ There is no technical way to prevent a user from running untrusted code
++ on their machines blindly.
++
++ - It's technically extremely unlikely and from today's knowledge even
++ impossible that L1TF can be exploited via the most popular attack
++ mechanisms like JavaScript because these mechanisms have no way to
++ control PTEs. If this were possible and no other mitigation were
++ available, then the default might be different.
++
++ - The administrators of cloud and hosting setups have to carefully
++ analyze the risk for their scenarios and make the appropriate
++ mitigation choices, which might even vary across their deployed
++ machines and also result in other changes of their overall setup.
++ There is no way for the kernel to provide a sensible default for this
++ There is no way for the kernel to provide a sensible default for such scenarios.
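The status files described in this document can be inspected from user space with ordinary file reads. A small hypothetical helper (not part of this patch series) that dumps them could look like this:

/* Hypothetical user-space helper: print the L1TF and SMT status files. */
#include <stdio.h>

static void dump(const char *path)
{
	char buf[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-55s %s", path, buf);	/* sysfs values end with '\n' */
	fclose(f);
}

int main(void)
{
	dump("/sys/devices/system/cpu/vulnerabilities/l1tf");
	dump("/sys/devices/system/cpu/smt/control");
	dump("/sys/devices/system/cpu/smt/active");
	dump("/sys/module/kvm_intel/parameters/vmentry_l1d_flush");
	return 0;
}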
diff --git a/patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch b/patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch
new file mode 100644
index 0000000000..061fa9002e
--- /dev/null
+++ b/patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch
@@ -0,0 +1,49 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Subject: [PATCH] cpu/hotplug: detect SMT disabled by BIOS
+Patch-mainline: Not yet, under development
+References: bsc#1089343 CVE-2018-3646
+
+commit 73d5e2b472640b1fcdb61ae8be389912ef211bda upstream
+
+If SMT is disabled in BIOS, the CPU code doesn't properly detect it.
+The /sys/devices/system/cpu/smt/control file shows 'on', and the 'l1tf'
+vulnerabilities file shows SMT as vulnerable.
+
+Fix it by forcing 'cpu_smt_control' to CPU_SMT_NOT_SUPPORTED in such a
+case. Unfortunately the detection can only be done after bringing all
+the CPUs online, so we have to overwrite any previous writes to the
+variable.
+
+Reported-by: Joe Mario <jmario@redhat.com>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Fixes: f048c399e0f7 ("x86/topology: Provide topology_smt_supported()")
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ kernel/cpu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 39b9e0c65f12..4547bc72febb 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2137,6 +2137,15 @@ static const struct attribute_group cpuhp_smt_attr_group = {
+
+ static int __init cpu_smt_state_init(void)
+ {
++ /*
++ * If SMT was disabled by BIOS, detect it here, after the CPUs have
++ * been brought online. This ensures the smt/l1tf sysfs entries are
++ * consistent with reality. Note this may overwrite cpu_smt_control's
++ * previous setting.
++ */
++ if (topology_max_smt_threads() == 1)
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+ }
+--
+2.12.3
+
diff --git a/patches.arch/01-sched-smt-update-sched_smt_present-at-runtime.patch b/patches.arch/01-sched-smt-update-sched_smt_present-at-runtime.patch
new file mode 100644
index 0000000000..12b2988f82
--- /dev/null
+++ b/patches.arch/01-sched-smt-update-sched_smt_present-at-runtime.patch
@@ -0,0 +1,93 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 15 Jun 2018 18:28:02 +0200
+Subject: sched/smt: Update sched_smt_present at runtime
+Git-commit: 2876762b440055e2cb3f6fff66bb2b5c07bdbca0
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+The static key sched_smt_present is only updated at boot time when SMT
+siblings have been detected. Booting with maxcpus=1 and bringing the
+siblings online after boot rebuilds the scheduling domains correctly but
+does not update the static key, so the SMT code is not enabled.
+
+Let the scheduler CPU hotplug code update the key to fix this.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20180615162947.510962250@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/sched/core.c | 32 ++++++++++++++------------------
+ kernel/sched/fair.c | 1 +
+ 2 files changed, 15 insertions(+), 18 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5986,6 +5986,20 @@ static int sched_cpu_active(struct notif
+ return NOTIFY_OK;
+
+ case CPU_ONLINE:
++
++#ifdef CONFIG_SCHED_SMT
++ /*
++ * The sched_smt_present static key needs to be evaluated on every
++ * hotplug event because at boot time SMT might be disabled when
++ * the number of booted CPUs is limited.
++ *
++ * If then later a sibling gets hotplugged, then the key would stay
++ * off and SMT scheduling would never be functional.
++ */
++ if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
++ static_branch_enable(&sched_smt_present);
++#endif
++
+ /*
+ * At this point a starting CPU has marked itself as online via
+ * set_cpu_online(). But it might not yet have marked itself
+@@ -7807,22 +7821,6 @@ static int cpuset_cpu_inactive(struct no
+ return NOTIFY_OK;
+ }
+
+-#ifdef CONFIG_SCHED_SMT
+-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+-
+-static void sched_init_smt(void)
+-{
+- /*
+- * We've enumerated all CPUs and will assume that if any CPU
+- * has SMT siblings, CPU0 will too.
+- */
+- if (cpumask_weight(cpu_smt_mask(0)) > 1)
+- static_branch_enable(&sched_smt_present);
+-}
+-#else
+-static inline void sched_init_smt(void) { }
+-#endif
+-
+ void __init sched_init_smp(void)
+ {
+ cpumask_var_t non_isolated_cpus;
+@@ -7854,8 +7852,6 @@ void __init sched_init_smp(void)
+ init_sched_rt_class();
+ init_sched_dl_class();
+
+- sched_init_smt();
+-
+ sched_smp_initialized = true;
+ }
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5293,6 +5293,7 @@ find_idlest_cpu(struct sched_group *grou
+ }
+
+ #ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+
+ static inline void set_idle_cores(int cpu, int val)
+ {
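The hunk above only arms the sched_smt_present static key from the hotplug path; consumers are expected to guard SMT-specific work with it, roughly as in the following illustrative fragment (hypothetical function name, not taken from this patch):

/* Illustrative consumer of the static key (hypothetical example). */
#ifdef CONFIG_SCHED_SMT
static bool cpu_has_online_smt_sibling(int cpu)
{
	/* Patched to a NOP branch until a sibling actually comes online. */
	if (!static_branch_likely(&sched_smt_present))
		return false;

	return cpumask_weight(cpu_smt_mask(cpu)) > 1;
}
#endif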
diff --git a/patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch b/patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch
new file mode 100644
index 0000000000..990c568c73
--- /dev/null
+++ b/patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch
@@ -0,0 +1,102 @@
+From c038a1e28449578f948cc2ab09758e8798c1c2c6 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 11:29:53 -0400
+Subject: [PATCH 31/40] x86/KVM: Warn user if KVM is loaded SMT and L1TF CPU
+ bug being present
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 26acfb666a473d960f0fd971fe68f3e3ad16c70b upstream
+
+If the L1TF CPU bug is present we allow the KVM module to be loaded, as the
+majority of users that use Linux and KVM have trusted guests and do not want a
+broken setup.
+
+Cloud vendors are the ones that are uncomfortable with CVE-2018-3620 and as
+such they are the ones that should set nosmt to one.
+
+Setting 'nosmt' means that the system administrator also needs to disable
+SMT (Hyper-threading) in the BIOS, or via the 'nosmt' command line
+parameter, or via the /sys/devices/system/cpu/smt/control. See commit
+05736e4ac13c ("cpu/hotplug: Provide knobs to control SMT").
+
+Other mitigations are to use task affinity, cpu sets, interrupt binding,
+etc - anything to make sure that _only_ the same guests vCPUs are running
+on sibling threads.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ Documentation/kernel-parameters.txt | 6 ++++++
+ arch/x86/kvm/vmx.c | 19 +++++++++++++++++++
+ kernel/cpu.c | 1 +
+ 3 files changed, 26 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -68,6 +68,9 @@ static const struct x86_cpu_id vmx_cpu_i
+ };
+ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
++static bool __read_mostly nosmt;
++module_param(nosmt, bool, S_IRUGO);
++
+ static bool __read_mostly enable_vpid = 1;
+ module_param_named(vpid, enable_vpid, bool, 0444);
+
+@@ -8966,6 +8969,20 @@ free_vcpu:
+ return ERR_PTR(err);
+ }
+
++#define L1TF_MSG "SMT enabled with L1TF CPU bug present. Refer to CVE-2018-3620 for details.\n"
++
++static int vmx_vm_init(struct kvm *kvm)
++{
++ if (boot_cpu_has(X86_BUG_L1TF) && cpu_smt_control == CPU_SMT_ENABLED) {
++ if (nosmt) {
++ pr_err(L1TF_MSG);
++ return -EOPNOTSUPP;
++ }
++ pr_warn(L1TF_MSG);
++ }
++ return 0;
++}
++
+ static void __init vmx_check_processor_compat(void *rtn)
+ {
+ struct vmcs_config vmcs_conf;
+@@ -10908,6 +10925,8 @@ static struct kvm_x86_ops vmx_x86_ops =
+ .cpu_has_accelerated_tpr = report_flexpriority,
+ .has_emulated_msr = vmx_has_emulated_msr,
+
++ .vm_init = vmx_vm_init,
++
+ .vcpu_create = vmx_create_vcpu,
+ .vcpu_free = vmx_free_vcpu,
+ .vcpu_reset = vmx_vcpu_reset,
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1862,6 +1862,12 @@ bytes respectively. Such letter suffixes
+ for all guests.
+ Default is 1 (enabled) if in 64-bit or 32-bit PAE mode.
+
++ kvm-intel.nosmt=[KVM,Intel] If the L1TF CPU bug is present (CVE-2018-3620)
++ and the system has SMT (aka Hyper-Threading) enabled then
++ don't allow guests to be created.
++
++ Default is 0 (allow guests to be created).
++
+ kvm-intel.ept= [KVM,Intel] Disable extended page tables
+ (virtualized MMU) support on capable Intel chips.
+ Default is 1 (enabled)
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -469,6 +469,7 @@ EXPORT_SYMBOL(cpu_down);
+
+ #ifdef CONFIG_HOTPLUG_SMT
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+ static int __init smt_cmdline_disable(char *str)
+ {
diff --git a/patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch b/patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch
new file mode 100644
index 0000000000..4f3f4fe5ff
--- /dev/null
+++ b/patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch
@@ -0,0 +1,132 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Subject: [PATCH 32/40] x86/KVM/VMX: Add module argument for L1TF mitigation
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit a399477e52c17e148746d3ce9a483f681c2aa9a0 upstream
+
+Add a mitigation mode parameter "vmentry_l1d_flush" for CVE-2018-3620, aka
+L1 terminal fault. The valid arguments are:
+
+ - "always" L1D cache flush on every VMENTER.
+ - "cond" Conditional L1D cache flush, explained below
+ - "never" Disable the L1D cache flush mitigation
+
+"cond" is trying to avoid L1D cache flushes on VMENTER if the code executed
+between VMEXIT and VMENTER is considered safe, i.e. is not bringing any
+interesting information into L1D which might be exploited.
+
+[ tglx: Split out from a larger patch ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ Documentation/kernel-parameters.txt | 12 ++++++
+ arch/x86/kvm/vmx.c | 65 ++++++++++++++++++++++++++++++++++--
+ 2 files changed, 75 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -176,6 +176,54 @@ module_param(ple_window_max, int, S_IRUG
+
+ extern const ulong vmx_return;
+
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
++
++/* These MUST be in sync with vmentry_l1d_param order. */
++enum vmx_l1d_flush_state {
++ VMENTER_L1D_FLUSH_NEVER,
++ VMENTER_L1D_FLUSH_COND,
++ VMENTER_L1D_FLUSH_ALWAYS,
++};
++
++static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
++
++static const struct {
++ const char *option;
++ enum vmx_l1d_flush_state cmd;
++} vmentry_l1d_param[] = {
++ {"never", VMENTER_L1D_FLUSH_NEVER},
++ {"cond", VMENTER_L1D_FLUSH_COND},
++ {"always", VMENTER_L1D_FLUSH_ALWAYS},
++};
++
++static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++{
++ unsigned int i;
++
++ if (!s)
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
++ if (!strcmp(s, vmentry_l1d_param[i].option)) {
++ vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++
++static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
++{
++ return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
++}
++
++static const struct kernel_param_ops vmentry_l1d_flush_ops = {
++ .set = vmentry_l1d_flush_set,
++ .get = vmentry_l1d_flush_get,
++};
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
++
+ #define NR_AUTOLOAD_MSRS 8
+ #define VMCS02_POOL_SIZE 1
+
+@@ -11042,10 +11090,23 @@ static struct kvm_x86_ops vmx_x86_ops =
+ .setup_mce = vmx_setup_mce,
+ };
+
++static void __init vmx_setup_l1d_flush(void)
++{
++ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
++ !boot_cpu_has_bug(X86_BUG_L1TF))
++ return;
++
++ static_branch_enable(&vmx_l1d_should_flush);
++}
++
+ static int __init vmx_init(void)
+ {
+- int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+- __alignof__(struct vcpu_vmx), THIS_MODULE);
++ int r;
++
++ vmx_setup_l1d_flush();
++
++ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
++ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+ return r;
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1889,6 +1889,18 @@ bytes respectively. Such letter suffixes
+ (virtualized real and unpaged mode) on capable
+ Intel chips. Default is 1 (enabled)
+
++ kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
++ CVE-2018-3620.
++
++ Valid arguments: never, cond, always
++
++ always: L1D cache flush on every VMENTER.
++ cond: Flush L1D on VMENTER only when the code between
++ VMEXIT and VMENTER can leak host memory.
++ never: Disables the mitigation
++
++ Default is cond (do L1 cache flush in specific instances)
++
+ kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification
+ feature (tagged TLBs) on capable Intel chips.
+ Default is 1 (enabled)
diff --git a/patches.arch/02-x86-smp-provide-topology_is_primary_thread.patch b/patches.arch/02-x86-smp-provide-topology_is_primary_thread.patch
new file mode 100644
index 0000000000..8930fdabfa
--- /dev/null
+++ b/patches.arch/02-x86-smp-provide-topology_is_primary_thread.patch
@@ -0,0 +1,126 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:03 +0200
+Subject: x86/smp: Provide topology_is_primary_thread()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Git-commit: 4a648bd4062a6db192992f14a4a4baf9ad19cb9e
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+If the CPU supports SMT then the primary thread can be found by
+checking the lower APIC ID bits for zero. smp_num_siblings is used to build
+the mask for the APIC ID bits which need to be taken into account.
+
+This uses the MPTABLE or ACPI/MADT supplied APIC ID, which can be different
+than the initial APIC ID in CPUID. But according to AMD the lower bits have
+to be consistent. Intel gave a tentative confirmation as well.
+
+Preparatory patch to support disabling SMT at boot/runtime.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Ville Syrjälä" <ville.syrjala@linux.intel.com>
+Cc: Alison Schofield <alison.schofield@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Andy Shevchenko <andy.shevchenko@gmail.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Borislav Petkov <bpetkov@suse.de>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Dou Liyang <douly.fnst@cn.fujitsu.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20180615162947.593966615@linutronix.de
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/apic.h | 7 +++++++
+ arch/x86/include/asm/topology.h | 4 +++-
+ arch/x86/kernel/apic/apic.c | 15 +++++++++++++++
+ arch/x86/kernel/smpboot.c | 9 +++++++++
+ 4 files changed, 34 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -632,6 +632,13 @@ extern int default_check_phys_apicid_pre
+ #endif
+
+ #endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_SMP
++bool apic_id_is_primary_thread(unsigned int id);
++#else
++static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
++#endif
++
+ extern void irq_enter(void);
+ extern void irq_exit(void);
+
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -68,7 +68,6 @@ static inline int early_cpu_to_node(int
+ }
+
+ #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
+-
+ /* Mappings between node number and cpus on that node. */
+ extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+
+@@ -125,6 +124,9 @@ extern const struct cpumask *cpu_coregro
+ #ifdef ENABLE_TOPO_DEFINES
+ #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+ #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
++bool topology_is_primary_thread(unsigned int cpu);
++#else
++static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
+ #endif
+
+ static inline void arch_fix_phys_package_id(int num, u32 slot)
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2018,6 +2018,21 @@ static int cpuid_to_apicid[] = {
+ [0 ... NR_CPUS - 1] = -1,
+ };
+
++/**
++ * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
++ * @id: APIC ID to check
++ */
++bool apic_id_is_primary_thread(unsigned int apicid)
++{
++ u32 mask;
++
++ if (smp_num_siblings == 1)
++ return true;
++ /* Isolate the SMT bit(s) in the APICID and check for 0 */
++ mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
++ return !(apicid & mask);
++}
++
+ /*
+ * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
+ * and cpuid_to_apicid[] synchronized.
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1020,6 +1020,15 @@ int native_cpu_up(unsigned int cpu, stru
+ }
+
+ /**
++ * topology_is_primary_thread - Check whether CPU is the primary SMT thread
++ * @cpu: CPU to check
++ */
++bool topology_is_primary_thread(unsigned int cpu)
++{
++ return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
++}
++
++/**
+ * arch_disable_smp_support() - disables SMP support for x86 at runtime
+ */
+ void arch_disable_smp_support(void)
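The commit message above describes the primary-thread test as masking the low SMT bits of the APIC ID. A stand-alone demonstration of that bit arithmetic (illustrative only, with a user-space reimplementation of fls()):

/* Stand-alone demo of the APIC ID mask used by apic_id_is_primary_thread(). */
#include <stdio.h>

static int fls_u32(unsigned int x)		/* poor man's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static int is_primary_thread(unsigned int apicid, unsigned int smt_siblings)
{
	unsigned int mask;

	if (smt_siblings == 1)
		return 1;
	/* Isolate the SMT bit(s) in the APIC ID and check for 0 */
	mask = (1U << (fls_u32(smt_siblings) - 1)) - 1;
	return !(apicid & mask);
}

int main(void)
{
	unsigned int id;

	/* With 2 siblings per core, even APIC IDs are the primary threads. */
	for (id = 0; id < 8; id++)
		printf("APIC ID %u: %s\n", id,
		       is_primary_thread(id, 2) ? "primary" : "secondary");
	return 0;
}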
diff --git a/patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch b/patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch
new file mode 100644
index 0000000000..7392320915
--- /dev/null
+++ b/patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch
@@ -0,0 +1,137 @@
+From b4d3b3f62cfa91e976c7bc1042ef81ca6801bfd1 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 12:47:38 +0200
+Subject: [PATCH 33/40] x86/KVM/VMX: Add L1D flush algorithm
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit a47dd5f06714c844b33f3b5f517b6f3e81ce57b5 upstream
+
+To mitigate the L1 Terminal Fault vulnerability it's required to flush L1D
+on VMENTER to prevent rogue guests from snooping host memory.
+
+CPUs will have a new control MSR via a microcode update to flush L1D with a
+single MSR write, but in the absence of microcode a fallback to a software
+based flush algorithm is required.
+
+Add a software flush loop which is based on code from Intel.
+
+[ tglx: Split out from combo patch ]
+[ bpetkov: Polish the asm code ]
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 66 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8260,6 +8260,46 @@ static int vmx_handle_exit(struct kvm_vc
+ }
+ }
+
++/*
++ * Software based L1D cache flush which is used when microcode providing
++ * the cache control MSR is not loaded.
++ *
++ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
++ * flush it is required to read in 64 KiB because the replacement algorithm
++ * is not exactly LRU. This could be sized at runtime via topology
++ * information but as all relevant affected CPUs have 32KiB L1D cache size
++ * there is no point in doing so.
++ */
++#define L1D_CACHE_ORDER 4
++static void *vmx_l1d_flush_pages;
++
++static void __maybe_unused vmx_l1d_flush(void)
++{
++ int size = PAGE_SIZE << L1D_CACHE_ORDER;
++
++ asm volatile(
++ /* First ensure the pages are in the TLB */
++ "xorl %%eax, %%eax\n"
++ ".Lpopulate_tlb:\n\t"
++ "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "addl $4096, %%eax\n\t"
++ "cmpl %%eax, %[size]\n\t"
++ "jne .Lpopulate_tlb\n\t"
++ "xorl %%eax, %%eax\n\t"
++ "cpuid\n\t"
++ /* Now fill the cache */
++ "xorl %%eax, %%eax\n"
++ ".Lfill_cache:\n"
++ "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "addl $64, %%eax\n\t"
++ "cmpl %%eax, %[size]\n\t"
++ "jne .Lfill_cache\n\t"
++ "lfence\n"
++ :: [empty_zp] "r" (vmx_l1d_flush_pages),
++ [size] "r" (size)
++ : "eax", "ebx", "ecx", "edx");
++}
++
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+ {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+@@ -11090,25 +11130,45 @@ static struct kvm_x86_ops vmx_x86_ops =
+ .setup_mce = vmx_setup_mce,
+ };
+
+-static void __init vmx_setup_l1d_flush(void)
++static int __init vmx_setup_l1d_flush(void)
+ {
++ struct page *page;
++
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+ !boot_cpu_has_bug(X86_BUG_L1TF))
+- return;
++ return 0;
+
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++
++ vmx_l1d_flush_pages = page_address(page);
+ static_branch_enable(&vmx_l1d_should_flush);
++ return 0;
++}
++
++static void vmx_free_l1d_flush_pages(void)
++{
++ if (vmx_l1d_flush_pages) {
++ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
++ vmx_l1d_flush_pages = NULL;
++ }
+ }
+
+ static int __init vmx_init(void)
+ {
+ int r;
+
+- vmx_setup_l1d_flush();
++ r = vmx_setup_l1d_flush();
++ if (r)
++ return r;
+
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
+- if (r)
++ if (r) {
++ vmx_free_l1d_flush_pages();
+ return r;
++ }
+
+ #ifdef CONFIG_KEXEC_CORE
+ rcu_assign_pointer(crash_vmclear_loaded_vmcss,
+@@ -11126,6 +11186,8 @@ static void __exit vmx_exit(void)
+ #endif
+
+ kvm_exit();
++
++ vmx_free_l1d_flush_pages();
+ }
+
+ module_init(vmx_init)
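The asm loop above first touches each of the 16 pages so their TLB entries are populated, then reads the whole 64 KiB buffer in 64-byte cache-line steps. The same idea expressed as a plain-C sketch (illustrative only; the real code stays in asm so the compiler cannot reorder or drop the reads, and it serializes with CPUID and LFENCE):

/* Illustrative C rendering of the software L1D displacement loop. */
#define L1D_FLUSH_PAGES	16			/* PAGE_SIZE << L1D_CACHE_ORDER */
#define L1D_FLUSH_SIZE	(L1D_FLUSH_PAGES * 4096)

static void l1d_flush_sw_sketch(const volatile unsigned char *buf)
{
	int i;

	/* Pass 1: fault/populate the TLB entries for all 16 pages. */
	for (i = 0; i < L1D_FLUSH_SIZE; i += 4096)
		(void)buf[i];

	/* Pass 2: read 64 KiB in 64-byte steps to displace the L1D contents. */
	for (i = 0; i < L1D_FLUSH_SIZE; i += 64)
		(void)buf[i];
}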
diff --git a/patches.arch/03-x86-topology-provide-topology_smt_supported.patch b/patches.arch/03-x86-topology-provide-topology_smt_supported.patch
new file mode 100644
index 0000000000..f7a11cd32f
--- /dev/null
+++ b/patches.arch/03-x86-topology-provide-topology_smt_supported.patch
@@ -0,0 +1,49 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Jun 2018 10:37:20 +0200
+Subject: x86/topology: Provide topology_smt_supported()
+Git-commit: f048c399e0f7490ab7296bc2c255d37eb14a9675
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+Provide information on whether SMT is supported by the CPUs. Preparatory patch
+for SMT control mechanism.
+
+Suggested-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/topology.h | 2 ++
+ arch/x86/kernel/smpboot.c | 8 ++++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -125,8 +125,10 @@ extern const struct cpumask *cpu_coregro
+ #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+ #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
+ bool topology_is_primary_thread(unsigned int cpu);
++bool topology_smt_supported(void);
+ #else
+ static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
++static inline bool topology_smt_supported(void) { return false; }
+ #endif
+
+ static inline void arch_fix_phys_package_id(int num, u32 slot)
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1029,6 +1029,14 @@ bool topology_is_primary_thread(unsigned
+ }
+
+ /**
++ * topology_smt_supported - Check whether SMT is supported by the CPUs
++ */
++bool topology_smt_supported(void)
++{
++ return smp_num_siblings > 1;
++}
++
++/**
+ * arch_disable_smp_support() - disables SMP support for x86 at runtime
+ */
+ void arch_disable_smp_support(void)
diff --git a/patches.arch/04-cpu-hotplug-split-do_cpu_down.patch b/patches.arch/04-cpu-hotplug-split-do_cpu_down.patch
new file mode 100644
index 0000000000..d5e0186f5f
--- /dev/null
+++ b/patches.arch/04-cpu-hotplug-split-do_cpu_down.patch
@@ -0,0 +1,60 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:05 +0200
+Subject: cpu/hotplug: Split do_cpu_down()
+Git-commit: 049ac01a7b9da9676b802fb13732da43cf6f25ca
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+Split out the inner workings of do_cpu_down() to allow reuse of that
+function for the upcoming SMT disabling mechanism.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20180615162947.758649097@linutronix.de
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/cpu.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -430,20 +430,24 @@ out_release:
+ return err;
+ }
+
++/*
++ * @target unused.
++ */
++static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
++{
++ if (cpu_hotplug_disabled)
++ return -EBUSY;
++ return _cpu_down(cpu, 0);
++}
++
+ int cpu_down(unsigned int cpu)
+ {
+ int err;
+
+ cpu_maps_update_begin();
+
+- if (cpu_hotplug_disabled) {
+- err = -EBUSY;
+- goto out;
+- }
+-
+- err = _cpu_down(cpu, 0);
++ err = cpu_down_maps_locked(cpu, 0);
+
+-out:
+ cpu_maps_update_done();
+ return err;
+ }
diff --git a/patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch b/patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch
new file mode 100644
index 0000000000..b8d21180cf
--- /dev/null
+++ b/patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch
@@ -0,0 +1,84 @@
+From ab38da8677245ccbb76767a4ed7e39b5106e00cf Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 13:03:48 +0200
+Subject: [PATCH 34/40] x86/KVM/VMX: Add L1D MSR based flush
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 3fa045be4c720146b18a19cea7a767dc6ad5df94 upstream
+
+336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
+(IA32_FLUSH_CMD aka 0x10B) which has similar write-only semantics to other
+MSRs defined in the document.
+
+The semantics of this MSR is to allow "finer granularity invalidation of
+caching structures than existing mechanisms like WBINVD. It will writeback
+and invalidate the L1 data cache, including all cachelines brought in by
+preceding instructions, without invalidating all caches (eg. L2 or
+LLC). Some processors may also invalidate the first level level instruction
+cache on a L1D_FLUSH command. The L1 data and instruction caches may be
+shared across the logical processors of a core."
+
+Use it instead of the loop based L1 flush algorithm.
+
+A copy of this document is available at
+ https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+[ tglx: Avoid allocating pages when the MSR is available ]
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/msr-index.h | 6 ++++++
+ arch/x86/kvm/vmx.c | 15 +++++++++++----
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -65,6 +65,12 @@
+ * control required.
+ */
+
++#define MSR_IA32_FLUSH_CMD 0x0000010b
++#define L1D_FLUSH (1 << 0) /*
++ * Writeback and invalidate the
++ * L1 data cache.
++ */
++
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8277,6 +8277,11 @@ static void __maybe_unused vmx_l1d_flush
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
++ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
++ return;
++ }
++
+ asm volatile(
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+@@ -11138,11 +11143,13 @@ static int __init vmx_setup_l1d_flush(vo
+ !boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
+- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+- if (!page)
+- return -ENOMEM;
++ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++ vmx_l1d_flush_pages = page_address(page);
++ }
+
+- vmx_l1d_flush_pages = page_address(page);
+ static_branch_enable(&vmx_l1d_should_flush);
+ return 0;
+ }
diff --git a/patches.arch/04.1-cpu-hotplug-add-sysfs-state-interface.patch b/patches.arch/04.1-cpu-hotplug-add-sysfs-state-interface.patch
new file mode 100644
index 0000000000..f8cf8b892d
--- /dev/null
+++ b/patches.arch/04.1-cpu-hotplug-add-sysfs-state-interface.patch
@@ -0,0 +1,159 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 26 Feb 2016 18:43:31 +0000
+Subject: cpu/hotplug: Add sysfs state interface
+Git-commit: 98f8cdce1db580b99fce823a48eea2cb2bdb261e
+Patch-mainline: v4.6-rc1
+References: bsc#1089343
+
+Add a sysfs interface so we can actually see which state the cpus are in.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
+Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul Turner <pjt@google.com>
+Link: http://lkml.kernel.org/r/20160226182340.942257522@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+[ bp: only barebones backport, not needed stuff commented out. ]
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/cpu.c | 110 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 110 insertions(+)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -82,6 +82,7 @@ static struct {
+ #endif
+ };
+
++static DEFINE_MUTEX(cpuhp_state_mutex);
+ /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
+ #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
+ #define cpuhp_lock_acquire_tryread() \
+@@ -734,6 +735,115 @@ void notify_cpu_starting(unsigned int cp
+
+ #endif /* CONFIG_SMP */
+
++#if 0
++static bool cpuhp_is_ap_state(enum cpuhp_state state)
++{
++ return (state > CPUHP_AP_OFFLINE && state < CPUHP_AP_ONLINE);
++}
++
++static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
++{
++ struct cpuhp_step *sp;
++
++ sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
++ return sp + state;
++}
++#endif
++
++#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
++static ssize_t show_cpuhp_state(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++#if 0
++ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
++
++ return sprintf(buf, "%d\n", st->state);
++#endif
++ return 0;
++}
++static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
++
++static ssize_t show_cpuhp_target(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++#if 0
++ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
++
++ return sprintf(buf, "%d\n", st->target);
++#endif
++ return 0;
++}
++static DEVICE_ATTR(target, 0444, show_cpuhp_target, NULL);
++
++static struct attribute *cpuhp_cpu_attrs[] = {
++ &dev_attr_state.attr,
++ &dev_attr_target.attr,
++ NULL
++};
++
++static struct attribute_group cpuhp_cpu_attr_group = {
++ .attrs = cpuhp_cpu_attrs,
++ .name = "hotplug",
++ NULL
++};
++
++static ssize_t show_cpuhp_states(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++#if 0
++ ssize_t cur, res = 0;
++ int i;
++
++ mutex_lock(&cpuhp_state_mutex);
++ for (i = 0; i <= CPUHP_ONLINE; i++) {
++ struct cpuhp_step *sp = cpuhp_get_step(i);
++
++ if (sp->name) {
++ cur = sprintf(buf, "%3d: %s\n", i, sp->name);
++ buf += cur;
++ res += cur;
++ }
++ }
++ mutex_unlock(&cpuhp_state_mutex);
++#endif
++ return 0;
++}
++static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
++
++static struct attribute *cpuhp_cpu_root_attrs[] = {
++ &dev_attr_states.attr,
++ NULL
++};
++
++static struct attribute_group cpuhp_cpu_root_attr_group = {
++ .attrs = cpuhp_cpu_root_attrs,
++ .name = "hotplug",
++ NULL
++};
++
++static int __init cpuhp_sysfs_init(void)
++{
++ int cpu, ret;
++
++ ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
++ &cpuhp_cpu_root_attr_group);
++ if (ret)
++ return ret;
++
++ for_each_possible_cpu(cpu) {
++ struct device *dev = get_cpu_device(cpu);
++
++ if (!dev)
++ continue;
++ ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
++ if (ret)
++ return ret;
++ }
++ return 0;
++}
++device_initcall(cpuhp_sysfs_init);
++#endif
++
+ /*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents all NR_CPUS bits binary values of 1<<nr.
diff --git a/patches.arch/04.2-x86-topology-add-topology_max_smt_threads.patch b/patches.arch/04.2-x86-topology-add-topology_max_smt_threads.patch
new file mode 100644
index 0000000000..f2edfd0549
--- /dev/null
+++ b/patches.arch/04.2-x86-topology-add-topology_max_smt_threads.patch
@@ -0,0 +1,125 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 19 May 2016 17:09:55 -0700
+Subject: x86/topology: Add topology_max_smt_threads()
+Git-commit: 70b8301f6b8f7bc053377a9cbd0c4e42e29d9807
+Patch-mainline: v4.8-rc1
+References: bsc#1089343
+
+For SMT specific workarounds it is useful to know if SMT is active
+on any online CPU in the system. This currently requires a loop
+over all online CPUs.
+
+Add a global variable that is updated with the maximum number
+of SMT threads on any CPU at online/offline time, and use it for
+topology_max_smt_threads().
+
+The single call is easier to use than a loop.
+
+Not exported to user space because user space already can use
+the existing sibling interfaces to find this out.
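+
+As a hedged usage sketch (the helper below is hypothetical, not part of this
+patch), a caller that needs an SMT-specific workaround can now test the
+global instead of iterating over all online CPUs:
+
+  #include <asm/topology.h>
+
+  /* Hypothetical caller: true if any online core runs more than one thread. */
+  static bool smt_workaround_needed(void)
+  {
+          return topology_max_smt_threads() > 1;
+  }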
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: acme@kernel.org
+Cc: jolsa@kernel.org
+Link: http://lkml.kernel.org/r/1463703002-19686-2-git-send-email-andi@firstfloor.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/topology.h | 9 +++++++++
+ arch/x86/kernel/smpboot.c | 25 ++++++++++++++++++++++++-
+ 2 files changed, 33 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -124,11 +124,20 @@ extern const struct cpumask *cpu_coregro
+ #ifdef ENABLE_TOPO_DEFINES
+ #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
+ #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
++
++extern int __max_smt_threads;
++
++static inline int topology_max_smt_threads(void)
++{
++ return __max_smt_threads;
++}
++
+ bool topology_is_primary_thread(unsigned int cpu);
+ bool topology_smt_supported(void);
+ #else
+ static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
+ static inline bool topology_smt_supported(void) { return false; }
++static inline int topology_max_smt_threads(void) { return 1; }
+ #endif
+
+ static inline void arch_fix_phys_package_id(int num, u32 slot)
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -99,6 +99,9 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t
+ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
+ EXPORT_PER_CPU_SYMBOL(cpu_info);
+
++/* Maximum number of SMT threads on any online core */
++int __max_smt_threads __read_mostly;
++
+ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+ {
+ unsigned long flags;
+@@ -371,7 +374,7 @@ void set_cpu_sibling_map(int cpu)
+ bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct cpuinfo_x86 *o;
+- int i;
++ int i, threads;
+
+ cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
+
+@@ -428,6 +431,10 @@ void set_cpu_sibling_map(int cpu)
+ if (match_die(c, o) && !topology_same_node(c, o))
+ x86_has_numa_in_package = true;
+ }
++
++ threads = cpumask_weight(topology_sibling_cpumask(cpu));
++ if (threads > __max_smt_threads)
++ __max_smt_threads = threads;
+ }
+
+ /* maps the cpu to the sched domain representing multi-core */
+@@ -1359,6 +1366,21 @@ __init void prefill_possible_map(void)
+
+ #ifdef CONFIG_HOTPLUG_CPU
+
++/* Recompute SMT state for all CPUs on offline */
++static void recompute_smt_state(void)
++{
++ int max_threads, cpu;
++
++ max_threads = 0;
++ for_each_online_cpu (cpu) {
++ int threads = cpumask_weight(topology_sibling_cpumask(cpu));
++
++ if (threads > max_threads)
++ max_threads = threads;
++ }
++ __max_smt_threads = max_threads;
++}
++
+ static void remove_siblinginfo(int cpu)
+ {
+ int sibling;
+@@ -1384,6 +1406,7 @@ static void remove_siblinginfo(int cpu)
+ c->cpu_core_id = 0;
+ c->booted_cores = 0;
+ cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
++ recompute_smt_state();
+ }
+
+ static void remove_cpu_from_maps(int cpu)
diff --git a/patches.arch/04.3-x86-smpboot-do-not-use-smp_num_siblings-in-_max_logical_packages-calculation.patch b/patches.arch/04.3-x86-smpboot-do-not-use-smp_num_siblings-in-_max_logical_packages-calculation.patch
new file mode 100644
index 0000000000..8687b688e7
--- /dev/null
+++ b/patches.arch/04.3-x86-smpboot-do-not-use-smp_num_siblings-in-_max_logical_packages-calculation.patch
@@ -0,0 +1,50 @@
+From: Prarit Bhargava <prarit@redhat.com>
+Date: Mon, 4 Dec 2017 11:45:21 -0500
+Subject: x86/smpboot: Do not use smp_num_siblings in __max_logical_packages calculation
+Git-commit: 947134d9b00f342415af7eddd42a5fce7262a1b9
+Patch-mainline: v4.15-rc4
+References: bsc#1089343
+
+Documentation/x86/topology.txt defines smp_num_siblings as "The number of
+threads in a core". Since commit bbb65d2d365e ("x86: use cpuid vector 0xb
+when available for detecting cpu topology") smp_num_siblings is the
+maximum number of threads in a core. If Simultaneous MultiThreading
+(SMT) is disabled on a system, smp_num_siblings is 2 and not 1 as
+expected.
+
+Use topology_max_smt_threads(), which contains the active number of threads,
+in the __max_logical_packages calculation.
+
+On a single socket, single core, single thread system __max_smt_threads has
+not been updated when the __max_logical_packages calculation happens, so it is
+zero, which makes the package estimate fail. Initialize it to one, which is
+the minimum number of threads on a core.
+
+[ tglx: Folded the __max_smt_threads fix in ]
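+
+Conceptually, the estimate looks like the sketch below; the identifiers are
+placeholders rather than the exact mainline names, shown only to make the
+role of topology_max_smt_threads() in the calculation explicit:
+
+  /* Hedged sketch: threads per package = boot CPU cores * active SMT width. */
+  static unsigned int estimate_max_packages(unsigned int possible_cpus,
+                                            unsigned int boot_cpu_cores)
+  {
+          unsigned int threads_per_pkg;
+
+          threads_per_pkg = boot_cpu_cores * topology_max_smt_threads();
+          return DIV_ROUND_UP(possible_cpus, threads_per_pkg);
+  }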
+
+Fixes: b4c0a7326f5d ("x86/smpboot: Fix __max_logical_packages estimate")
+Reported-by: Jakub Kicinski <kubakici@wp.pl>
+Signed-off-by: Prarit Bhargava <prarit@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jakub Kicinski <kubakici@wp.pl>
+Cc: netdev@vger.kernel.org
+Cc: "netdev@vger.kernel.org"
+Cc: Clark Williams <williams@redhat.com>
+Link: https://lkml.kernel.org/r/20171204164521.17870-1-prarit@redhat.com
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/smpboot.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -100,7 +100,7 @@ DEFINE_PER_CPU_READ_MOSTLY(struct cpuinf
+ EXPORT_PER_CPU_SYMBOL(cpu_info);
+
+ /* Maximum number of SMT threads on any online core */
+-int __max_smt_threads __read_mostly;
++int __read_mostly __max_smt_threads = 1;
+
+ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+ {
diff --git a/patches.arch/05-cpu-hotplug-provide-knobs-to-control-smt.patch b/patches.arch/05-cpu-hotplug-provide-knobs-to-control-smt.patch
new file mode 100644
index 0000000000..921770e41e
--- /dev/null
+++ b/patches.arch/05-cpu-hotplug-provide-knobs-to-control-smt.patch
@@ -0,0 +1,369 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:06 +0200
+Subject: cpu/hotplug: Provide knobs to control SMT
+Git-commit: 8b426b0ce8dd3331ece40cdea6339daac1ecb7b7
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+Provide a command line and a sysfs knob to control SMT.
+
+The command line options are:
+
+ 'nosmt': Enumerate secondary threads, but do not online them
+
+ 'nosmt=force': Ignore secondary threads completely during enumeration
+ via MP table and ACPI/MADT.
+
+The sysfs control file has the following states (read/write):
+
+ 'on': SMT is enabled. Secondary threads can be freely onlined
+ 'off': SMT is disabled. Secondary threads, even if enumerated,
+ cannot be onlined
+ 'forceoff': SMT is permanently disabled. Writes to the control
+ file are rejected.
+
+The command line option 'nosmt' sets the sysfs control to 'off'. This
+can be changed to 'on' to reenable SMT during runtime.
+
+The command line option 'nosmt=force' sets the sysfs control to
+'forceoff'. This cannot be changed during runtime.
+
+When SMT is 'on' and the control file is changed to 'off' then all online
+secondary threads are offlined and attempts to online a secondary thread
+later are rejected.
+
+When SMT is 'off' and the control file is changed to 'on' then secondary
+threads can be onlined again. The 'off' -> 'on' transition does not
+automatically online the secondary threads.
+
+When the control file is set to 'forceoff', the behaviour is the same as
+setting it to 'off', but the operation is irreversible and later writes to
+the control file are rejected.
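+
+A minimal userspace sketch (not part of this patch) that reads the new
+control file; the path and the possible strings follow the ABI text added
+below:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          char state[16];
+          FILE *f = fopen("/sys/devices/system/cpu/smt/control", "r");
+
+          if (!f) {
+                  perror("smt/control");
+                  return 1;
+          }
+          /* Prints "on", "off", "forceoff" or "notsupported". */
+          if (fgets(state, sizeof(state), f))
+                  printf("SMT control: %s", state);
+          fclose(f);
+          return 0;
+  }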
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Cc: Aishwarya Pant <aishpant@gmail.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Rientjes <rientjes@google.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Deepa Dinamani <deepa.kernel@gmail.com>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Josef Bacik <jbacik@fb.com>
+Cc: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Cc: Kate Stewart <kstewart@linuxfoundation.org>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Nicholas Piggin <npiggin@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thymo van Beers <thymovanbeers@gmail.com>
+Cc: Tom Saeger <tom.saeger@oracle.com>
+Cc: Yanjiang Jin <yanjiang.jin@windriver.com>
+Cc: linux-doc@vger.kernel.org
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20180615162947.853265505@linutronix.de
+[ bp: do forceoff checks first in store_smt_control() ]
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 16 +
+ Documentation/kernel-parameters.txt | 11 +
+ arch/Kconfig | 3
+ arch/x86/Kconfig | 1
+ include/linux/cpu.h | 13 +
+ kernel/cpu.c | 171 +++++++++++++++++++++
+ 6 files changed, 215 insertions(+)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -9,6 +9,9 @@ config KEXEC_CORE
+ select CRASH_CORE
+ bool
+
++config HOTPLUG_SMT
++ bool
++
+ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -142,6 +142,7 @@ config X86
+ select HAVE_UID16 if X86_32 || IA32_EMULATION
+ select HAVE_UNSTABLE_SCHED_CLOCK
+ select HAVE_USER_RETURN_NOTIFIER
++ select HOTPLUG_SMT if SMP
+ select IRQ_FORCED_THREADING
+ select MODULES_USE_ELF_RELA if X86_64
+ select MODULES_USE_ELF_REL if X86_32
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -288,3 +288,19 @@ Description: Information about CPU vulne
+ "Not affected" CPU is not affected by the vulnerability
+ "Vulnerable" CPU is affected and no mitigation in effect
+ "Mitigation: $M" CPU is affected and mitigation $M is in effect
++
++What: /sys/devices/system/cpu/smt
++ /sys/devices/system/cpu/smt/active
++ /sys/devices/system/cpu/smt/control
++Date: June 2018
++Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
++Description: Control Symetric Multi Threading (SMT)
++
++ active: Tells whether SMT is active (enabled and available)
++
++ control: Read/write interface to control SMT. Possible
++ values:
++
++ "on" SMT is enabled
++ "off" SMT is disabled
++ "forceoff" SMT is force disabled. Cannot be changed.
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -2550,6 +2550,17 @@ bytes respectively. Such letter suffixes
+
+ nohugeiomap [KNL,x86] Disable kernel huge I/O mappings.
+
++ nosmt [S390] Disable symmetric multithreading (SMT).
++ Equivalent to smt=1.
++
++ [KNL,x86] Disable symmetric multithreading (SMT).
++ nosmt=force: Force disable SMT, similar to disabling
++ it in the BIOS except that some of the
++ resource partitioning effects which are
++ caused by having SMT enabled in the BIOS
++ cannot be undone. Depending on the CPU
++ type this might have a performance impact.
++
+ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
+ (indirect branch prediction) vulnerability. System may
+ allow data leaks with this option, which is equivalent
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -292,4 +292,17 @@ bool cpu_wait_death(unsigned int cpu, in
+ bool cpu_report_death(void);
+ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
++enum cpuhp_smt_control {
++ CPU_SMT_ENABLED,
++ CPU_SMT_DISABLED,
++ CPU_SMT_FORCE_DISABLED,
++ CPU_SMT_NOT_SUPPORTED,
++};
++
++#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
++extern enum cpuhp_smt_control cpu_smt_control;
++#else
++# define cpu_smt_control (CPU_SMT_ENABLED)
++#endif
++
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -455,6 +455,29 @@ int cpu_down(unsigned int cpu)
+ EXPORT_SYMBOL(cpu_down);
+ #endif /*CONFIG_HOTPLUG_CPU*/
+
++#ifdef CONFIG_HOTPLUG_SMT
++enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_control = CPU_SMT_DISABLED;
++ if (str && !strcmp(str, "force")) {
++ pr_info("SMT: Force disabled\n");
++ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ }
++ return 0;
++}
++early_param("nosmt", smt_cmdline_disable);
++
++static inline bool cpu_smt_allowed(unsigned int cpu)
++{
++ return cpu_smt_control == CPU_SMT_ENABLED ||
++ topology_is_primary_thread(cpu);
++}
++#else
++static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++#endif
++
+ /*
+ * Unpark per-CPU smpboot kthreads at CPU-online time.
+ */
+@@ -562,6 +585,10 @@ int cpu_up(unsigned int cpu)
+ err = -EBUSY;
+ goto out;
+ }
++ if (!cpu_smt_allowed(cpu)) {
++ err = -EPERM;
++ goto out;
++ }
+
+ err = _cpu_up(cpu, 0);
+
+@@ -821,10 +848,154 @@ static struct attribute_group cpuhp_cpu_
+ NULL
+ };
+
++#ifdef CONFIG_HOTPLUG_SMT
++
++static const char *smt_states[] = {
++ [CPU_SMT_ENABLED] = "on",
++ [CPU_SMT_DISABLED] = "off",
++ [CPU_SMT_FORCE_DISABLED] = "forceoff",
++ [CPU_SMT_NOT_SUPPORTED] = "notsupported",
++};
++
++static ssize_t
++show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
++}
++
++static void cpuhp_offline_cpu_device(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
++
++ dev->offline = true;
++ /* Tell user space about the state change */
++ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
++}
++
++static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
++{
++ int cpu, ret = 0;
++
++ cpu_maps_update_begin();
++ for_each_online_cpu(cpu) {
++ if (topology_is_primary_thread(cpu))
++ continue;
++ ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
++ if (ret)
++ break;
++ /*
++ * As this needs to hold the cpu maps lock it's impossible
++ * to call device_offline() because that ends up calling
++ * cpu_down() which takes cpu maps lock. cpu maps lock
++ * needs to be held as this might race against in kernel
++ * abusers of the hotplug machinery (thermal management).
++ *
++ * So nothing would update device:offline state. That would
++ * leave the sysfs entry stale and prevent onlining after
++ * smt control has been changed to 'off' again. This is
++ * called under the sysfs hotplug lock, so it is properly
++ * serialized against the regular offline usage.
++ */
++ cpuhp_offline_cpu_device(cpu);
++ }
++ if (!ret)
++ cpu_smt_control = ctrlval;
++ cpu_maps_update_done();
++ return ret;
++}
++
++static int cpuhp_smt_enable(void)
++{
++ cpu_maps_update_begin();
++ cpu_smt_control = CPU_SMT_ENABLED;
++ cpu_maps_update_done();
++ return 0;
++}
++
++static ssize_t
++store_smt_control(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int ctrlval, ret;
++
++ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
++ return -EPERM;
++
++ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return -ENODEV;
++
++ if (sysfs_streq(buf, "on"))
++ ctrlval = CPU_SMT_ENABLED;
++ else if (sysfs_streq(buf, "off"))
++ ctrlval = CPU_SMT_DISABLED;
++ else if (sysfs_streq(buf, "forceoff"))
++ ctrlval = CPU_SMT_FORCE_DISABLED;
++ else
++ return -EINVAL;
++
++ ret = lock_device_hotplug_sysfs();
++ if (ret)
++ return ret;
++
++ if (ctrlval != cpu_smt_control) {
++ switch (ctrlval) {
++ case CPU_SMT_ENABLED:
++ cpuhp_smt_enable();
++ break;
++ case CPU_SMT_DISABLED:
++ case CPU_SMT_FORCE_DISABLED:
++ ret = cpuhp_smt_disable(ctrlval);
++ break;
++ }
++ }
++
++ unlock_device_hotplug();
++ return ret ? ret : count;
++}
++static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
++
++static ssize_t
++show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool active = topology_max_smt_threads() > 1;
++
++ return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
++}
++static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
++
++static struct attribute *cpuhp_smt_attrs[] = {
++ &dev_attr_control.attr,
++ &dev_attr_active.attr,
++ NULL
++};
++
++static const struct attribute_group cpuhp_smt_attr_group = {
++ .attrs = cpuhp_smt_attrs,
++ .name = "smt",
++ NULL
++};
++
++static int __init cpu_smt_state_init(void)
++{
++ if (!topology_smt_supported())
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++
++ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
++ &cpuhp_smt_attr_group);
++}
++
++#else
++static inline int cpu_smt_state_init(void) { return 0; }
++#endif
++
+ static int __init cpuhp_sysfs_init(void)
+ {
+ int cpu, ret;
+
++ ret = cpu_smt_state_init();
++ if (ret)
++ return ret;
++
+ ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_cpu_root_attr_group);
+ if (ret)
diff --git a/patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch b/patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch
new file mode 100644
index 0000000000..8b1ada835f
--- /dev/null
+++ b/patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch
@@ -0,0 +1,151 @@
+From c441e50d54c697c6b5e309efd9de3740972a6975 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 13:07:14 +0200
+Subject: [PATCH 35/40] x86/KVM/VMX: Add L1D flush logic
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit c595ceee45707f00f64f61c54fb64ef0cc0b4e85 upstream
+
+Add the logic for flushing L1D on VMENTER. The flush depends on the static
+key being enabled and the new l1tf_flush_l1d flag being set.
+
+The flag is set:
+ - Always, if the flush module parameter is 'always'
+
+ - Conditionally at:
+ - Entry to vcpu_run(), i.e. after executing user space
+
+ - From the sched_in notifier, i.e. when switching to a vCPU thread.
+
+ - From vmexit handlers which are considered unsafe, i.e. where
+ sensitive data can be brought into L1D:
+
+ - The emulator, which could be a good target for other speculative
+ execution-based threats,
+
+ - The MMU, which can bring host page tables in the L1 cache.
+
+ - External interrupts
+
+ - Nested operations that require the MMU (see above). That is
+ vmptrld, vmptrst, vmclear, vmwrite and vmread.
+
+ - When handling invept and invvpid
+
+[ tglx: Split out from combo patch and reduced to a single flag ]
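+
+For illustration only, the VMENTER gating condenses to the following check
+(taken from the vmx_vcpu_run() hunk below): the static key keeps the cost at
+zero when the mitigation is disabled, and the per-vCPU flag decides whether
+this particular entry needs a flush.
+
+  /* Condensed from the hunk below; not additional code. */
+  if (static_branch_unlikely(&vmx_l1d_should_flush)) {
+          if (vcpu->arch.l1tf_flush_l1d)
+                  vmx_l1d_flush(vcpu);
+  }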
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/kvm_host.h | 4 ++++
+ arch/x86/kvm/vmx.c | 22 +++++++++++++++++++++-
+ arch/x86/kvm/x86.c | 7 +++++++
+ 3 files changed, 32 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -614,6 +614,9 @@ struct kvm_vcpu_arch {
+
+ int pending_ioapic_eoi;
+ int pending_external_vector;
++
++ /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
++ bool l1tf_flush_l1d;
+ };
+
+ struct kvm_lpage_info {
+@@ -769,6 +772,7 @@ struct kvm_vcpu_stat {
+ u32 signal_exits;
+ u32 irq_window_exits;
+ u32 nmi_window_exits;
++ u32 l1d_flush;
+ u32 halt_exits;
+ u32 halt_successful_poll;
+ u32 halt_attempted_poll;
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8273,9 +8273,20 @@ static int vmx_handle_exit(struct kvm_vc
+ #define L1D_CACHE_ORDER 4
+ static void *vmx_l1d_flush_pages;
+
+-static void __maybe_unused vmx_l1d_flush(void)
++static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
++ bool always;
++
++ /*
++ * If the mitigation mode is 'flush always', keep the flush bit
++ * set, otherwise clear it. It gets set again either from
++ * vcpu_run() or from one of the unsafe VMEXIT handlers.
++ */
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ vcpu->arch.l1tf_flush_l1d = always;
++
++ vcpu->stat.l1d_flush++;
+
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+@@ -8736,6 +8747,12 @@ static void __noclone vmx_vcpu_run(struc
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
++
++ if (static_branch_unlikely(&vmx_l1d_should_flush)) {
++ if (vcpu->arch.l1tf_flush_l1d)
++ vmx_l1d_flush(vcpu);
++ }
++
+ asm(
+ /* Store host registers */
+ "push %%" _ASM_DX "; push %%" _ASM_BP ";"
+@@ -10150,6 +10167,9 @@ static int nested_vmx_run(struct kvm_vcp
+ }
+ }
+
++ /* Hide L1D cache contents from the nested guest. */
++ vmx->vcpu.arch.l1tf_flush_l1d = true;
++
+ /*
+ * We're finally done with prerequisite checking, and can start with
+ * the nested entry.
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4351,6 +4351,9 @@ static int kvm_write_guest_virt_helper(g
+ void *data = val;
+ int r = X86EMUL_CONTINUE;
+
++ /* kvm_write_guest_virt_system can pull in tons of pages. */
++ vcpu->arch.l1tf_flush_l1d = true;
++
+ while (bytes) {
+ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+ access,
+@@ -5483,6 +5486,8 @@ int x86_emulate_instruction(struct kvm_v
+ bool writeback = true;
+ bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
+
++ vcpu->arch.l1tf_flush_l1d = true;
++
+ /*
+ * Clear write_fault_to_shadow_pgtable here to ensure it is
+ * never reused.
+@@ -6828,6 +6833,7 @@ static int vcpu_run(struct kvm_vcpu *vcp
+ struct kvm *kvm = vcpu->kvm;
+
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
++ vcpu->arch.l1tf_flush_l1d = true;
+
+ for (;;) {
+ if (kvm_vcpu_running(vcpu)) {
+@@ -7790,6 +7796,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcp
+
+ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+ {
++ vcpu->arch.l1tf_flush_l1d = true;
+ kvm_x86_ops->sched_in(vcpu, cpu);
+ }
+
diff --git a/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch b/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
new file mode 100644
index 0000000000..cb9c2ce353
--- /dev/null
+++ b/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
@@ -0,0 +1,123 @@
+From 31d19a2332560749924b844557db2042e490433e Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 13:58:37 -0400
+Subject: [PATCH 36/40] x86/KVM/VMX: Split the VMX MSR LOAD structures to have
+ an host/guest numbers
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 33966dd6b2d2c352fae55412db2ea8cfff5df13a upstream
+
+There is no semantic change but this change allows an unbalanced number of
+MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or
+restore on VMEXIT or VMENTER may be different.
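+
+The new layout, condensed from the hunk below for illustration: the guest
+and host lists carry their own element counts, which is what later allows
+the two sides to diverge.
+
+  struct vmx_msrs {
+          unsigned int nr;
+          struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
+  };
+
+  struct msr_autoload {
+          struct vmx_msrs guest;  /* loaded on VMENTER */
+          struct vmx_msrs host;   /* loaded on VMEXIT */
+  } msr_autoload;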
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 53 +++++++++++++++++++++++++++++------------------------
+ 1 file changed, 29 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -580,6 +580,11 @@ static inline int pi_test_sn(struct pi_d
+ (unsigned long *)&pi_desc->control);
+ }
+
++struct vmx_msrs {
++ unsigned int nr;
++ struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
++};
++
+ struct vcpu_vmx {
+ struct kvm_vcpu vcpu;
+ unsigned long host_rsp;
+@@ -607,9 +612,8 @@ struct vcpu_vmx {
+ struct loaded_vmcs *loaded_vmcs;
+ bool __launched; /* temporary, used in vmx_vcpu_run */
+ struct msr_autoload {
+- unsigned nr;
+- struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+- struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
++ struct vmx_msrs guest;
++ struct vmx_msrs host;
+ } msr_autoload;
+ struct {
+ int loaded;
+@@ -1781,18 +1785,18 @@ static void clear_atomic_switch_msr(stru
+ }
+ break;
+ }
+-
+- for (i = 0; i < m->nr; ++i)
+- if (m->guest[i].index == msr)
++ for (i = 0; i < m->guest.nr; ++i)
++ if (m->guest.val[i].index == msr)
+ break;
+
+- if (i == m->nr)
++ if (i == m->guest.nr)
+ return;
+- --m->nr;
+- m->guest[i] = m->guest[m->nr];
+- m->host[i] = m->host[m->nr];
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
++ --m->guest.nr;
++ --m->host.nr;
++ m->guest.val[i] = m->guest.val[m->guest.nr];
++ m->host.val[i] = m->host.val[m->host.nr];
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+@@ -1844,24 +1848,25 @@ static void add_atomic_switch_msr(struct
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ }
+
+- for (i = 0; i < m->nr; ++i)
+- if (m->guest[i].index == msr)
++ for (i = 0; i < m->guest.nr; ++i)
++ if (m->guest.val[i].index == msr)
+ break;
+
+ if (i == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i == m->nr) {
+- ++m->nr;
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
++ } else if (i == m->guest.nr) {
++ ++m->guest.nr;
++ ++m->host.nr;
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+- m->guest[i].index = msr;
+- m->guest[i].value = guest_val;
+- m->host[i].index = msr;
+- m->host[i].value = host_val;
++ m->guest.val[i].index = msr;
++ m->guest.val[i].value = guest_val;
++ m->host.val[i].index = msr;
++ m->host.val[i].value = host_val;
+ }
+
+ static void reload_tss(void)
+@@ -4943,9 +4948,9 @@ static int vmx_vcpu_setup(struct vcpu_vm
+
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
++ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
++ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
diff --git a/patches.arch/06-x86-cpu-remove-the-pointless-cpu-printout.patch b/patches.arch/06-x86-cpu-remove-the-pointless-cpu-printout.patch
new file mode 100644
index 0000000000..0179077be0
--- /dev/null
+++ b/patches.arch/06-x86-cpu-remove-the-pointless-cpu-printout.patch
@@ -0,0 +1,112 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:07 +0200
+Subject: x86/cpu: Remove the pointless CPU printout
+Git-commit: 6cb1d9f777cd0bb4b9323e32870367ea4ed5ff14
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+The value of this printout is dubious at best and there is no point in
+having it in two different places along with convoluted ways to reach it.
+
+Remove it completely.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Philippe Ombredanne <pombredanne@nexb.com>
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20180615162947.934557945@linutronix.de
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 20 +++++---------------
+ arch/x86/kernel/cpu/topology.c | 10 ----------
+ 2 files changed, 5 insertions(+), 25 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -620,13 +620,12 @@ void detect_ht(struct cpuinfo_x86 *c)
+ #ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+ int index_msb, core_bits;
+- static bool printed;
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+ return;
+
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+- goto out;
++ return;
+
+ if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
+ return;
+@@ -635,14 +634,14 @@ void detect_ht(struct cpuinfo_x86 *c)
+
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
++ if (!smp_num_siblings)
++ smp_num_siblings = 1;
++
+ if (smp_num_siblings == 1) {
+ printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
+- goto out;
++ return;
+ }
+
+- if (smp_num_siblings <= 1)
+- goto out;
+-
+ index_msb = get_count_order(smp_num_siblings);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+
+@@ -654,15 +653,6 @@ void detect_ht(struct cpuinfo_x86 *c)
+
+ c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+ ((1 << core_bits) - 1);
+-
+-out:
+- if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
+- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+- c->phys_proc_id);
+- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+- c->cpu_core_id);
+- printed = 1;
+- }
+ #endif
+ }
+
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -32,7 +32,6 @@ void detect_extended_topology(struct cpu
+ unsigned int eax, ebx, ecx, edx, sub_index;
+ unsigned int ht_mask_width, core_plus_mask_width;
+ unsigned int core_select_mask, core_level_siblings;
+- static bool printed;
+
+ if (c->cpuid_level < 0xb)
+ return;
+@@ -85,15 +84,6 @@ void detect_extended_topology(struct cpu
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
+ c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+-
+- if (!printed) {
+- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+- c->phys_proc_id);
+- if (c->x86_max_cores > 1)
+- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+- c->cpu_core_id);
+- printed = 1;
+- }
+ return;
+ #endif
+ }
diff --git a/patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch b/patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch
new file mode 100644
index 0000000000..e178c21168
--- /dev/null
+++ b/patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch
@@ -0,0 +1,85 @@
+From 0b1d2e2aae1ab6c0980e099d18604445780cc8bd Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 20:11:39 -0400
+Subject: [PATCH 37/40] x86/KVM/VMX: Add find_msr() helper function
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit ca83b4a7f2d068da79a029d323024aa45decb250 upstream
+
+.. to help find the MSR on either the guest or host MSR list.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1762,9 +1762,20 @@ static void clear_atomic_switch_msr_spec
+ vm_exit_controls_clearbit(vmx, exit);
+ }
+
++static int find_msr(struct vmx_msrs *m, unsigned int msr)
++{
++ unsigned int i;
++
++ for (i = 0; i < m->nr; ++i) {
++ if (m->val[i].index == msr)
++ return i;
++ }
++ return -ENOENT;
++}
++
+ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+ {
+- unsigned i;
++ int i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -1785,11 +1796,8 @@ static void clear_atomic_switch_msr(stru
+ }
+ break;
+ }
+- for (i = 0; i < m->guest.nr; ++i)
+- if (m->guest.val[i].index == msr)
+- break;
+-
+- if (i == m->guest.nr)
++ i = find_msr(&m->guest, msr);
++ if (i < 0)
+ return;
+ --m->guest.nr;
+ --m->host.nr;
+@@ -1813,7 +1821,7 @@ static void add_atomic_switch_msr_specia
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+ {
+- unsigned i;
++ int i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -1848,16 +1856,13 @@ static void add_atomic_switch_msr(struct
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ }
+
+- for (i = 0; i < m->guest.nr; ++i)
+- if (m->guest.val[i].index == msr)
+- break;
+-
++ i = find_msr(&m->guest, msr);
+ if (i == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i == m->guest.nr) {
+- ++m->guest.nr;
++ } else if (i < 0) {
++ i = m->guest.nr++;
+ ++m->host.nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
diff --git a/patches.arch/07-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch b/patches.arch/07-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch
new file mode 100644
index 0000000000..b7ee55e0f6
--- /dev/null
+++ b/patches.arch/07-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch
@@ -0,0 +1,42 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:08 +0200
+Subject: x86/cpu/AMD: Remove the pointless detect_ht() call
+Git-commit: 07dbb42d958fd47a65159069f1157f8e450fb830
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+Real 32bit AMD CPUs do not have SMT and the only value of the call was to
+reach the magic printout which got removed.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jia Zhang <qianyue.zj@alibaba-inc.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Rudolf Marek <r.marek@assembler.cz>
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20180615162948.030786547@linutronix.de
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/amd.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -784,10 +784,6 @@ static void init_amd(struct cpuinfo_x86
+ srat_detect_node(c);
+ }
+
+-#ifdef CONFIG_X86_32
+- detect_ht(c);
+-#endif
+-
+ init_amd_cacheinfo(c);
+
+ if (c->x86 >= 0xf)
diff --git a/patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch b/patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch
new file mode 100644
index 0000000000..91ad28a674
--- /dev/null
+++ b/patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch
@@ -0,0 +1,84 @@
+From e50b30378d2e96fc136bb545aa8b0769417422f5 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 22:00:47 -0400
+Subject: [PATCH 38/40] x86/KVM/VMX: Separate the VMX AUTOLOAD guest/host
+ number accounting
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 3190709335dd31fe1aeeebfe4ffb6c7624ef971f upstream
+
+This allows loading a different number of MSRs depending on the context:
+VMEXIT or VMENTER.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 29 +++++++++++++++++++----------
+ 1 file changed, 19 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1798,12 +1798,18 @@ static void clear_atomic_switch_msr(stru
+ }
+ i = find_msr(&m->guest, msr);
+ if (i < 0)
+- return;
++ goto skip_guest;
+ --m->guest.nr;
+- --m->host.nr;
+ m->guest.val[i] = m->guest.val[m->guest.nr];
+- m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++
++skip_guest:
++ i = find_msr(&m->host, msr);
++ if (i < 0)
++ return;
++
++ --m->host.nr;
++ m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+@@ -1821,7 +1827,7 @@ static void add_atomic_switch_msr_specia
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+ {
+- int i;
++ int i, j;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -1857,21 +1863,24 @@ static void add_atomic_switch_msr(struct
+ }
+
+ i = find_msr(&m->guest, msr);
+- if (i == NR_AUTOLOAD_MSRS) {
++ j = find_msr(&m->host, msr);
++ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i < 0) {
++ }
++ if (i < 0) {
+ i = m->guest.nr++;
+- ++m->host.nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ }
++ if (j < 0) {
++ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+-
+ m->guest.val[i].index = msr;
+ m->guest.val[i].value = guest_val;
+- m->host.val[i].index = msr;
+- m->host.val[i].value = host_val;
++ m->host.val[j].index = msr;
++ m->host.val[j].value = host_val;
+ }
+
+ static void reload_tss(void)
diff --git a/patches.arch/08-x86-cpu-common-provide-detect_ht_early.patch b/patches.arch/08-x86-cpu-common-provide-detect_ht_early.patch
new file mode 100644
index 0000000000..c19771943d
--- /dev/null
+++ b/patches.arch/08-x86-cpu-common-provide-detect_ht_early.patch
@@ -0,0 +1,90 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:09 +0200
+Subject: x86/cpu/common: Provide detect_ht_early()
+Git-commit: c4b990d9790599116850008bb5af827836fe3bd9
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. detect_ht() cannot be called before the APIC driver
+is selected, so split out the part which initializes smp_num_siblings.
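+
+A hedged userspace sketch of the same enumeration (illustrative only, not
+part of the patch): CPUID leaf 1, EBX bits 23:16 report the maximum number
+of addressable logical processors in the package, which is what
+detect_ht_early() stores in smp_num_siblings. The real code additionally
+checks the HT feature bit and the topology-related feature flags first.
+
+  #include <cpuid.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int eax, ebx, ecx, edx;
+
+          if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
+                  return 1;
+          printf("logical CPUs per package (max): %u\n", (ebx >> 16) & 0xff);
+          return 0;
+  }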
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Wang <davidwang@zhaoxin.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20180615162948.112259090@linutronix.de
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 24 ++++++++++++++----------
+ arch/x86/kernel/cpu/cpu.h | 2 ++
+ 2 files changed, 16 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -615,32 +615,36 @@ static void cpu_detect_tlb(struct cpuinf
+ tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
+ }
+
+-void detect_ht(struct cpuinfo_x86 *c)
++int detect_ht_early(struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+- int index_msb, core_bits;
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+- return;
++ return -1;
+
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+- return;
++ return -1;
+
+ if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
+- return;
++ return -1;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
++ if (smp_num_siblings == 1)
++ pr_info_once("CPU0: Hyper-Threading is disabled\n");
++#endif
++ return 0;
++}
+
+- if (!smp_num_siblings)
+- smp_num_siblings = 1;
++void detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ int index_msb, core_bits;
+
+- if (smp_num_siblings == 1) {
+- printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
++ if (detect_ht_early(c) < 0)
+ return;
+- }
+
+ index_msb = get_count_order(smp_num_siblings);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,4 +47,6 @@ extern void get_cpu_cap(struct cpuinfo_x
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+ extern void x86_spec_ctrl_setup_ap(void);
+
++extern int detect_ht_early(struct cpuinfo_x86 *c);
++
+ #endif /* ARCH_X86_CPU_H */
diff --git a/patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch b/patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch
new file mode 100644
index 0000000000..1b5bd1dab0
--- /dev/null
+++ b/patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch
@@ -0,0 +1,92 @@
+From d2f78e25d473d98dd1703f1d07cdcdd3bb49dd24 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 22:01:22 -0400
+Subject: [PATCH 39/40] x86/KVM/VMX: Extend add_atomic_switch_msr() to allow
+ VMENTER only MSRs
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 989e3992d2eca32c3f1404f2bc91acda3aa122d8 upstream
+
+The IA32_FLUSH_CMD MSR needs only to be written on VMENTER. Extend
+add_atomic_switch_msr() with an entry_only parameter to allow storing the
+MSR only in the guest (ENTRY) MSR array.
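+
+Usage sketch (this exact call is added by a later patch in this series): the
+flush MSR goes only on the VMENTER list, with no host value to restore on
+VMEXIT.
+
+  add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);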
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1825,9 +1825,9 @@ static void add_atomic_switch_msr_specia
+ }
+
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+- u64 guest_val, u64 host_val)
++ u64 guest_val, u64 host_val, bool entry_only)
+ {
+- int i, j;
++ int i, j = 0;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -1863,7 +1863,9 @@ static void add_atomic_switch_msr(struct
+ }
+
+ i = find_msr(&m->guest, msr);
+- j = find_msr(&m->host, msr);
++ if (!entry_only)
++ j = find_msr(&m->host, msr);
++
+ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+@@ -1873,12 +1875,16 @@ static void add_atomic_switch_msr(struct
+ i = m->guest.nr++;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ }
++ m->guest.val[i].index = msr;
++ m->guest.val[i].value = guest_val;
++
++ if (entry_only)
++ return;
++
+ if (j < 0) {
+ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+- m->guest.val[i].index = msr;
+- m->guest.val[i].value = guest_val;
+ m->host.val[j].index = msr;
+ m->host.val[j].value = host_val;
+ }
+@@ -1937,7 +1943,7 @@ static bool update_transition_efer(struc
+ guest_efer &= ~EFER_LME;
+ if (guest_efer != host_efer)
+ add_atomic_switch_msr(vmx, MSR_EFER,
+- guest_efer, host_efer);
++ guest_efer, host_efer, false);
+ return false;
+ } else {
+ guest_efer &= ~ignore_bits;
+@@ -3047,7 +3053,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+ vcpu->arch.ia32_xss = data;
+ if (vcpu->arch.ia32_xss != host_xss)
+ add_atomic_switch_msr(vmx, MSR_IA32_XSS,
+- vcpu->arch.ia32_xss, host_xss);
++ vcpu->arch.ia32_xss, host_xss, false);
+ else
+ clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+ break;
+@@ -8708,7 +8714,7 @@ static void atomic_switch_perf_msrs(stru
+ clear_atomic_switch_msr(vmx, msrs[i].msr);
+ else
+ add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+- msrs[i].host);
++ msrs[i].host, false);
+ }
+
+ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
diff --git a/patches.arch/09-x86-cpu-topology-provide-detect_extended_topology_early.patch b/patches.arch/09-x86-cpu-topology-provide-detect_extended_topology_early.patch
new file mode 100644
index 0000000000..7f2c39e1cc
--- /dev/null
+++ b/patches.arch/09-x86-cpu-topology-provide-detect_extended_topology_early.patch
@@ -0,0 +1,129 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:10 +0200
+Subject: x86/cpu/topology: Provide detect_extended_topology_early()
+Git-commit: 50a649ec28ca20bf21c9c3f52b7b10e4cdad43ba
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. detect_extended_topology() cannot be called before
+the APIC driver is selected, so split out the part which initializes
+smp_num_siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Wang <davidwang@zhaoxin.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Kate Stewart <kstewart@linuxfoundation.org>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20180615162948.195594861@linutronix.de
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/processor.h | 2 +-
+ arch/x86/kernel/cpu/cpu.h | 1 +
+ arch/x86/kernel/cpu/topology.c | 37 +++++++++++++++++++++++++------------
+ 3 files changed, 27 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -200,7 +200,7 @@ extern u32 get_scattered_cpuid_leaf(unsi
+ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+ extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
+
+-extern void detect_extended_topology(struct cpuinfo_x86 *c);
++extern int detect_extended_topology(struct cpuinfo_x86 *c);
+ extern void detect_ht(struct cpuinfo_x86 *c);
+
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,6 +47,7 @@ extern void get_cpu_cap(struct cpuinfo_x
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+ extern void x86_spec_ctrl_setup_ap(void);
+
++extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
+ extern int detect_ht_early(struct cpuinfo_x86 *c);
+
+ #endif /* ARCH_X86_CPU_H */
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -21,20 +21,13 @@
+ #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
+ #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
+
+-/*
+- * Check for extended topology enumeration cpuid leaf 0xb and if it
+- * exists, use it for populating initial_apicid and cpu topology
+- * detection.
+- */
+-void detect_extended_topology(struct cpuinfo_x86 *c)
++int detect_extended_topology_early(struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+- unsigned int eax, ebx, ecx, edx, sub_index;
+- unsigned int ht_mask_width, core_plus_mask_width;
+- unsigned int core_select_mask, core_level_siblings;
++ unsigned int eax, ebx, ecx, edx;
+
+ if (c->cpuid_level < 0xb)
+- return;
++ return -1;
+
+ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+
+@@ -42,7 +35,7 @@ void detect_extended_topology(struct cpu
+ * check if the cpuid leaf 0xb is actually implemented.
+ */
+ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
+- return;
++ return -1;
+
+ set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
+
+@@ -50,10 +43,30 @@ void detect_extended_topology(struct cpu
+ * initial apic id, which also represents 32-bit extended x2apic id.
+ */
+ c->initial_apicid = edx;
++ smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
++#endif
++ return 0;
++}
++
++/*
++ * Check for extended topology enumeration cpuid leaf 0xb and if it
++ * exists, use it for populating initial_apicid and cpu topology
++ * detection.
++ */
++int detect_extended_topology(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ unsigned int eax, ebx, ecx, edx, sub_index;
++ unsigned int ht_mask_width, core_plus_mask_width;
++ unsigned int core_select_mask, core_level_siblings;
++
++ if (detect_extended_topology_early(c) < 0)
++ return -1;
+
+ /*
+ * Populate HT related information from sub-leaf level 0.
+ */
++ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+ core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+
+@@ -84,6 +97,6 @@ void detect_extended_topology(struct cpu
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
+ c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+- return;
+ #endif
++ return 0;
+ }
diff --git a/patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch b/patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch
new file mode 100644
index 0000000000..35a2d5b6c9
--- /dev/null
+++ b/patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch
@@ -0,0 +1,93 @@
+From 3661874b1c4810195c070e05abe9b9504ead59e2 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Thu, 28 Jun 2018 17:10:36 -0400
+Subject: [PATCH 40/40] x86/KVM/VMX: Use MSR save list for IA32_FLUSH_CMD if
+ required
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 390d975e0c4e60ce70d4157e0dd91ede37824603 upstream
+
+If the L1D flush module parameter is set to 'always' and the IA32_FLUSH_CMD
+MSR is available, optimize the VMENTER code with the MSR save list.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 42 +++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 37 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -4898,6 +4898,16 @@ static void ept_set_mmio_spte_mask(void)
+ kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
+ }
+
++static bool vmx_l1d_use_msr_save_list(void)
++{
++ if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
++ static_cpu_has(X86_FEATURE_HYPERVISOR) ||
++ !static_cpu_has(X86_FEATURE_FLUSH_L1D))
++ return false;
++
++ return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++}
++
+ #define VMX_XSS_EXIT_BITMAP 0
+ /*
+ * Sets up the vmcs for emulated real mode.
+@@ -5243,6 +5253,12 @@ static void vmx_set_nmi_mask(struct kvm_
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ }
++ /*
++ * If flushing the L1D cache on every VMENTER is enforced and the
++ * MSR is available, use the MSR save list.
++ */
++ if (vmx_l1d_use_msr_save_list())
++ add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
+ }
+
+ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+@@ -8304,11 +8320,26 @@ static void vmx_l1d_flush(struct kvm_vcp
+ bool always;
+
+ /*
+- * If the mitigation mode is 'flush always', keep the flush bit
+- * set, otherwise clear it. It gets set again either from
+- * vcpu_run() or from one of the unsafe VMEXIT handlers.
++ * This code is only executed when:
++ * - the flush mode is 'cond'
++ * - the flush mode is 'always' and the flush MSR is not
++ * available
++ *
++ * If the CPU has the flush MSR then clear the flush bit because
++ * 'always' mode is handled via the MSR save list.
++ *
++ * If the MSR is not avaibable then act depending on the mitigation
++ * mode: If 'flush always', keep the flush bit set, otherwise clear
++ * it.
++ *
++ * The flush bit gets set again either from vcpu_run() or from one
++ * of the unsafe VMEXIT handlers.
+ */
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
++ always = false;
++ else
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -11185,7 +11216,8 @@ static int __init vmx_setup_l1d_flush(vo
+ struct page *page;
+
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- !boot_cpu_has_bug(X86_BUG_L1TF))
++ !boot_cpu_has_bug(X86_BUG_L1TF) ||
++ vmx_l1d_use_msr_save_list())
+ return 0;
+
+ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
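
To make the flush-mode plumbing above easier to follow, here is a standalone userspace sketch of the condition implemented by vmx_l1d_use_msr_save_list(); the boolean parameters are stand-ins for the kernel's cpufeature and module-parameter state, and the function itself is illustrative, not kernel code:

#include <stdbool.h>
#include <stdio.h>

enum l1d_flush_mode { FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

/* The MSR save list is used only on affected bare-metal CPUs with EPT on,
 * the IA32_FLUSH_CMD MSR present and the module parameter set to 'always'. */
static bool use_msr_save_list(bool ept, bool has_l1tf_bug, bool on_hypervisor,
                              bool has_flush_msr, enum l1d_flush_mode mode)
{
    if (!ept || !has_l1tf_bug || on_hypervisor || !has_flush_msr)
        return false;
    return mode == FLUSH_ALWAYS;
}

int main(void)
{
    printf("%d\n", use_msr_save_list(true, true, false, true, FLUSH_ALWAYS)); /* 1 */
    printf("%d\n", use_msr_save_list(true, true, false, true, FLUSH_COND));   /* 0: software flush path */
    return 0;
}

In the 'always' case vmx_l1d_flush() then clears the per-vcpu flush bit, since the MSR save list already forces the flush on every VMENTER.
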
diff --git a/patches.arch/10-x86-cpu-intel-evaluate-smp_num_siblings-early.patch b/patches.arch/10-x86-cpu-intel-evaluate-smp_num_siblings-early.patch
new file mode 100644
index 0000000000..56b1cdbdaa
--- /dev/null
+++ b/patches.arch/10-x86-cpu-intel-evaluate-smp_num_siblings-early.patch
@@ -0,0 +1,46 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 15 Jun 2018 18:28:11 +0200
+Subject: x86/cpu/intel: Evaluate smp_num_siblings early
+Git-commit: f7a7a3b5889c8ea869437c7f24681ca8e70b1b65
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+Make use of the new early detection function to initialize smp_num_siblings
+on the boot cpu before the MP-Table or ACPI/MADT scan happens. That's
+required for force disabling SMT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: "Levin, Alexander (Sasha Levin)" <alexander.levin@verizon.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: David Wang <davidwang@zhaoxin.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jia Zhang <qianyue.zj@alibaba-inc.com>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Cc: x86-ml <x86@kernel.org>
+Link: http://lkml.kernel.org/r/20180615162948.279100396@linutronix.de
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/intel.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -308,6 +308,13 @@ static void early_init_intel(struct cpui
+ }
+
+ check_mpx_erratum(c);
++
++ /*
++ * Get the number of SMT siblings early from the extended topology
++ * leaf, if available. Otherwise try the legacy SMT detection.
++ */
++ if (detect_extended_topology_early(c) < 0)
++ detect_ht_early(c);
+ }
+
+ #ifdef CONFIG_X86_32
diff --git a/patches.arch/11-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch b/patches.arch/11-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch
new file mode 100644
index 0000000000..61a1b7f9dc
--- /dev/null
+++ b/patches.arch/11-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch
@@ -0,0 +1,35 @@
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 15 Jun 2018 20:48:39 +0200
+Subject: x86/CPU/AMD: Do not check CPUID max ext level before parsing SMP info
+Git-commit: 119bff8a9c9bb00116a844ec68be7bc4b1c768f5
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+Old code used to check whether CPUID ext max level is >= 0x80000008 because
+that last leaf contains the number of cores of the physical CPU. The three
+functions called there now do not depend on that leaf anymore so the check
+can go.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/x86/kernel/cpu/amd.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -778,11 +778,8 @@ static void init_amd(struct cpuinfo_x86
+
+ cpu_detect_cache_sizes(c);
+
+- /* Multi core CPU? */
+- if (c->extended_cpuid_level >= 0x80000008) {
+- amd_detect_cmp(c);
+- srat_detect_node(c);
+- }
++ amd_detect_cmp(c);
++ srat_detect_node(c);
+
+ init_amd_cacheinfo(c);
+
diff --git a/patches.arch/12-x86-cpu-amd-evaluate-smp_num_siblings-early.patch b/patches.arch/12-x86-cpu-amd-evaluate-smp_num_siblings-early.patch
new file mode 100644
index 0000000000..930c725c18
--- /dev/null
+++ b/patches.arch/12-x86-cpu-amd-evaluate-smp_num_siblings-early.patch
@@ -0,0 +1,47 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:57:38 +0200
+Subject: x86/cpu/AMD: Evaluate smp_num_siblings early
+Git-commit: 1e1d7e25fd759eddf96d8ab39d0a90a1979b2d8c
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. amd_get_topology() cannot be called before the APIC
+driver is selected, so split out the part which initializes
+smp_num_siblings and invoke it from amd_early_init().
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/amd.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -317,6 +317,16 @@ static void legacy_fixup_core_id(struct
+ c->compute_unit_id %= cus_per_node;
+ }
+
++static void amd_get_topology_early(struct cpuinfo_x86 *c)
++{
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
++ u32 eax, ebx, ecx, edx;
++
++ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
++ smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
++ }
++}
++
+ /*
+ * Fixup core topology information for
+ * (1) AMD multi-node processors
+@@ -615,6 +625,8 @@ static void early_init_amd(struct cpuinf
+ /* F16h erratum 793, CVE-2013-6885 */
+ if (c->x86 == 0x16 && c->x86_model <= 0xf)
+ msr_set_bit(MSR_AMD64_LS_CFG, 15);
++
++ amd_get_topology_early(c);
+ }
+
+ static const int amd_erratum_383[];
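
The sibling count read by amd_get_topology_early() above comes from CPUID leaf 0x8000001e. A small userspace illustration of the bit extraction (the EBX value is a made-up sample, not read from hardware):

#include <stdio.h>

/* ThreadsPerComputeUnit lives in EBX bits 15:8; the sibling count is that field plus one. */
static unsigned int siblings_from_ebx(unsigned int ebx)
{
    return ((ebx >> 8) & 0xff) + 1;
}

int main(void)
{
    unsigned int ebx = 0x00000100;  /* field value 1 -> 2 threads per core */
    printf("smp_num_siblings = %u\n", siblings_from_ebx(ebx));
    return 0;
}
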
diff --git a/patches.arch/14-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch b/patches.arch/14-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch
new file mode 100644
index 0000000000..6a00132e4c
--- /dev/null
+++ b/patches.arch/14-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch
@@ -0,0 +1,94 @@
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 22 Jun 2018 11:34:11 +0200
+Subject: x86/CPU/AMD: Move TOPOEXT reenablement before reading smp_num_siblings
+Git-commit: 64cb188bb68bce71d17be37026d04466519357b0
+Patch-mainline: v4.18 or v4.18-rc2 (next release)
+References: bsc#1089343
+
+The TOPOEXT reenablement is a workaround for broken BIOSen which didn't
+enable the CPUID bit. amd_get_topology_early(), however, relies on
+that bit being set so that it can read out the CPUID leaf and set
+smp_num_siblings properly.
+
+Move the reenablement up to early_init_amd(). While at it, simplify
+amd_get_topology_early().
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/amd.c | 37 +++++++++++++++++--------------------
+ 1 file changed, 17 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -319,12 +319,8 @@ static void legacy_fixup_core_id(struct
+
+ static void amd_get_topology_early(struct cpuinfo_x86 *c)
+ {
+- if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+- u32 eax, ebx, ecx, edx;
+-
+- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+- smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+- }
++ if (cpu_has(c, X86_FEATURE_TOPOEXT))
++ smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+ }
+
+ /*
+@@ -348,7 +344,6 @@ static void amd_get_topology(struct cpui
+ node_id = ecx & 7;
+
+ /* get compute unit information */
+- smp_num_siblings = ((ebx >> 8) & 3) + 1;
+ c->compute_unit_id = ebx & 0xff;
+ cores_per_cu += ((ebx >> 8) & 3);
+
+@@ -575,6 +570,7 @@ static void bsp_init_amd(struct cpuinfo_
+
+ static void early_init_amd(struct cpuinfo_x86 *c)
+ {
++ u64 value;
+ early_init_amd_mc(c);
+
+ /*
+@@ -626,6 +622,20 @@ static void early_init_amd(struct cpuinf
+ if (c->x86 == 0x16 && c->x86_model <= 0xf)
+ msr_set_bit(MSR_AMD64_LS_CFG, 15);
+
++ /* Re-enable TopologyExtensions if switched off by BIOS */
++ if (c->x86 == 0x15 &&
++ (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
++ !cpu_has(c, X86_FEATURE_TOPOEXT)) {
++
++ if (msr_set_bit(0xc0011005, 54) > 0) {
++ rdmsrl(0xc0011005, value);
++ if (value & BIT_64(54)) {
++ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
++ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
++ }
++ }
++ }
++
+ amd_get_topology_early(c);
+ }
+
+@@ -721,19 +731,6 @@ static void init_amd_bd(struct cpuinfo_x
+ {
+ u64 value;
+
+- /* re-enable TopologyExtensions if switched off by BIOS */
+- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+-
+- if (msr_set_bit(0xc0011005, 54) > 0) {
+- rdmsrl(0xc0011005, value);
+- if (value & BIT_64(54)) {
+- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+- pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+- }
+- }
+- }
+-
+ /*
+ * The way access filter has a performance penalty on some workloads.
+ * Disable it on the affected CPUs.
diff --git a/patches.arch/15-cpu-hotplug-boot-HT-siblings-at-least-once.patch b/patches.arch/15-cpu-hotplug-boot-HT-siblings-at-least-once.patch
new file mode 100644
index 0000000000..fe95783cf1
--- /dev/null
+++ b/patches.arch/15-cpu-hotplug-boot-HT-siblings-at-least-once.patch
@@ -0,0 +1,204 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Subject: [PATCH] cpu/hotplug: Boot HT siblings at least once
+Patch-mainline: not yet, under development
+References: bsc#1089343 CVE-2018-3646
+
+Due to the way Machine Check Exceptions work on X86 hyperthreads it's
+required to boot up _all_ logical cores at least once in order to set the
+CR4.MCE bit.
+
+So instead of ignoring the sibling threads right away, let them boot up
+once so they can configure themselves. After they come out of the initial
+boot stage, check whether it's a "secondary" sibling and cancel the operation,
+which puts the CPU back into the offline state.
+
+[jkosina@suse.cz: 4.4 port; we have to bring hyperthreaded siblings down
+ much later, as the hotplug code in 4.4 has not been fully reworked yet,
+ and therefore it's safe to bring the CPUs down only after the hotplug_threads
+ have started running, otherwise we'll be waiting indefinitely for them to
+ complete while attempting to park them]
+
+Reported-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: lkml <linux-kernel@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20180629140611.033996006@linutronix.de
+Signed-off-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/smpboot.c | 2 ++
+ include/linux/cpu.h | 1 +
+ include/linux/smp.h | 2 ++
+ init/main.c | 2 ++
+ kernel/cpu.c | 36 ++++++++++++++++++++++++++++++------
+ kernel/smp.c | 23 +++++++++++++++++++++++
+ 6 files changed, 60 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -1023,6 +1023,8 @@ int native_cpu_up(unsigned int cpu, stru
+
+ irq_unlock_sparse();
+
++ cpu_set_booted(cpu);
++
+ return 0;
+ }
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -176,6 +176,7 @@ int cpu_up(unsigned int cpu);
+ void notify_cpu_starting(unsigned int cpu);
+ extern void cpu_maps_update_begin(void);
+ extern void cpu_maps_update_done(void);
++extern void cpu_set_booted(unsigned int cpu);
+
+ #define cpu_notifier_register_begin cpu_maps_update_begin
+ #define cpu_notifier_register_done cpu_maps_update_done
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -119,6 +119,8 @@ void smp_prepare_boot_cpu(void);
+ extern unsigned int setup_max_cpus;
+ extern void __init setup_nr_cpu_ids(void);
+ extern void __init smp_init(void);
++extern void __init smp_smt_post_init(void);
++extern bool cpu_smt_allowed(unsigned int cpu);
+
+ #else /* !SMP */
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -1039,6 +1039,8 @@ static noinline void __init kernel_init_
+ prepare_namespace();
+ }
+
++ smp_smt_post_init();
++
+ /*
+ * Ok, we have completed the initial bootup, and
+ * we're essentially up and running. Get rid of the
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -26,6 +26,18 @@
+
+ #include "smpboot.h"
+
++/* hotplug state from future kernels */
++struct cpuhp_cpu_state {
++ bool booted_once;
++};
++
++static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = { 0 };
++
++void cpu_set_booted(unsigned int cpu)
++{
++ per_cpu(cpuhp_state, cpu).booted_once = true;
++}
++
+ #ifdef CONFIG_SMP
+ /* Serializes the updates to cpu_online_mask, cpu_present_mask */
+ static DEFINE_MUTEX(cpu_add_remove_lock);
+@@ -434,7 +446,7 @@ out_release:
+ /*
+ * @target unused.
+ */
+-static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
++int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+ {
+ if (cpu_hotplug_disabled)
+ return -EBUSY;
+@@ -469,13 +481,24 @@ static int __init smt_cmdline_disable(ch
+ }
+ early_param("nosmt", smt_cmdline_disable);
+
+-static inline bool cpu_smt_allowed(unsigned int cpu)
++bool cpu_smt_allowed(unsigned int cpu)
+ {
+- return cpu_smt_control == CPU_SMT_ENABLED ||
+- topology_is_primary_thread(cpu);
++ if (cpu_smt_control == CPU_SMT_ENABLED)
++ return true;
++
++ if (topology_is_primary_thread(cpu))
++ return true;
++
++ /*
++ * X86 requires that the sibling threads are at least booted up
++ * once to set the CR4.MCE bit so Machine Check Exceptions can be
++ * handled and do not end up raising the CPU Internal Error line.
++ */
++ return !per_cpu(cpuhp_state, cpu).booted_once;
+ }
++
+ #else
+-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++bool cpu_smt_allowed(unsigned int cpu) { return true; }
+ #endif
+
+ /*
+@@ -548,6 +571,7 @@ static int _cpu_up(unsigned int cpu, int
+
+ if (ret != 0)
+ goto out_notify;
++
+ BUG_ON(!cpu_online(cpu));
+
+ /* Now call notifier in preparation. */
+@@ -585,13 +609,13 @@ int cpu_up(unsigned int cpu)
+ err = -EBUSY;
+ goto out;
+ }
++
+ if (!cpu_smt_allowed(cpu)) {
+ err = -EPERM;
+ goto out;
+ }
+
+ err = _cpu_up(cpu, 0);
+-
+ out:
+ cpu_maps_update_done();
+ return err;
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -562,6 +562,7 @@ void __weak smp_announce(void)
+ printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
+ }
+
++extern int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target);
+ /* Called by boot processor to activate the rest. */
+ void __init smp_init(void)
+ {
+@@ -582,6 +583,28 @@ void __init smp_init(void)
+ smp_cpus_done(setup_max_cpus);
+ }
+
++void __init smp_smt_post_init(void)
++{
++ int cpu, disabled = 0;
++ /*
++ * SMT soft disabling on x86 requires to bring the CPU out of the
++ * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
++ * CPU marked itself as booted_once in native_cpu_up() so the
++ * cpu_smt_allowed() check will now return false if this is not the
++ * primary sibling.
++ */
++ cpu_maps_update_begin();
++ for_each_online_cpu(cpu) {
++ if (!cpu_smt_allowed(cpu)) {
++ disabled++;
++ cpu_down_maps_locked(cpu, 0);
++ }
++ }
++ if (disabled)
++ pr_info("SMT: disabling %d threads\n", disabled);
++ cpu_maps_update_done();
++}
++
+ /*
+ * Call a function on all processors. May be used during early boot while
+ * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
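
The decision taken by cpu_smt_allowed() in the hunk above can be summarised as a small standalone sketch; the booleans stand in for the kernel state and the function is illustrative only:

#include <stdbool.h>
#include <stdio.h>

/* A secondary sibling may be brought up exactly once (to set CR4.MCE) and
 * is refused afterwards unless SMT control is 'enabled'. */
static bool smt_allowed(bool smt_enabled, bool primary_thread, bool booted_once)
{
    if (smt_enabled)
        return true;
    if (primary_thread)
        return true;
    return !booted_once;
}

int main(void)
{
    printf("first boot of secondary sibling: %d\n", smt_allowed(false, false, false)); /* 1 */
    printf("later re-online of that sibling: %d\n", smt_allowed(false, false, true));  /* 0 */
    return 0;
}

smp_smt_post_init() then walks the online CPUs after the initial bring-up and takes every CPU for which this check now fails back down.
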
diff --git a/patches.arch/16-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch b/patches.arch/16-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch
new file mode 100644
index 0000000000..8cc9197ca9
--- /dev/null
+++ b/patches.arch/16-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch
@@ -0,0 +1,70 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Subject: [PATCH] cpu/hotplug: Online siblings when SMT control is turned on
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 215af5499d9e2b55f111d2431ea20218115f29b3 upstream
+
+Writing 'off' to /sys/devices/system/cpu/smt/control offlines all SMT
+siblings. Writing 'on' merely enables the ability to online them, but does
+not online them automatically.
+
+Make 'on' more useful by onlining all offline siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ kernel/cpu.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -907,6 +907,15 @@ static void cpuhp_offline_cpu_device(uns
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+ }
+
++static void cpuhp_online_cpu_device(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
++
++ dev->offline = false;
++ /* Tell user space about the state change */
++ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
++}
++
+ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
+@@ -941,10 +950,22 @@ static int cpuhp_smt_disable(enum cpuhp_
+
+ static int cpuhp_smt_enable(void)
+ {
++ int cpu, ret = 0;
++
+ cpu_maps_update_begin();
+ cpu_smt_control = CPU_SMT_ENABLED;
++ for_each_present_cpu(cpu) {
++ /* Skip online CPUs and CPUs on offline nodes */
++ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
++ continue;
++ ret = _cpu_up(cpu, 0);
++ if (ret)
++ break;
++ /* See comment in cpuhp_smt_disable() */
++ cpuhp_online_cpu_device(cpu);
++ }
+ cpu_maps_update_done();
+- return 0;
++ return ret;
+ }
+
+ static ssize_t
+@@ -975,7 +996,7 @@ store_smt_control(struct device *dev, st
+ if (ctrlval != cpu_smt_control) {
+ switch (ctrlval) {
+ case CPU_SMT_ENABLED:
+- cpuhp_smt_enable();
++ ret = cpuhp_smt_enable();
+ break;
+ case CPU_SMT_DISABLED:
+ case CPU_SMT_FORCE_DISABLED:
diff --git a/patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch b/patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch
new file mode 100644
index 0000000000..71a53daec6
--- /dev/null
+++ b/patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch
@@ -0,0 +1,72 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 3 May 2018 08:35:42 -0700
+Subject: [PATCH 1/8] x86, l1tf: Increase 32bit PAE __PHYSICAL_PAGE_MASK
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+We need to protect memory inside the guest against L1TF
+by inverting the right bits to point to non-existing memory.
+
+The hypervisor should already protect itself against the guest by flushing
+the caches as needed, but pages inside the guest are not protected against
+attacks from other processes in that guest.
+
+Our inverted PTE mask has to match the host to provide the full
+protection for all pages the host could possibly map into our guest.
+The host is likely 64bit and may use more than 43 bits of
+memory. We want to set all possible bits to be safe here.
+
+On 32bit PAE the max PTE mask is currently set to 44 bit because that is
+the limit imposed by 32bit unsigned long PFNs in the VMs. This limits
+the mask to be below what the host could possibly use for physical
+pages.
+
+The L1TF PROT_NONE protection code uses the PTE masks to determine
+what bits to invert to make sure the higher bits are set for unmapped
+entries to prevent L1TF speculation attacks against EPT inside guests.
+
+We want to invert all bits that could be used by the host.
+
+So increase the mask on 32bit PAE to 52 to match 64bit.
+
+The real limit for a 32bit OS is still 44 bits.
+
+All Linux PTEs are created from unsigned long PFNs, so cannot be
+higher than 44 bits on a 32bit kernel. So these extra PFN
+bits should never be set. The only users of this macro are using
+it to look at PTEs, so it's safe.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+
+v2: Improve commit message.
+---
+ arch/x86/include/asm/page_32_types.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
+index aa30c3241ea7..0d5c739eebd7 100644
+--- a/arch/x86/include/asm/page_32_types.h
++++ b/arch/x86/include/asm/page_32_types.h
+@@ -29,8 +29,13 @@
+ #define N_EXCEPTION_STACKS 1
+
+ #ifdef CONFIG_X86_PAE
+-/* 44=32+12, the limit we can fit into an unsigned long pfn */
+-#define __PHYSICAL_MASK_SHIFT 44
++/*
++ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
++ * but we need the full mask to make sure inverted PROT_NONE
++ * entries have all the host bits set in a guest.
++ * The real limit is still 44 bits.
++ */
++#define __PHYSICAL_MASK_SHIFT 52
+ #define __VIRTUAL_MASK_SHIFT 32
+
+ #else /* !CONFIG_X86_PAE */
+--
+2.14.4
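
To see what the wider shift buys, here is a quick standalone illustration of the PFN mask that follows from __PHYSICAL_MASK_SHIFT; the derivation mirrors the kernel macros but is simplified for the sketch:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* roughly __PHYSICAL_MASK & PAGE_MASK for a given mask shift */
static uint64_t pte_pfn_mask(unsigned int physical_mask_shift)
{
    uint64_t physical_mask = (1ULL << physical_mask_shift) - 1;
    return physical_mask & ~((1ULL << PAGE_SHIFT) - 1);
}

int main(void)
{
    printf("44-bit shift: 0x%llx\n", (unsigned long long)pte_pfn_mask(44));
    printf("52-bit shift: 0x%llx\n", (unsigned long long)pte_pfn_mask(52));
    return 0;
}

With the 52-bit shift the inversion used for PROT_NONE and swap entries can set every PFN bit the host might use, which is the point of the change.
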
diff --git a/patches.arch/x86-l1tf-02-change-order-of-offset-type.patch b/patches.arch/x86-l1tf-02-change-order-of-offset-type.patch
new file mode 100644
index 0000000000..1d5fe1d2ca
--- /dev/null
+++ b/patches.arch/x86-l1tf-02-change-order-of-offset-type.patch
@@ -0,0 +1,89 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 27 Apr 2018 09:06:34 -0700
+Subject: [PATCH 2/8] x86/speculation/l1tf: Change order of offset/type in swap
+ entry
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+Here's a patch that switches the order of "type" and
+"offset" in the x86-64 encoding in preparation for the next
+patch, which inverts the swap entry to protect against L1TF.
+
+That means that now the offset is bits 9-58 in the page table, and that
+the type is in the bits that hardware generally doesn't care about.
+
+That, in turn, means that if you have a desktop chip with only 40 bits of
+physical addressing, now that the offset starts at bit 9, you still have
+to have 30 bits of offset actually *in use* until bit 39 ends up being
+clear.
+
+So that's 4 terabyte of swap space (because the offset is counted in
+pages, so 30 bits of offset is 42 bits of actual coverage). With bigger
+physical addressing, that obviously grows further, until you hit the limit
+of the offset (at 50 bits of offset - 62 bits of actual swap file
+coverage).
+
+[updated description and minor tweaks by AK]
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+ arch/x86/include/asm/pgtable_64.h | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -274,7 +274,7 @@ static inline int pgd_large(pgd_t pgd) {
+ *
+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
+- * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry
++ * | TYPE (59-63) | OFFSET (9-58) |0|X|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+@@ -282,19 +282,28 @@ static inline int pgd_large(pgd_t pgd) {
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
+ */
+-#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+-#define SWP_TYPE_BITS 5
+-/* Place the offset above the type: */
+-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
++#define SWP_TYPE_BITS 5
++
++#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
++
++/* We always extract/encode the offset by shifting it all the way up, and then down again */
++#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
+
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+-#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
+- & ((1U << SWP_TYPE_BITS) - 1))
+-#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
+-#define __swp_entry(type, offset) ((swp_entry_t) { \
+- ((type) << (SWP_TYPE_FIRST_BIT)) \
+- | ((offset) << SWP_OFFSET_FIRST_BIT) })
++/* Extract the high bits for type */
++#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
++
++/* Shift up (to get rid of type), then down to get value */
++#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
++
++/*
++ * Shift the offset up "too far" by TYPE bits, then down again
++ */
++#define __swp_entry(type, offset) ((swp_entry_t) { \
++ ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
++ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
++
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+ #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+
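
The capacity note in the message above works out as follows (a worked example, with the 40-bit physical-address chip assumed in the text):

#include <stdio.h>

int main(void)
{
    /* offset starts at bit 9, so 30 offset bits stay below bit 39 */
    unsigned long long pages = 1ULL << 30;
    unsigned long long bytes = pages << 12;   /* offsets count 4 KiB pages */

    printf("swap coverage: %llu TiB\n", bytes >> 40);   /* prints 4 */
    return 0;
}
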
diff --git a/patches.arch/x86-l1tf-03-protect-swap-entries.patch b/patches.arch/x86-l1tf-03-protect-swap-entries.patch
new file mode 100644
index 0000000000..3ae6bcd01b
--- /dev/null
+++ b/patches.arch/x86-l1tf-03-protect-swap-entries.patch
@@ -0,0 +1,78 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 27 Apr 2018 09:06:34 -0700
+Subject: [PATCH 2/8] x86, l1tf: Protect swap entries against L1TF
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+With L1 terminal fault the CPU speculates into unmapped PTEs, and
+the resulting side effects allow reading the memory the PTE is pointing
+to, if its values are still in the L1 cache.
+
+For swapped out pages Linux uses unmapped PTEs and stores a swap entry
+into them.
+
+We need to make sure the swap entry is not pointing to valid memory,
+which requires setting higher bits (between bit 36 and bit 45) that
+are inside the CPUs physical address space, but outside any real
+memory.
+
+To do this we invert the offset to make sure the higher bits are always
+set, as long as the swap file is not too big.
+
+Note there is no workaround for 32bit !PAE, or on systems which
+have more than MAX_PA/2 worth of memory. The latter case is very unlikely
+to happen on real systems.
+
+[updated description and minor tweaks by AK]
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+---
+v2: Split out patch that swaps fields.
+---
+ arch/x86/include/asm/pgtable_64.h | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -274,13 +274,16 @@ static inline int pgd_large(pgd_t pgd) {
+ *
+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
+- * | TYPE (59-63) | OFFSET (9-58) |0|X|X|X| X| X|X|SD|0| <- swp entry
++ * | TYPE (59-63) | ~OFFSET (9-58) |0|X|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+ * there. We also need to avoid using A and D because of an
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
++ *
++ * The offset is inverted by a binary not operation to make the high
++ * physical bits set.
+ */
+ #define SWP_TYPE_BITS 5
+
+@@ -295,13 +298,15 @@ static inline int pgd_large(pgd_t pgd) {
+ #define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+ /* Shift up (to get rid of type), then down to get value */
+-#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
++#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+ /*
+ * Shift the offset up "too far" by TYPE bits, then down again
++ * The offset is inverted by a binary not operation to make the high
++ * physical bits set.
+ */
+ #define __swp_entry(type, offset) ((swp_entry_t) { \
+- ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
++ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
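
A standalone userspace rendering of the resulting encode/decode, useful for checking the round trip; the bit positions mirror the macros in pgtable_64.h, but treat this as a sketch rather than kernel code:

#include <stdio.h>
#include <stdint.h>

#define SWP_TYPE_BITS        5
#define SWP_OFFSET_FIRST_BIT 9   /* _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT     (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

/* store the offset inverted so its high bits are set in the PTE */
static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
    return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
           (type << (64 - SWP_TYPE_BITS));
}

static uint64_t swp_type(uint64_t val)
{
    return val >> (64 - SWP_TYPE_BITS);
}

static uint64_t swp_offset(uint64_t val)
{
    return ~val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT;
}

int main(void)
{
    uint64_t e = swp_entry(3, 0x12345);

    printf("raw entry   = 0x%016llx\n", (unsigned long long)e);
    printf("type back   = %llu\n", (unsigned long long)swp_type(e));
    printf("offset back = 0x%llx\n", (unsigned long long)swp_offset(e));
    return 0;
}

The raw entry shows the offset field with its high bits set, which is exactly what keeps a speculating CPU away from real memory.
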
diff --git a/patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch b/patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
new file mode 100644
index 0000000000..4a28b26c8c
--- /dev/null
+++ b/patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
@@ -0,0 +1,244 @@
+From 73a8594bdc5d88bdb125e458a4147669b8ff1cd1 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 27 Apr 2018 09:47:37 -0700
+Subject: [PATCH 3/8] x86, l1tf: Protect PROT_NONE PTEs against speculation
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+We also need to protect PTEs that are set to PROT_NONE against
+L1TF speculation attacks.
+
+This is important inside guests, because L1TF speculation
+bypasses physical page remapping. While the VM has its own
+mitigations preventing leaking data from other VMs into
+the guest, this would still risk leaking the wrong page
+inside the current guest.
+
+This uses the same technique as Linus' swap entry patch:
+while an entry is in PROTNONE state we invert the
+complete PFN part of it. This ensures that the
+highest bit will point to non-existing memory.
+
+The invert is done by pte/pmd_modify and pfn/pmd/pud_pte for
+PROTNONE and pte/pmd/pud_pfn undo it.
+
+We assume that no one tries to touch the PFN part of
+a PTE without using these primitives.
+
+This doesn't handle the case that MMIO is on the top
+of the CPU physical memory. If such an MMIO region
+was exposed by an unprivileged driver for mmap
+it would be possible to attack some real memory.
+However this situation is all rather unlikely.
+
+For 32bit non PAE we don't try inversion because
+there are really not enough bits to protect anything.
+
+Q: Why does the guest need to be protected when the
+HyperVisor already has L1TF mitigations?
+A: Here's an example:
+You have physical pages 1 2. They get mapped into a guest as
+GPA 1 -> PA 2
+GPA 2 -> PA 1
+through EPT.
+
+The L1TF speculation ignores the EPT remapping.
+
+Now the guest kernel maps GPA 1 to process A and GPA 2 to process B,
+and they belong to different users and should be isolated.
+
+A sets the GPA 1 PA 2 PTE to PROT_NONE to bypass the EPT remapping
+and gets read access to the underlying physical page. Which
+in this case points to PA 2, so it can read process B's data,
+if it happened to be in L1.
+
+So we broke isolation inside the guest.
+
+There's nothing the hypervisor can do about this. This
+mitigation has to be done in the guest.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+v2: Use new helper to generate XOR mask to invert (Linus)
+v3: Use inline helper for protnone mask checking
+v4: Use inline helpers to check for PROT_NONE changes
+---
+ arch/x86/include/asm/pgtable-2level.h | 17 +++++++++++++++
+ arch/x86/include/asm/pgtable-3level.h | 2 +
+ arch/x86/include/asm/pgtable-invert.h | 32 ++++++++++++++++++++++++++++
+ arch/x86/include/asm/pgtable.h | 38 ++++++++++++++++++++++++----------
+ arch/x86/include/asm/pgtable_64.h | 2 +
+ 5 files changed, 80 insertions(+), 11 deletions(-)
+ create mode 100644 arch/x86/include/asm/pgtable-invert.h
+
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(un
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+ #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+
++/* No inverted PFNs on 2 level page tables */
++
++static inline u64 protnone_mask(u64 val)
++{
++ return 0;
++}
++
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
++{
++ return val;
++}
++
++static inline bool __pte_needs_invert(u64 val)
++{
++ return false;
++}
++
+ #endif /* _ASM_X86_PGTABLE_2LEVEL_H */
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -184,4 +184,6 @@ static inline pmd_t native_pmdp_get_and_
+ #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
+ #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
+
++#include <asm/pgtable-invert.h>
++
+ #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
+--- /dev/null
++++ b/arch/x86/include/asm/pgtable-invert.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_PGTABLE_INVERT_H
++#define _ASM_PGTABLE_INVERT_H 1
++
++#ifndef __ASSEMBLY__
++
++static inline bool __pte_needs_invert(u64 val)
++{
++ return (val & (_PAGE_PRESENT|_PAGE_PROTNONE)) == _PAGE_PROTNONE;
++}
++
++/* Get a mask to xor with the page table entry to get the correct pfn. */
++static inline u64 protnone_mask(u64 val)
++{
++ return __pte_needs_invert(val) ? ~0ull : 0;
++}
++
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
++{
++ /*
++ * When a PTE transitions from NONE to !NONE or vice-versa
++ * invert the PFN part to stop speculation.
++ * pte_pfn undoes this when needed.
++ */
++ if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
++ val = (val & ~mask) | (~val & mask);
++ return val;
++}
++
++#endif /* __ASSEMBLY__ */
++
++#endif
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -162,19 +162,29 @@ static inline int pte_special(pte_t pte)
+ return pte_flags(pte) & _PAGE_SPECIAL;
+ }
+
++/* Entries that were set to PROT_NONE are inverted */
++
++static inline u64 protnone_mask(u64 val);
++
+ static inline unsigned long pte_pfn(pte_t pte)
+ {
+- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
++ unsigned long pfn = pte_val(pte);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
++ unsigned long pfn = pmd_val(pmd);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pud_pfn(pud_t pud)
+ {
+- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
++ unsigned long pfn = pud_val(pud);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ }
+
+ #define pte_page(pte) pfn_to_page(pte_pfn(pte))
+@@ -390,19 +400,25 @@ static inline pgprotval_t massage_pgprot
+
+ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- massage_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PTE_PFN_MASK;
++ return __pte(pfn | massage_pgprot(pgprot));
+ }
+
+ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- massage_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PHYSICAL_PMD_PAGE_MASK;
++ return __pmd(pfn | massage_pgprot(pgprot));
+ }
+
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
++
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+- pteval_t val = pte_val(pte);
++ pteval_t val = pte_val(pte), oldval = val;
+
+ /*
+ * Chop off the NX bit (if present), and add the NX portion of
+@@ -410,17 +426,17 @@ static inline pte_t pte_modify(pte_t pte
+ */
+ val &= _PAGE_CHG_MASK;
+ val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
+-
++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
+ return __pte(val);
+ }
+
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+- pmdval_t val = pmd_val(pmd);
++ pmdval_t val = pmd_val(pmd), oldval = val;
+
+ val &= _HPAGE_CHG_MASK;
+ val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+-
++ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
+ return __pmd(val);
+ }
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -229,6 +229,8 @@ extern void cleanup_highmap(void);
+ extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+ extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
++#include <asm/pgtable-invert.h>
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_X86_PGTABLE_64_H */
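
The inversion helpers above are small enough to model in isolation. The following sketch uses simplified constants for _PAGE_PRESENT, _PAGE_PROTNONE and the PFN mask; it is an illustration of flip_protnone_guard(), not the kernel code itself:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_PROTNONE (1ULL << 8)
#define PTE_PFN_MASK   0x000FFFFFFFFFF000ULL   /* PFN bits 12-51 */

static bool pte_needs_invert(uint64_t val)
{
    return (val & (_PAGE_PRESENT | _PAGE_PROTNONE)) == _PAGE_PROTNONE;
}

static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
    /* invert the PFN part only when the entry changes NONE <-> !NONE */
    if (pte_needs_invert(oldval) != pte_needs_invert(val))
        val = (val & ~mask) | (~val & mask);
    return val;
}

int main(void)
{
    uint64_t pte  = (0x1234ULL << 12) | _PAGE_PRESENT;         /* mapped page */
    uint64_t none = (pte & ~_PAGE_PRESENT) | _PAGE_PROTNONE;   /* after mprotect(PROT_NONE) */

    none = flip_protnone_guard(pte, none, PTE_PFN_MASK);
    printf("stored PFN field: 0x%llx\n",
           (unsigned long long)((none & PTE_PFN_MASK) >> 12));  /* high bits set */
    return 0;
}

pte_pfn() performs the matching XOR on the way out, so readers of the PTE still see PFN 0x1234.
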
diff --git a/patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch b/patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch
new file mode 100644
index 0000000000..507e3fc447
--- /dev/null
+++ b/patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch
@@ -0,0 +1,46 @@
+From 17df1843b8d59783742f2c0becad3eb9f275b76a Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Mon, 23 Apr 2018 15:57:54 -0700
+Subject: [PATCH 4/8] x86, l1tf: Make sure the first page is always reserved
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+The L1TF workaround doesn't make any attempt to mitigate speculate
+accesses to the first physical page for zeroed PTEs. Normally
+it only contains some data from the early real mode BIOS.
+
+I couldn't convince myself we always reserve the first page in
+all configurations, so add an extra reservation call to
+make sure it is really reserved. In most configurations (e.g.
+with the standard reservations) it's likely a nop.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+
+---
+v2: improve comment
+---
+ arch/x86/kernel/setup.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 5c623dfe39d1..89fd35349412 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -823,6 +823,12 @@ void __init setup_arch(char **cmdline_p)
+ memblock_reserve(__pa_symbol(_text),
+ (unsigned long)__bss_stop - (unsigned long)_text);
+
++ /*
++ * Make sure page 0 is always reserved because on systems with
++ * L1TF its contents can be leaked to user processes.
++ */
++ memblock_reserve(0, PAGE_SIZE);
++
+ early_reserve_initrd();
+
+ /*
+--
+2.14.4
diff --git a/patches.arch/x86-l1tf-06-add-sysfs-report.patch b/patches.arch/x86-l1tf-06-add-sysfs-report.patch
new file mode 100644
index 0000000000..f83591f8b3
--- /dev/null
+++ b/patches.arch/x86-l1tf-06-add-sysfs-report.patch
@@ -0,0 +1,235 @@
+From 8865a468fa92e1e507b820f74e8d051c50ef49dc Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 27 Apr 2018 14:44:53 -0700
+Subject: [PATCH 5/8] x86, l1tf: Add sysfs report for l1tf
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+mhocko@suse.com:
+feature bits have to abuse a different word because word 7 is full already.
+Followed bp's lead and hooked into word 2.
+
+L1TF core kernel workarounds are cheap and normally always enabled.
+However we still want to report in sysfs if the system is vulnerable
+or mitigated. Add the necessary checks.
+
+- We extend the existing checks for Meltdowns to determine if the system is
+vulnerable. This excludes some Atom CPUs which don't have this
+problem.
+- We check for 32bit non PAE and warn
+- If the system has more than MAX_PA/2 physical memory the
+invert page workarounds don't protect the system against
+the L1TF attack anymore, because an inverted physical address
+will point to valid memory. Print a warning in this case
+and report that the system is vulnerable.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+
+---
+v2: Use positive instead of negative flag for WA. Fix override
+reporting.
+v3: Fix L1TF_WA flag settting
+v4: Rebase to SSB tree
+v5: Minor cleanups. No functional changes.
+Don't mark atoms and knights as vulnerable
+v6: Change _WA to _FIX
+v7: Use common sysfs function
+v8: Improve commit message
+Move mitigation check into check_bugs.
+Integrate memory size checking into this patch
+White space changes. Move l1tf_pfn_limit here.
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +
+ arch/x86/include/asm/processor.h | 5 ++++
+ arch/x86/kernel/cpu/bugs.c | 40 +++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c | 20 ++++++++++++++++++
+ drivers/base/cpu.c | 8 +++++++
+ include/linux/cpu.h | 2 +
+ 6 files changed, 77 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -77,6 +77,7 @@
+ * word 7 und we not even attempting to do a nasty kABI breakage.
+ */
+ #define X86_FEATURE_ZEN ( 2*32+ 4) /* "" CPU is AMD family 0x17 (Zen) */
++#define X86_FEATURE_L1TF_FIX ( 2*32+5) /* "" L1TF workaround used */
+
+ /* Other features, Linux-defined mapping, word 3 */
+ /* This range is used for feature bits which conflict or are synthesized */
+@@ -327,5 +328,6 @@
+ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
++#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -187,6 +187,11 @@ extern const struct seq_operations cpuin
+
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+
++static inline unsigned long l1tf_pfn_limit(void)
++{
++ return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
++}
++
+ extern void early_cpu_init(void);
+ extern void identify_boot_cpu(void);
+ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -27,9 +27,11 @@
+ #include <asm/cacheflush.h>
+ #include <asm/intel-family.h>
+ #include <asm/spec_ctrl.h>
++#include <asm/e820.h>
+
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
++static void __init l1tf_select_mitigation(void);
+
+ /*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+@@ -81,6 +83,8 @@ void __init check_bugs(void)
+ */
+ ssb_select_mitigation();
+
++ l1tf_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -209,6 +213,32 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++static void __init l1tf_select_mitigation(void)
++{
++ u64 half_pa;
++
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return;
++
++#if CONFIG_PGTABLE_LEVELS == 2
++ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
++ return;
++#endif
++
++ /*
++ * This is extremely unlikely to happen because almost all
++ * systems have far more MAX_PA/2 than RAM can be fit into
++ * DIMM slots.
++ */
++ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
++ if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
++ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
++ return;
++ }
++
++ setup_force_cpu_cap(X86_FEATURE_L1TF_FIX);
++}
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -693,6 +723,11 @@ static ssize_t cpu_show_common(struct de
+ case X86_BUG_SPEC_STORE_BYPASS:
+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
++ case X86_BUG_L1TF:
++ if (boot_cpu_has(X86_FEATURE_L1TF_FIX))
++ return sprintf(buf, "Mitigation: Page Table Inversion\n");
++ break;
++
+ default:
+ break;
+ }
+@@ -721,4 +756,9 @@ ssize_t cpu_show_spec_store_bypass(struc
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
+ }
++
++ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -921,6 +921,21 @@ static const __initconst struct x86_cpu_
+ {}
+ };
+
++static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
++ /* in addition to cpu_no_speculation */
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
++ {}
++};
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+@@ -946,6 +961,11 @@ static void __init cpu_set_bug_bits(stru
+ return;
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
++
++ if (x86_match_cpu(cpu_no_l1tf))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_L1TF);
+ }
+
+ /*
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -524,16 +524,24 @@ ssize_t __weak cpu_show_spec_store_bypas
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_l1tf(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+ static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
++static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
++ &dev_attr_l1tf.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -48,6 +48,8 @@ extern ssize_t cpu_show_spectre_v2(struc
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_l1tf(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
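
The MAX_PA/2 check in l1tf_select_mitigation() above reduces to simple arithmetic. A worked example with an assumed 46-bit physical address width:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* mirrors l1tf_pfn_limit(): highest PFN still covered by the inversion */
static uint64_t l1tf_pfn_limit(unsigned int x86_phys_bits)
{
    return (1ULL << (x86_phys_bits - 1 - PAGE_SHIFT)) - 1;
}

int main(void)
{
    unsigned int phys_bits = 46;   /* sample value for a native system */
    uint64_t half_pa = l1tf_pfn_limit(phys_bits) << PAGE_SHIFT;

    /* prints 32767, i.e. just under 32 TiB of protected address space */
    printf("half_pa = %llu GiB\n", (unsigned long long)(half_pa >> 30));
    return 0;
}

If RAM is mapped above that boundary, the warning in the patch fires and the system is reported vulnerable, because an inverted PFN could then land on real memory.
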
diff --git a/patches.arch/x86-l1tf-07-limit-swap-file-size.patch b/patches.arch/x86-l1tf-07-limit-swap-file-size.patch
new file mode 100644
index 0000000000..67264a9b9a
--- /dev/null
+++ b/patches.arch/x86-l1tf-07-limit-swap-file-size.patch
@@ -0,0 +1,140 @@
+From aaedeb15cb5c75e44b29e895b60c2dbffa1a7e14 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 27 Apr 2018 15:29:17 -0700
+Subject: [PATCH 7/8] x86, l1tf: Limit swap file size to MAX_PA/2
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+For the L1TF workaround we want to limit the swap file size to below
+MAX_PA/2, so that the higher bits of the inverted swap offset never
+point to valid memory.
+
+Add a way for the architecture to override the swap file
+size check in swapfile.c and add a x86 specific max swapfile check
+function that enforces that limit.
+
+The check is only enabled if the CPU is vulnerable to L1TF.
+
+In VMs with 42bit MAX_PA the typical limit is 2TB now,
+on a native system with 46bit PA it is 32TB. The limit
+is only per individual swap file, so it's always possible
+to exceed these limits with multiple swap files or
+partitions.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+v2: Use new helper for maxpa_mask computation.
+v3: Use l1tf_pfn_limit (Thomas)
+Reformat comment
+v4: Use boot_cpu_has_bug
+v5: Move l1tf_pfn_limit to earlier patch
+---
+ arch/x86/mm/init.c | 15 +++++++++++++++
+ include/linux/swapfile.h | 2 ++
+ mm/swapfile.c | 46 ++++++++++++++++++++++++++++++----------------
+ 3 files changed, 47 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -4,6 +4,8 @@
+ #include <linux/swap.h>
+ #include <linux/memblock.h>
+ #include <linux/bootmem.h> /* for max_low_pfn */
++#include <linux/swapfile.h>
++#include <linux/swapops.h>
+
+ #include <asm/cacheflush.h>
+ #include <asm/e820.h>
+@@ -767,3 +769,16 @@ void update_cache_mode_entry(unsigned en
+ __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
+ __pte2cachemode_tbl[entry] = cache;
+ }
++
++unsigned long max_swapfile_size(void)
++{
++ unsigned long pages;
++
++ pages = generic_max_swapfile_size();
++
++ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
++ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
++ pages = min_t(unsigned long, l1tf_pfn_limit() + 1, pages);
++ }
++ return pages;
++}
+--- a/include/linux/swapfile.h
++++ b/include/linux/swapfile.h
+@@ -9,5 +9,7 @@ extern spinlock_t swap_lock;
+ extern struct plist_head swap_active_head;
+ extern struct swap_info_struct *swap_info[];
+ extern int try_to_unuse(unsigned int, bool, unsigned long);
++extern unsigned long generic_max_swapfile_size(void);
++extern unsigned long max_swapfile_size(void);
+
+ #endif /* _LINUX_SWAPFILE_H */
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2212,6 +2212,35 @@ static int claim_swapfile(struct swap_in
+ return 0;
+ }
+
++
++/*
++ * Find out how many pages are allowed for a single swap device. There
++ * are two limiting factors:
++ * 1) the number of bits for the swap offset in the swp_entry_t type, and
++ * 2) the number of bits in the swap pte, as defined by the different
++ * architectures.
++ *
++ * In order to find the largest possible bit mask, a swap entry with
++ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
++ * decoded to a swp_entry_t again, and finally the swap offset is
++ * extracted.
++ *
++ * This will mask all the bits from the initial ~0UL mask that can't
++ * be encoded in either the swp_entry_t or the architecture definition
++ * of a swap pte.
++ */
++unsigned long generic_max_swapfile_size(void)
++{
++ return swp_offset(pte_to_swp_entry(
++ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
++}
++
++/* Can be overridden by an architecture for additional checks. */
++__weak unsigned long max_swapfile_size(void)
++{
++ return generic_max_swapfile_size();
++}
++
+ static unsigned long read_swap_header(struct swap_info_struct *p,
+ union swap_header *swap_header,
+ struct inode *inode)
+@@ -2247,22 +2276,7 @@ static unsigned long read_swap_header(st
+ p->cluster_next = 1;
+ p->cluster_nr = 0;
+
+- /*
+- * Find out how many pages are allowed for a single swap
+- * device. There are two limiting factors: 1) the number
+- * of bits for the swap offset in the swp_entry_t type, and
+- * 2) the number of bits in the swap pte as defined by the
+- * different architectures. In order to find the
+- * largest possible bit mask, a swap entry with swap type 0
+- * and swap offset ~0UL is created, encoded to a swap pte,
+- * decoded to a swp_entry_t again, and finally the swap
+- * offset is extracted. This will mask all the bits from
+- * the initial ~0UL mask that can't be encoded in either
+- * the swp_entry_t or the architecture definition of a
+- * swap pte.
+- */
+- maxpages = swp_offset(pte_to_swp_entry(
+- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
++ maxpages = max_swapfile_size();
+ last_page = swap_header->info.last_page;
+ if (!last_page) {
+ pr_warn("Empty swap-file\n");
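
The per-swapfile limit introduced above matches the numbers quoted in the commit message; a short worked example with assumed physical-address widths:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* l1tf_pfn_limit() + 1 pages, expressed in bytes: exactly MAX_PA/2 */
static uint64_t l1tf_max_swap_bytes(unsigned int x86_phys_bits)
{
    uint64_t limit_pages = 1ULL << (x86_phys_bits - 1 - PAGE_SHIFT);
    return limit_pages << PAGE_SHIFT;
}

int main(void)
{
    printf("42-bit guest:  %llu TiB per swap file\n",
           (unsigned long long)(l1tf_max_swap_bytes(42) >> 40));  /* 2 */
    printf("46-bit native: %llu TiB per swap file\n",
           (unsigned long long)(l1tf_max_swap_bytes(46) >> 40));  /* 32 */
    return 0;
}
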
diff --git a/patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch b/patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch
new file mode 100644
index 0000000000..8e7d7f8477
--- /dev/null
+++ b/patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch
@@ -0,0 +1,280 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 3 May 2018 16:39:51 -0700
+Subject: [PATCH 8/8] mm, l1tf: Disallow non privileged high MMIO PROT_NONE
+ mappings
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+For L1TF PROT_NONE mappings are protected by inverting the PFN in the
+page table entry. This sets the high bits in the CPU's address space,
+thus making sure not to point an unmapped entry to valid
+cached memory.
+
+Some server system BIOS put the MMIO mappings high up in the physical
+address space. If such a high mapping was mapped to an unprivileged
+user they could attack low memory by setting such a mapping to
+PROT_NONE. This could happen through a special device driver
+which is not access protected. Normal /dev/mem is of course
+access protected.
+
+To avoid this we forbid PROT_NONE mappings or mprotect for high MMIO
+mappings.
+
+Valid page mappings are allowed because the system is then unsafe
+anyway.
+
+We don't expect users to commonly use PROT_NONE on MMIO. But
+to minimize any impact here we only do this if the mapping actually
+refers to a high MMIO address (defined as the MAX_PA-1 bit being set),
+and also skip the check for root.
+
+For mmaps this is straightforward and can be handled in vm_insert_pfn
+and in remap_pfn_range().
+
+For mprotect it's a bit trickier. At the point we're looking at the
+actual PTEs a lot of state has been changed and would be difficult
+to undo on an error. Since this is an uncommon case we use a separate
+early page table walk pass for MMIO PROT_NONE mappings that
+checks for this condition early. For non MMIO and non PROT_NONE
+there are no changes.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+
+---
+v2: Use new helpers added earlier
+v3: Fix inverted check added in v3
+v4: Use l1tf_pfn_limit (Thomas)
+Add comment for locked down kernels
+v5: Use boot_cpu_has_bug. Check bug early in arch_has_pfn_modify_check
+---
+ arch/x86/include/asm/pgtable.h | 8 ++++++
+ arch/x86/mm/mmap.c | 21 +++++++++++++++++
+ include/asm-generic/pgtable.h | 12 ++++++++++
+ mm/memory.c | 30 +++++++++++++++++++------
+ mm/mprotect.c | 49 +++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 113 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -1017,6 +1017,14 @@ static inline u16 pte_flags_pkey(unsigne
+ #endif
+ }
+
++#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
++extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
++
++static inline bool arch_has_pfn_modify_check(void)
++{
++ return boot_cpu_has_bug(X86_BUG_L1TF);
++}
++
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
+
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area
+ return "[mpx]";
+ return NULL;
+ }
++
++/*
++ * Only allow root to set high MMIO mappings to PROT_NONE.
++ * This prevents an unprivileged user from setting them to PROT_NONE and
++ * inverting them, then pointing to valid memory for L1TF speculation.
++ *
++ * Note: for locked down kernels may want to disable the root override.
++ */
++bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
++{
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return true;
++ if (!__pte_needs_invert(pgprot_val(prot)))
++ return true;
++ /* If it's real memory always allow */
++ if (pfn_valid(pfn))
++ return true;
++ if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
++ return false;
++ return true;
++}
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -807,4 +807,16 @@ static inline int pmd_free_pte_page(pmd_
+ #define io_remap_pfn_range remap_pfn_range
+ #endif
+
++#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
++static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
++{
++ return true;
++}
++
++static inline bool arch_has_pfn_modify_check(void)
++{
++ return false;
++}
++#endif
++
+ #endif /* _ASM_GENERIC_PGTABLE_H */
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1616,6 +1616,10 @@ int vm_insert_pfn(struct vm_area_struct
+
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
++
++ if (!pfn_modify_allowed(pfn, pgprot))
++ return -EACCES;
++
+ if (track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV)))
+ return -EINVAL;
+
+@@ -1633,6 +1637,9 @@ int vm_insert_mixed(struct vm_area_struc
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+
++ if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), vma->vm_page_prot))
++ return -EACCES;
++
+ /*
+ * If we don't have pte special, then we have to use the pfn_valid()
+ * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+@@ -1661,6 +1668,7 @@ static int remap_pte_range(struct mm_str
+ {
+ pte_t *pte;
+ spinlock_t *ptl;
++ int err = 0;
+
+ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ if (!pte)
+@@ -1668,12 +1676,16 @@ static int remap_pte_range(struct mm_str
+ arch_enter_lazy_mmu_mode();
+ do {
+ BUG_ON(!pte_none(*pte));
++ if (!pfn_modify_allowed(pfn, prot)) {
++ err = -EACCES;
++ break;
++ }
+ set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(pte - 1, ptl);
+- return 0;
++ return err;
+ }
+
+ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1682,6 +1694,7 @@ static inline int remap_pmd_range(struct
+ {
+ pmd_t *pmd;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ pmd = pmd_alloc(mm, pud, addr);
+@@ -1690,9 +1703,10 @@ static inline int remap_pmd_range(struct
+ VM_BUG_ON(pmd_trans_huge(*pmd));
+ do {
+ next = pmd_addr_end(addr, end);
+- if (remap_pte_range(mm, pmd, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pte_range(mm, pmd, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (pmd++, addr = next, addr != end);
+ return 0;
+ }
+@@ -1703,6 +1717,7 @@ static inline int remap_pud_range(struct
+ {
+ pud_t *pud;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ pud = pud_alloc(mm, pgd, addr);
+@@ -1710,9 +1725,10 @@ static inline int remap_pud_range(struct
+ return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+- if (remap_pmd_range(mm, pud, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pmd_range(mm, pud, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (pud++, addr = next, addr != end);
+ return 0;
+ }
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -274,6 +274,42 @@ unsigned long change_protection(struct v
+ return pages;
+ }
+
++static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
++ unsigned long next, struct mm_walk *walk)
++{
++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
++ 0 : -EACCES;
++}
++
++static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
++ unsigned long addr, unsigned long next,
++ struct mm_walk *walk)
++{
++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
++ 0 : -EACCES;
++}
++
++static int prot_none_test(unsigned long addr, unsigned long next,
++ struct mm_walk *walk)
++{
++ return 0;
++}
++
++static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
++ unsigned long end, unsigned long newflags)
++{
++ pgprot_t new_pgprot = vm_get_page_prot(newflags);
++ struct mm_walk prot_none_walk = {
++ .pte_entry = prot_none_pte_entry,
++ .hugetlb_entry = prot_none_hugetlb_entry,
++ .test_walk = prot_none_test,
++ .mm = current->mm,
++ .private = &new_pgprot,
++ };
++
++ return walk_page_range(start, end, &prot_none_walk);
++}
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -292,6 +328,19 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+
+ /*
++ * Do PROT_NONE PFN permission checks here when we can still
++ * bail out without undoing a lot of state. This is a rather
++ * uncommon case, so doesn't need to be very optimized.
++ */
++ if (arch_has_pfn_modify_check() &&
++ (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
++ (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
++ error = prot_none_walk(vma, start, end, newflags);
++ if (error)
++ return error;
++ }
++
++ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+ * make it unwritable again. hugetlb mapping were accounted for
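The check the patch above adds is compact enough to model outside the kernel. The following is a minimal userspace sketch of the pfn_modify_allowed() decision; every kernel helper (boot_cpu_has_bug(), __pte_needs_invert(), pfn_valid(), l1tf_pfn_limit(), capable()) is replaced with an illustrative stub, and the constants are assumptions rather than values taken from this tree.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define MAX_PA_BITS  36ULL     /* assumed physical address width */

/* stand-ins for the kernel helpers used by pfn_modify_allowed() */
static bool cpu_has_l1tf = true;       /* boot_cpu_has_bug(X86_BUG_L1TF) */
static bool pte_inverted = true;       /* __pte_needs_invert(prot): PROT_NONE */
static bool is_admin     = false;      /* capable(CAP_SYS_ADMIN) */

static unsigned long long l1tf_pfn_limit(void)
{
        return (1ULL << (MAX_PA_BITS - 1 - PAGE_SHIFT)) - 1;
}

static bool pfn_is_ram(unsigned long long pfn)   /* pfn_valid() stand-in */
{
        return pfn < 0x100000;         /* pretend the first 4G are RAM */
}

static bool pfn_modify_allowed(unsigned long long pfn)
{
        if (!cpu_has_l1tf)
                return true;           /* CPU not affected, nothing to hide */
        if (!pte_inverted)
                return true;           /* not an inverted (PROT_NONE) PTE */
        if (pfn_is_ram(pfn))
                return true;           /* real memory is always allowed */
        /* high MMIO pfn turned into an inverted PTE: root only */
        return pfn <= l1tf_pfn_limit() || is_admin;
}

int main(void)
{
        printf("low MMIO pfn allowed:  %d\n", pfn_modify_allowed(0x200000ULL));
        printf("high MMIO pfn allowed: %d\n", pfn_modify_allowed(1ULL << 40));
        return 0;
}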
diff --git a/patches.arch/x86-mm-Simplify-p-g4um-d_page-macros.patch b/patches.arch/x86-mm-Simplify-p-g4um-d_page-macros.patch
new file mode 100644
index 0000000000..6d2db05cb2
--- /dev/null
+++ b/patches.arch/x86-mm-Simplify-p-g4um-d_page-macros.patch
@@ -0,0 +1,99 @@
+From fd7e315988b784509ba3f1b42f539bd0b1fca9bb Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 17 Jul 2017 16:10:06 -0500
+Subject: [PATCH] x86/mm: Simplify p[g4um]d_page() macros
+Mime-version: 1.0
+Content-type: text/plain; charset=UTF-8
+Content-transfer-encoding: 8bit
+Git-commit: fd7e315988b784509ba3f1b42f539bd0b1fca9bb
+Patch-mainline: 4.14-rc1
+References: 1087081
+
+mhocko@suse.cz:
+This is a dependency for patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
+because all prot_none (including NUMA) entries need to be uncluttered again, so
+p{gd,ud,md}_page cannot use p{gd,ud,md}_val directly and need to go through the
+respective _pfn() helper. We have seen oopses in the NUMA migration code paths
+(pmd_trans_migrating triggering "unable to handle kernel paging request at"
+on an address with PGD: 0).
+
+Create a pgd_pfn() macro similar to the p[4um]d_pfn() macros and then
+use the p[g4um]d_pfn() macros in the p[g4um]d_page() macros instead of
+duplicating the code.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Larry Woodman <lwoodman@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Toshimitsu Kani <toshi.kani@hpe.com>
+Cc: kasan-dev@googlegroups.com
+Cc: kvm@vger.kernel.org
+Cc: linux-arch@vger.kernel.org
+Cc: linux-doc@vger.kernel.org
+Cc: linux-efi@vger.kernel.org
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/e61eb533a6d0aac941db2723d8aa63ef6b882dee.1500319216.git.thomas.lendacky@amd.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Michal Hocko <mhocko@suse.cz>
+
+---
+ arch/x86/include/asm/pgtable.h | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -585,12 +585,16 @@ static inline unsigned long pmd_page_vad
+ return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
+ }
+
++static inline unsigned long pgd_pfn(pgd_t pgd)
++{
++ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
++}
++
+ /*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pmd_page(pmd) \
+- pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
++#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
+
+ /*
+ * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
+@@ -658,8 +662,7 @@ static inline unsigned long pud_page_vad
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pud_page(pud) \
+- pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
++#define pud_page(pud) pfn_to_page(pud_pfn(pud))
+
+ /* Find an entry in the second-level page table.. */
+ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+@@ -699,7 +702,7 @@ static inline unsigned long pgd_page_vad
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
++#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
+
+ /* to find an entry in a page-table-directory. */
+ static inline unsigned long pud_index(unsigned long address)
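The simplification above reduces each p[g4um]d_page() to a common mask-and-shift through a _pfn() helper. A standalone sketch of that pattern follows; the PTE_PFN_MASK value assumes a 52-bit physical address layout and is not copied from the kernel headers.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PTE_PFN_MASK 0x000ffffffffff000ULL   /* bits 12..51: page frame */

/* same shape as the pgd_pfn()/p[4um]d_pfn() helpers: mask flags, then shift */
static uint64_t entry_to_pfn(uint64_t entry)
{
        return (entry & PTE_PFN_MASK) >> PAGE_SHIFT;
}

int main(void)
{
        uint64_t pgd = 0x0000000123456067ULL;   /* frame 0x123456 plus flag bits */
        printf("pfn = %#llx\n", (unsigned long long)entry_to_pfn(pgd));
        return 0;
}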
diff --git a/patches.fixes/inet-frag-enforce-memory-limits-earlier.patch b/patches.fixes/inet-frag-enforce-memory-limits-earlier.patch
new file mode 100644
index 0000000000..cfe4626579
--- /dev/null
+++ b/patches.fixes/inet-frag-enforce-memory-limits-earlier.patch
@@ -0,0 +1,58 @@
+From: Eric Dumazet <edumazet@google.com>
+Subject: inet: frag: enforce memory limits earlier
+Git-commit: 56e2c94f055d328f5f6b0a5c1721cca2f2d4e0a1
+Patch-mainline: v4.18
+References: bsc#1103097, CVE-2018-5391
+Acked-by: Jiri Bohac <jbohac@suse.cz>
+
+We currently check the frags memory usage only when a new frag
+queue is created. This allows attackers to first consume the
+memory budget (default: 4 MB) by creating thousands of frag
+queues, then send tiny skbs to exceed the high_thresh limit by
+2 to 3 orders of magnitude.
+
+Note that before commit 648700f76b03 ("inet: frags: use rhashtables
+for reassembly units"), the work queue could be starved under DoS,
+getting no cpu cycles.
+After commit 648700f76b03, only the per-frag-queue timer can eventually
+remove an incomplete frag queue and its skbs.
+
+Fixes: b13d3cbfb8e8 ("inet: frag: move eviction of queues to work queue")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Jann Horn <jannh@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Peter Oskolkov <posk@google.com>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/inet_fragment.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -363,11 +363,6 @@ static struct inet_frag_queue *inet_frag
+ {
+ struct inet_frag_queue *q;
+
+- if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
+- inet_frag_schedule_worker(f);
+- return NULL;
+- }
+-
+ q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
+ if (!q)
+ return NULL;
+@@ -404,6 +399,11 @@ struct inet_frag_queue *inet_frag_find(s
+ struct inet_frag_queue *q;
+ int depth = 0;
+
++ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
++ inet_frag_schedule_worker(f);
++ return NULL;
++ }
++
+ if (frag_mem_limit(nf) > nf->low_thresh)
+ inet_frag_schedule_worker(f);
+
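A rough userspace model of the reordering in the patch above: the memory-budget test now runs in the lookup path, so an over-limit namespace drops incoming fragments instead of creating or growing queues. All structures, costs and thresholds below are invented purely for illustration.

#include <stdio.h>

struct frag_netns {
        long mem_used;
        long high_thresh;
};

struct frag_queue {
        int id;
};

static struct frag_queue *alloc_queue(struct frag_netns *nf, int id)
{
        static struct frag_queue q;          /* toy storage, one slot is enough */
        q.id = id;
        nf->mem_used += 200;                 /* pretend each queue costs 200 bytes */
        return &q;
}

static struct frag_queue *frag_find(struct frag_netns *nf, int id)
{
        /* budget enforced up front, as the patch now does in inet_frag_find() */
        if (!nf->high_thresh || nf->mem_used > nf->high_thresh)
                return NULL;                 /* over budget: drop, don't queue */
        return alloc_queue(nf, id);          /* stand-in for lookup-or-create */
}

int main(void)
{
        struct frag_netns nf = { .mem_used = 0, .high_thresh = 1000 };
        int dropped = 0;

        for (int id = 0; id < 10; id++)
                if (!frag_find(&nf, id))
                        dropped++;
        printf("fragments dropped once over budget: %d\n", dropped);
        return 0;
}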
diff --git a/patches.fixes/ip-discard-ipv4-datagrams-with-overlapping-segments.patch b/patches.fixes/ip-discard-ipv4-datagrams-with-overlapping-segments.patch
new file mode 100644
index 0000000000..5304014ee6
--- /dev/null
+++ b/patches.fixes/ip-discard-ipv4-datagrams-with-overlapping-segments.patch
@@ -0,0 +1,144 @@
+From: Peter Oskolkov <posk@google.com>
+Subject: ip: discard IPv4 datagrams with overlapping segments.
+Git-commit: 7969e5c40dfd04799d4341f1b7cd266b6e47f227
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+References: bsc#1103097, CVE-2018-5391
+Acked-by: Jiri Bohac <jbohac@suse.cz>
+
+This behavior is required in IPv6, and there is little need
+to tolerate overlapping fragments in IPv4. This change
+simplifies the code and eliminates potential DDoS attack vectors.
+
+Tested: ran ip_defrag selftest (not yet available upstream).
+
+Suggested-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Peter Oskolkov <posk@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Acked-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+---
+ include/uapi/linux/snmp.h | 1
+ net/ipv4/ip_fragment.c | 73 ++++++++++++----------------------------------
+ net/ipv4/proc.c | 1
+ 3 files changed, 22 insertions(+), 53 deletions(-)
+
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -55,6 +55,7 @@ enum
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
++ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
+ __IPSTATS_MIB_MAX
+ };
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -333,6 +333,7 @@ static int ip_frag_reinit(struct ipq *qp
+ /* Add new segment to existing queue. */
+ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ {
++ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct sk_buff *prev, *next;
+ struct net_device *dev;
+ unsigned int fragsize;
+@@ -413,60 +414,22 @@ static int ip_frag_queue(struct ipq *qp,
+ }
+
+ found:
+- /* We found where to put this one. Check for overlap with
+- * preceding fragment, and, if needed, align things so that
+- * any overlaps are eliminated.
++ /* RFC5722, Section 4, amended by Errata ID : 3089
++ * When reassembling an IPv6 datagram, if
++ * one or more its constituent fragments is determined to be an
++ * overlapping fragment, the entire datagram (and any constituent
++ * fragments) MUST be silently discarded.
++ *
++ * We do the same here for IPv4.
+ */
+- if (prev) {
+- int i = (FRAG_CB(prev)->offset + prev->len) - offset;
+-
+- if (i > 0) {
+- offset += i;
+- err = -EINVAL;
+- if (end <= offset)
+- goto err;
+- err = -ENOMEM;
+- if (!pskb_pull(skb, i))
+- goto err;
+- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+- skb->ip_summed = CHECKSUM_NONE;
+- }
+- }
+-
+- err = -ENOMEM;
+-
+- while (next && FRAG_CB(next)->offset < end) {
+- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
+-
+- if (i < next->len) {
+- /* Eat head of the next overlapped fragment
+- * and leave the loop. The next ones cannot overlap.
+- */
+- if (!pskb_pull(next, i))
+- goto err;
+- FRAG_CB(next)->offset += i;
+- qp->q.meat -= i;
+- if (next->ip_summed != CHECKSUM_UNNECESSARY)
+- next->ip_summed = CHECKSUM_NONE;
+- break;
+- } else {
+- struct sk_buff *free_it = next;
+-
+- /* Old fragment is completely overridden with
+- * new one drop it.
+- */
+- next = next->next;
+-
+- if (prev)
+- prev->next = next;
+- else
+- qp->q.fragments = next;
+-
+- qp->q.meat -= free_it->len;
+- sub_frag_mem_limit(qp->q.net, free_it->truesize);
+- kfree_skb(free_it);
+- }
+- }
++ /* Is there an overlap with the previous fragment? */
++ if (prev &&
++ (FRAG_CB(prev)->offset + prev->len) > offset)
++ goto discard_qp;
++
++ /* Is there an overlap with the next fragment? */
++ if (next && FRAG_CB(next)->offset < end)
++ goto discard_qp;
+
+ FRAG_CB(skb)->offset = offset;
+
+@@ -513,6 +476,10 @@ found:
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
+
++discard_qp:
++ inet_frag_kill(&qp->q, &ip4_frags);
++ err = -EINVAL;
++ IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+ err:
+ kfree_skb(skb);
+ return err;
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipext
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
++ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
+ SNMP_MIB_SENTINEL
+ };
+
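The new IPv4 policy above comes down to two comparisons against the neighbouring fragments in the reassembly list. The toy check below mirrors that logic; the fragment representation is simplified and purely illustrative.

#include <stdbool.h>
#include <stdio.h>

struct frag {
        int offset;     /* byte offset of this fragment in the datagram */
        int len;        /* payload length */
};

/* prev/next may be NULL when the new fragment sits at either end */
static bool overlaps(const struct frag *prev, const struct frag *next,
                     int offset, int end)
{
        if (prev && prev->offset + prev->len > offset)
                return true;    /* tail of the previous fragment covers us */
        if (next && next->offset < end)
                return true;    /* we run into the head of the next fragment */
        return false;
}

int main(void)
{
        struct frag prev = { .offset = 0,    .len = 1000 };
        struct frag next = { .offset = 2000, .len = 1000 };

        printf("clean fragment:       %d\n", overlaps(&prev, &next, 1000, 2000));
        printf("overlapping fragment: %d\n", overlaps(&prev, &next,  900, 2100));
        return 0;
}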
diff --git a/patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch b/patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch
new file mode 100644
index 0000000000..b15a2a11ee
--- /dev/null
+++ b/patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch
@@ -0,0 +1,55 @@
+From: Florian Westphal <fw@strlen.de>
+Subject: ipv6: defrag: drop non-last frags smaller than min mtu
+Git-commit: 0ed4229b08c13c84a3c301a08defdc9e7f4467e6
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+References: bsc#1103097, CVE-2018-5391
+Acked-by: Jiri Bohac <jbohac@suse.cz>
+
+Don't bother with pathological cases; they only waste cycles.
+IPv6 requires a minimum MTU of 1280, so we should never see fragments
+smaller than this (except the last frag).
+
+v3: don't use awkward "-offset + len"
+v2: drop IPv4 part, which added same check w. IPV4_MIN_MTU (68).
+ There were concerns that there could be even smaller frags
+ generated by intermediate nodes, e.g. on radio networks.
+
+Cc: Peter Oskolkov <posk@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+---
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 6 ++++++
+ net/ipv6/reassembly.c | 4 ++++
+ 2 files changed, 10 insertions(+)
+
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -601,6 +601,12 @@ struct sk_buff *nf_ct_frag6_gather(struc
+ hdr = ipv6_hdr(clone);
+ fhdr = (struct frag_hdr *)skb_transport_header(clone);
+
++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
++ fhdr->frag_off & htons(IP6_MF)) {
++ pr_debug("fragment too short\n");
++ goto ret_orig;
++ }
++
+ fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (fq == NULL) {
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -552,6 +552,10 @@ static int ipv6_frag_rcv(struct sk_buff
+ return 1;
+ }
+
++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
++ fhdr->frag_off & htons(IP6_MF))
++ goto fail_hdr;
++
+ fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (fq) {
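The condition added in both hunks above is a single test on non-last fragments. A small standalone model of it, with illustrative field names and host-order flag values, looks like this:

#include <stdbool.h>
#include <stdio.h>

#define IPV6_MIN_MTU 1280
#define IP6_MF       0x0001          /* "more fragments" flag */

/* drop a non-last fragment whose IP payload is below the IPv6 minimum MTU */
static bool drop_fragment(int ip_payload_len, unsigned int frag_off_field)
{
        return ip_payload_len < IPV6_MIN_MTU && (frag_off_field & IP6_MF);
}

int main(void)
{
        printf("tiny middle fragment: %d\n", drop_fragment(64,   IP6_MF)); /* 1: dropped */
        printf("tiny last fragment:   %d\n", drop_fragment(64,   0));      /* 0: kept */
        printf("full-size fragment:   %d\n", drop_fragment(1400, IP6_MF)); /* 0: kept */
        return 0;
}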
diff --git a/patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch b/patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch
new file mode 100644
index 0000000000..6480267562
--- /dev/null
+++ b/patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch
@@ -0,0 +1,51 @@
+From: Jiri Bohac <jbohac@suse.cz>
+Subject: kabi: ip: drop IPSTATS_MIB_REASM_OVERLAPS
+Patch-mainline: Never, KABI fix
+References: bsc#1103097, CVE-2018-5391
+
+patches.fixes/ip-discard-ipv4-datagrams-with-overlapping-segments.patch adds
+the IPSTATS_MIB_REASM_OVERLAPS SNMP attribute, which breaks KABI.
+
+Drop this attribute and account the dropped fragments in IPSTATS_MIB_REASMFAILS
+instead.
+
+
+Signed-off-by: Jiri Bohac <jbohac@suse.cz>
+
+---
+ include/uapi/linux/snmp.h | 1 -
+ net/ipv4/ip_fragment.c | 2 +-
+ net/ipv4/proc.c | 1 -
+ 3 files changed, 1 insertion(+), 3 deletions(-)
+
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -55,7 +55,6 @@ enum
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
+- IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
+ __IPSTATS_MIB_MAX
+ };
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -479,7 +479,7 @@ found:
+ discard_qp:
+ inet_frag_kill(&qp->q, &ip4_frags);
+ err = -EINVAL;
+- IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
++ IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ err:
+ kfree_skb(skb);
+ return err;
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -132,7 +132,6 @@ static const struct snmp_mib snmp4_ipext
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
+- SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
+ SNMP_MIB_SENTINEL
+ };
+
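Why the extra counter is a kABI problem in the first place: the per-namespace mib arrays are sized by the __IPSTATS_MIB_MAX sentinel, so inserting an enumerator before it changes structure sizes that externally built modules rely on. A simplified sketch with a made-up two-entry enum:

#include <stdio.h>

enum {
        IPSTATS_MIB_INPKTS,
        IPSTATS_MIB_REASMFAILS,
        /* adding IPSTATS_MIB_REASM_OVERLAPS here would grow the array below */
        __IPSTATS_MIB_MAX
};

struct ipstats_mib {
        unsigned long mibs[__IPSTATS_MIB_MAX];
};

int main(void)
{
        printf("sizeof(struct ipstats_mib) = %zu bytes\n",
               sizeof(struct ipstats_mib));
        return 0;
}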
diff --git a/series.conf b/series.conf
index 8ed5daba63..79e381f7cd 100644
--- a/series.conf
+++ b/series.conf
@@ -16422,6 +16422,12 @@
# end of sorted patches
patches.suse/suse-hv-hv_compose_msi_msg.patch
+
+ patches.fixes/inet-frag-enforce-memory-limits-earlier.patch
+ patches.fixes/ip-discard-ipv4-datagrams-with-overlapping-segments.patch
+ patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch
+ patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch
+
########################################################
# Wireless Networking
########################################################
@@ -16863,6 +16869,8 @@
patches.drivers/qla2xxx-Convert-QLA_TGT_ABTS-to-TARGET_SCF_LOOKUP_LU.patch
patches.drivers/scsi-qla2xxx-Fix-task-mgmt-handling-for-NPIV.patch
patches.drivers/target-Add-support-for-TMR-percpu-reference-counting.patch
+ # bsc#1097501
+ patches.drivers/scsi-qla2xxx-Spinlock-recursion-in-qla_target.patch
########################################################
# dlm
@@ -24145,14 +24153,67 @@
patches.suse/nospec-fix-forced-cpucaps-ordering.patch
patches.suse/0001-KVM-x86-Sync-back-MSR_IA32_SPEC_CTRL-to-VCPU-data-st.patch
- # bsc#1097501
- patches.drivers/scsi-qla2xxx-Spinlock-recursion-in-qla_target.patch
+ # bare metal
+ patches.arch/x86-mm-Simplify-p-g4um-d_page-macros.patch
+ patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch
+ patches.arch/x86-l1tf-02-change-order-of-offset-type.patch
+ patches.arch/x86-l1tf-03-protect-swap-entries.patch
+ patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
+ patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch
+ patches.arch/x86-l1tf-06-add-sysfs-report.patch
+ patches.arch/x86-l1tf-07-limit-swap-file-size.patch
+ patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch
+ # nosmt
+ patches.arch/01-sched-smt-update-sched_smt_present-at-runtime.patch
+ patches.arch/02-x86-smp-provide-topology_is_primary_thread.patch
+ patches.arch/03-x86-topology-provide-topology_smt_supported.patch
+ patches.arch/04-cpu-hotplug-split-do_cpu_down.patch
+ patches.arch/04.1-cpu-hotplug-add-sysfs-state-interface.patch
+ patches.arch/04.2-x86-topology-add-topology_max_smt_threads.patch
+ patches.arch/04.3-x86-smpboot-do-not-use-smp_num_siblings-in-_max_logical_packages-calculation.patch
+ patches.arch/05-cpu-hotplug-provide-knobs-to-control-smt.patch
+ patches.arch/06-x86-cpu-remove-the-pointless-cpu-printout.patch
+ patches.arch/07-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch
+ patches.arch/08-x86-cpu-common-provide-detect_ht_early.patch
+ patches.arch/09-x86-cpu-topology-provide-detect_extended_topology_early.patch
+ patches.arch/10-x86-cpu-intel-evaluate-smp_num_siblings-early.patch
+ patches.arch/11-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch
+ patches.arch/12-x86-cpu-amd-evaluate-smp_num_siblings-early.patch
+ patches.arch/14-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch
+ patches.arch/15-cpu-hotplug-boot-HT-siblings-at-least-once.patch
+ patches.arch/16-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch
+ # KVM
+ patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch
+ patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch
+ patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch
+ patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch
+ patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch
+ patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch
+ patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
+ patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch
+ patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch
+ patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch
+ patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch
+ # SMT runtime control
+ patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch
+ patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch
+ patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch
+ patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch
+ patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch
+ patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch
+ patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch
+ patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch
+ patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch
+ patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
+ patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch
+ patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch
+ # fixes
+ patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch
# bsc#1096978
patches.drivers/bonding-add-802.3ad-support-for-25G-speeds.patch
patches.drivers/bonding-fix-802.3ad-support-for-5G-and-50G-speeds.patch
-
########################################################
# You'd better have a good reason for adding a patch
# below here.