author     Kernel Build Daemon <kbuild@suse.de>   2018-08-15 07:04:38 +0200
committer  Kernel Build Daemon <kbuild@suse.de>   2018-08-15 07:04:38 +0200
commit     7edca9f6412f0e9172ad6930a636a92547d02e27
tree       073681e50c9ee20dc6dfb58bef9cfd3ecc157d54
parent     ba5cca1eb1c4492a1c003fc29d47e6de82b69d0b
parent     d44e7e2bd25c14b4667e49603c0601ce152d5338
Merge branch 'SLE15' into SLE15-AZURE
-rw-r--r--  config/x86_64/default | 1
-rw-r--r--  kabi/severities | 1
-rw-r--r--  patches.arch/00-jump_label-reorder-hotplug-lock-and-jump_label_lock.patch | 223
-rw-r--r--  patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch | 35
-rw-r--r--  patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch | 81
-rw-r--r--  patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch | 175
-rw-r--r--  patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch | 113
-rw-r--r--  patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch | 131
-rw-r--r--  patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch | 208
-rw-r--r--  patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch | 68
-rw-r--r--  patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch | 54
-rw-r--r--  patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch | 78
-rw-r--r--  patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch | 71
-rw-r--r--  patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch | 89
-rw-r--r--  patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch | 370
-rw-r--r--  patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch | 641
-rw-r--r--  patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch | 49
-rw-r--r--  patches.arch/01-jump_label-fix-concurrent-static_key_enable-disable.patch | 148
-rw-r--r--  patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch | 102
-rw-r--r--  patches.arch/02-jump_label-provide-hotplug-context-variants.patch | 147
-rw-r--r--  patches.arch/02-sched-smt-update-sched_smt_present-at-runtime.patch | 87
-rw-r--r--  patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch | 132
-rw-r--r--  patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch | 137
-rw-r--r--  patches.arch/03-x86-smp-provide-topology_is_primary_thread.patch | 104
-rw-r--r--  patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch | 84
-rw-r--r--  patches.arch/04-x86-topology-provide-topology_smt_supported.patch | 54
-rw-r--r--  patches.arch/05-cpu-hotplug-make-bringup-teardown-of-smp-threads-symmetric.patch | 39
-rw-r--r--  patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch | 178
-rw-r--r--  patches.arch/06-cpu-hotplug-split-do_cpu_down.patch | 51
-rw-r--r--  patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch | 149
-rw-r--r--  patches.arch/07-cpu-hotplug-provide-knobs-to-control-smt.patch | 341
-rw-r--r--  patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch | 85
-rw-r--r--  patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch | 84
-rw-r--r--  patches.arch/08-x86-cpu-remove-the-pointless-cpu-printout.patch | 99
-rw-r--r--  patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch | 92
-rw-r--r--  patches.arch/09-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch | 31
-rw-r--r--  patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch | 93
-rw-r--r--  patches.arch/10-x86-cpu-common-provide-detect_ht_early.patch | 78
-rw-r--r--  patches.arch/11-x86-cpu-topology-provide-detect_extended_topology_early.patch | 117
-rw-r--r--  patches.arch/12-x86-cpu-intel-evaluate-smp_num_siblings-early.patch | 35
-rw-r--r--  patches.arch/13-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch | 36
-rw-r--r--  patches.arch/14-x86-cpu-amd-evaluate-smp_num_siblings-early.patch | 48
-rw-r--r--  patches.arch/16-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch | 95
-rw-r--r--  patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch | 142
-rw-r--r--  patches.arch/18-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch | 77
-rw-r--r--  patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch | 72
-rw-r--r--  patches.arch/x86-l1tf-02-change-order-of-offset-type.patch | 89
-rw-r--r--  patches.arch/x86-l1tf-03-protect-swap-entries.patch | 78
-rw-r--r--  patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch | 254
-rw-r--r--  patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch | 46
-rw-r--r--  patches.arch/x86-l1tf-06-add-sysfs-report.patch | 230
-rw-r--r--  patches.arch/x86-l1tf-07-limit-swap-file-size.patch | 140
-rw-r--r--  patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch | 301
-rw-r--r--  patches.drivers/ACPI-PCI-Bail-early-in-acpi_pci_add_bus-if-there-is- | 45
-rw-r--r--  patches.drivers/EDAC-altera-Fix-ARM64-build-warning | 49
-rw-r--r--  patches.drivers/EDAC-mv64x60-Fix-an-error-handling-path | 40
-rw-r--r--  patches.drivers/EDAC-octeon-Fix-an-uninitialized-variable-warning | 52
-rw-r--r--  patches.drivers/EDAC-sb_edac-Fix-missing-break-in-switch | 37
-rw-r--r--  patches.drivers/media-omap3isp-fix-unbalanced-dma_iommu_mapping | 61
-rw-r--r--  patches.drivers/media-videobuf2-core-don-t-call-memop-finish-when-qu | 41
-rw-r--r--  patches.drivers/xhci-Fix-perceived-dead-host-due-to-runtime-s.patch | 130
-rw-r--r--  patches.fixes/0001-net-lan78xx-Fix-race-in-tx-pending-skb-size-calculat.patch | 58
-rw-r--r--  patches.fixes/0001-net-lan78xx-fix-rx-handling-before-first-packet-is-s.patch | 39
-rw-r--r--  patches.fixes/0001-net-qmi_wwan-Add-Netgear-Aircard-779S.patch | 36
-rw-r--r--  patches.fixes/0001-net-qmi_wwan-add-BroadMobi-BM806U-2020-2033.patch | 37
-rw-r--r--  patches.fixes/0001-net-usb-add-qmi_wwan-if-on-lte-modem-wistron-neweb-d.patch | 70
-rw-r--r--  patches.fixes/0001-net-usb-asix-replace-mii_nway_restart-in-resume-path.patch | 46
-rw-r--r--  patches.fixes/0001-typec-tcpm-fusb302-Resolve-out-of-order-messaging-ev.patch | 89
-rw-r--r--  patches.fixes/inet-frag-enforce-memory-limits-earlier.patch | 63
-rw-r--r--  patches.fixes/init-rename-and-re-order-boot_cpu_state_init.patch | 91
-rw-r--r--  patches.fixes/ip-discard-IPv4-datagrams-with-overlapping-segments.patch | 158
-rw-r--r--  patches.fixes/ipv4-frags-handle-possible-skb-truesize-change.patch | 54
-rw-r--r--  patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch | 61
-rw-r--r--  patches.fixes/x86-xen-init-gs-very-early.patch | 61
-rw-r--r--  patches.fixes/xen-netfront-dont-cache-skb_shinfo.patch | 53
-rw-r--r--  patches.fixes/xen-xsa270-fix.patch | 56
-rw-r--r--  patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch | 51
-rw-r--r--  patches.kabi/kvm_x86_ops-l1tf-kabi-fix.patch | 42
-rw-r--r--  patches.suse/hv-netvsc-Fix-NULL-dereference-at-single-queue-mode-.patch | 38
-rw-r--r--  patches.suse/sched-debug-Reverse-the-order-of-printing-faults.patch | 48
-rw-r--r--  patches.suse/sched-numa-Evaluate-move-once-per-node.patch | 268
-rw-r--r--  patches.suse/sched-numa-Modify-migrate_swap-to-accept-additional-parameters.patch | 114
-rw-r--r--  patches.suse/sched-numa-Move-task_numa_placement-closer-to-numa_migrate_preferred.patch | 111
-rw-r--r--  patches.suse/sched-numa-Remove-redundant-field-kabi.patch | 30
-rw-r--r--  patches.suse/sched-numa-Remove-redundant-field.patch | 39
-rw-r--r--  patches.suse/sched-numa-Remove-unused-task_capacity-from-struct-numa_stats.patch | 66
-rw-r--r--  patches.suse/sched-numa-Set-preferred_node-based-on-best_cpu.patch | 121
-rw-r--r--  patches.suse/sched-numa-Simplify-load_too_imbalanced.patch | 110
-rw-r--r--  patches.suse/sched-numa-Skip-nodes-that-are-at-hoplimit.patch | 90
-rw-r--r--  patches.suse/sched-numa-Update-the-scan-period-without-holding-the-numa_group-lock.patch | 55
-rw-r--r--  patches.suse/sched-numa-Use-group_weights-to-identify-if-migration-degrades-locality.patch | 126
-rw-r--r--  patches.suse/sched-numa-Use-task-faults-only-if-numa_group-is-not-yet-set-up.patch | 124
-rw-r--r--  series.conf | 105
93 files changed, 9427 insertions, 1 deletion
diff --git a/config/x86_64/default b/config/x86_64/default
index dd43af3fbc..1a21f41c0a 100644
--- a/config/x86_64/default
+++ b/config/x86_64/default
@@ -259,6 +259,7 @@ CONFIG_PROFILING=y
CONFIG_TRACEPOINTS=y
CONFIG_CRASH_CORE=y
CONFIG_KEXEC_CORE=y
+CONFIG_HOTPLUG_SMT=y
CONFIG_OPROFILE=m
# CONFIG_OPROFILE_EVENT_MULTIPLEX is not set
CONFIG_HAVE_OPROFILE=y
diff --git a/kabi/severities b/kabi/severities
index 6623c6127d..037161b7b0 100644
--- a/kabi/severities
+++ b/kabi/severities
@@ -17,6 +17,7 @@ kvm_x86_ops FAIL
arch/x86/kvm/* PASS
arch/powerpc/kvm/* PASS
+arch/x86/kvm/* PASS
kvmppc_* PASS
# removed upstream, not included in inlines/defines
diff --git a/patches.arch/00-jump_label-reorder-hotplug-lock-and-jump_label_lock.patch b/patches.arch/00-jump_label-reorder-hotplug-lock-and-jump_label_lock.patch
new file mode 100644
index 0000000000..40f99d5acf
--- /dev/null
+++ b/patches.arch/00-jump_label-reorder-hotplug-lock-and-jump_label_lock.patch
@@ -0,0 +1,223 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:35 +0200
+Subject: jump_label: Reorder hotplug lock and jump_label_lock
+Git-commit: f2545b2d4ce13e068897ef60ae64dffe215f4152
+Patch-mainline: v4.13-rc1
+References: bsc#1089343
+
+The conversion of the hotplug locking to a percpu rwsem unearthed lock
+ordering issues all over the place.
+
+The jump_label code has two issues:
+
+ 1) Nested get_online_cpus() invocations
+
+ 2) Ordering problems vs. the cpus rwsem and the jump_label_mutex
+
+To cure these, the following lock order has been established;
+
+ cpus_rwsem -> jump_label_lock -> text_mutex
+
+Even if not all architectures need protection against CPU hotplug, taking
+cpus_rwsem before jump_label_lock is now mandatory in code paths which
+actually modify code and therefore need text_mutex protection.
+
+Move the get_online_cpus() invocations into the core jump label code and
+establish the proper lock order where required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: "David S. Miller" <davem@davemloft.net>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Chris Metcalf <cmetcalf@mellanox.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Link: http://lkml.kernel.org/r/20170524081549.025830817@linutronix.de
+
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/mips/kernel/jump_label.c | 2 --
+ arch/sparc/kernel/jump_label.c | 2 --
+ arch/tile/kernel/jump_label.c | 2 --
+ arch/x86/kernel/jump_label.c | 2 --
+ kernel/jump_label.c | 20 ++++++++++++++------
+ 5 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
+index 3e586daa3a32..32e3168316cd 100644
+--- a/arch/mips/kernel/jump_label.c
++++ b/arch/mips/kernel/jump_label.c
+@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
+ insn.word = 0; /* nop */
+ }
+
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
+ insn_p->halfword[0] = insn.word >> 16;
+@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
+ (unsigned long)insn_p + sizeof(*insn_p));
+
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ #endif /* HAVE_JUMP_LABEL */
+diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
+index 07933b9e9ce0..93adde1ac166 100644
+--- a/arch/sparc/kernel/jump_label.c
++++ b/arch/sparc/kernel/jump_label.c
+@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
+ val = 0x01000000;
+ }
+
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+ *insn = val;
+ flushi(insn);
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ #endif
+diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
+index 07802d586988..93931a46625b 100644
+--- a/arch/tile/kernel/jump_label.c
++++ b/arch/tile/kernel/jump_label.c
+@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
+ void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+ {
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+
+ __jump_label_transform(e, type);
+ flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
+
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
+diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
+index c37bd0f39c70..ab4f491da2a9 100644
+--- a/arch/x86/kernel/jump_label.c
++++ b/arch/x86/kernel/jump_label.c
+@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
+ void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+ {
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+ __jump_label_transform(entry, type, NULL, 0);
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ static enum {
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 6c9cb208ac48..d11c506a6ac3 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -15,6 +15,7 @@
+ #include <linux/static_key.h>
+ #include <linux/jump_label_ratelimit.h>
+ #include <linux/bug.h>
++#include <linux/cpu.h>
+
+ #ifdef HAVE_JUMP_LABEL
+
+@@ -124,6 +125,7 @@ void static_key_slow_inc(struct static_key *key)
+ return;
+ }
+
++ cpus_read_lock();
+ jump_label_lock();
+ if (atomic_read(&key->enabled) == 0) {
+ atomic_set(&key->enabled, -1);
+@@ -133,12 +135,14 @@ void static_key_slow_inc(struct static_key *key)
+ atomic_inc(&key->enabled);
+ }
+ jump_label_unlock();
++ cpus_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+
+ static void __static_key_slow_dec(struct static_key *key,
+ unsigned long rate_limit, struct delayed_work *work)
+ {
++ cpus_read_lock();
+ /*
+ * The negative count check is valid even when a negative
+ * key->enabled is in use by static_key_slow_inc(); a
+@@ -149,6 +153,7 @@ static void __static_key_slow_dec(struct static_key *key,
+ if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
+ WARN(atomic_read(&key->enabled) < 0,
+ "jump label: negative count!\n");
++ cpus_read_unlock();
+ return;
+ }
+
+@@ -159,6 +164,7 @@ static void __static_key_slow_dec(struct static_key *key,
+ jump_label_update(key);
+ }
+ jump_label_unlock();
++ cpus_read_unlock();
+ }
+
+ static void jump_label_update_timeout(struct work_struct *work)
+@@ -334,6 +340,7 @@ void __init jump_label_init(void)
+ if (static_key_initialized)
+ return;
+
++ cpus_read_lock();
+ jump_label_lock();
+ jump_label_sort_entries(iter_start, iter_stop);
+
+@@ -353,6 +360,7 @@ void __init jump_label_init(void)
+ }
+ static_key_initialized = true;
+ jump_label_unlock();
++ cpus_read_unlock();
+ }
+
+ #ifdef CONFIG_MODULES
+@@ -590,28 +598,28 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
+ struct module *mod = data;
+ int ret = 0;
+
++ cpus_read_lock();
++ jump_label_lock();
++
+ switch (val) {
+ case MODULE_STATE_COMING:
+- jump_label_lock();
+ ret = jump_label_add_module(mod);
+ if (ret) {
+ WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
+ jump_label_del_module(mod);
+ }
+- jump_label_unlock();
+ break;
+ case MODULE_STATE_GOING:
+- jump_label_lock();
+ jump_label_del_module(mod);
+- jump_label_unlock();
+ break;
+ case MODULE_STATE_LIVE:
+- jump_label_lock();
+ jump_label_invalidate_module_init(mod);
+- jump_label_unlock();
+ break;
+ }
+
++ jump_label_unlock();
++ cpus_read_unlock();
++
+ return notifier_from_errno(ret);
+ }
+
+
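[Editor's note: an illustrative sketch, not part of the patch above. This is the lock nesting the changelog establishes, as seen from a code path that patches kernel text; jump_label_lock()/jump_label_unlock() and text_mutex are internal to kernel/jump_label.c and the text-poking code, so the fragment only documents the ordering cpus_rwsem -> jump_label_lock -> text_mutex.]

	cpus_read_lock();		/* 1. cpus_rwsem: block CPU hotplug */
	jump_label_lock();		/* 2. serialize static key updates */
	mutex_lock(&text_mutex);	/* 3. exclusive access to kernel text */

	/* ... patch the jump sites of the key being toggled ... */

	mutex_unlock(&text_mutex);
	jump_label_unlock();
	cpus_read_unlock();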
diff --git a/patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch b/patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch
new file mode 100644
index 0000000000..3497ca91c5
--- /dev/null
+++ b/patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch
@@ -0,0 +1,35 @@
+From 182a68debec3df71553dea079bad650361372e88 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 16:42:58 -0400
+Subject: [PATCH] x86/cpufeatures: Add detection of L1D cache flush support.
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 11e34e64e4103955fc4568750914c75d65ea87ee upstream
+
+336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
+(IA32_FLUSH_CMD) which is detected by CPUID.7.EDX[28]=1 bit being set.
+
+This new MSR "gives software a way to invalidate structures with finer
+granularity than other architectural methods like WBINVD."
+
+A copy of this document is available at
+ https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -338,6 +338,7 @@
+ #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+ #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
++#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
+ #define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+ #define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
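[Editor's note: a minimal sketch of how the new feature bit is consumed by the later L1D-flush patches in this series. The helper name is invented; MSR_IA32_FLUSH_CMD and L1D_FLUSH are the msr-index.h definitions those patches rely on.]

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

/* Illustrative helper: if CPUID.7.EDX[28] advertises L1D flush support,
 * the L1D cache can be invalidated by writing L1D_FLUSH to the
 * IA32_FLUSH_CMD MSR. */
static inline void l1d_flush_if_supported(void)
{
	if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
		wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
}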
diff --git a/patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch b/patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch
new file mode 100644
index 0000000000..8349a1d322
--- /dev/null
+++ b/patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch
@@ -0,0 +1,81 @@
+From 288d152c23dcf3c09da46c5c481903ca10ebfef7 Mon Sep 17 00:00:00 2001
+From: Nicolai Stange <nstange@suse.de>
+Date: Wed, 18 Jul 2018 19:07:38 +0200
+Subject: [PATCH] x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
+Patch-mainline: not yet, under development
+References: bsc#1089343 CVE-2018-3646
+
+The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
+to evict the L1d cache.
+
+However, these pages are never cleared and, in theory, their data could be
+leaked.
+
+More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
+to fewer than 1 << L1D_CACHE_ORDER host physical pages and this would break
+the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.
+
+Fix this by initializing the individual vmx_l1d_flush_pages with a
+different pattern each.
+
+Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
+"flush_pages" to reflect this change.
+
+Fixes: a47dd5f06714 ("x86/KVM/VMX: Add L1D flush algorithm")
+Signed-off-by: Nicolai Stange <nstange@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kvm/vmx.c | 17 ++++++++++++++---
+ 1 file changed, 14 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -217,6 +217,7 @@ static void *vmx_l1d_flush_pages;
+ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+ {
+ struct page *page;
++ unsigned int i;
+
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+@@ -249,6 +250,16 @@ static int vmx_setup_l1d_flush(enum vmx_
+ if (!page)
+ return -ENOMEM;
+ vmx_l1d_flush_pages = page_address(page);
++
++ /*
++ * Initialize each page with a different pattern in
++ * order to protect against KSM in the nested
++ * virtualization case.
++ */
++ for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
++ memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
++ PAGE_SIZE);
++ }
+ }
+
+ l1tf_vmx_mitigation = l1tf;
+@@ -9180,7 +9191,7 @@ static void vmx_l1d_flush(struct kvm_vcp
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+ ".Lpopulate_tlb:\n\t"
+- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $4096, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lpopulate_tlb\n\t"
+@@ -9189,12 +9200,12 @@ static void vmx_l1d_flush(struct kvm_vcp
+ /* Now fill the cache */
+ "xorl %%eax, %%eax\n"
+ ".Lfill_cache:\n"
+- "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
+ "addl $64, %%eax\n\t"
+ "cmpl %%eax, %[size]\n\t"
+ "jne .Lfill_cache\n\t"
+ "lfence\n"
+- :: [empty_zp] "r" (vmx_l1d_flush_pages),
++ :: [flush_pages] "r" (vmx_l1d_flush_pages),
+ [size] "r" (size)
+ : "eax", "ebx", "ecx", "edx");
+ }
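[Editor's note: a userspace sketch, illustrative only, of the KSM behaviour the changelog above guards against: mergeable pages with identical contents may be deduplicated to a single physical page, which is exactly what the per-page fill pattern prevents for vmx_l1d_flush_pages.]

#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	const size_t page = 4096, npages = 16;
	char *buf = aligned_alloc(page, page * npages);

	if (!buf)
		return 1;

	memset(buf, 0, page * npages);		/* identical contents ...        */
	madvise(buf, page * npages, MADV_MERGEABLE); /* ... are KSM merge candidates */

	/* Giving every page a distinct pattern (as the patch does) keeps the
	 * backing physical pages distinct and the flush buffer effective. */
	for (size_t i = 0; i < npages; i++)
		memset(buf + i * page, (int)i + 1, page);

	free(buf);
	return 0;
}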
diff --git a/patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch b/patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch
new file mode 100644
index 0000000000..2791fcc530
--- /dev/null
+++ b/patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch
@@ -0,0 +1,175 @@
+From 1018fc06496c6c99b41aceb65e7638313b2e0d0f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:16 +0200
+Subject: [PATCH 01/11] x86/litf: Introduce vmx status variable
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 72c6d2db64fa18c996ece8f06e499509e6c9a37e upstream
+
+Store the effective mitigation of VMX in a status variable and use it to
+report the VMX state in the l1tf sysfs file.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/vmx.h | 9 +++++++++
+ arch/x86/kernel/cpu/bugs.c | 34 +++++++++++++++++++++++++++++++++-
+ arch/x86/kvm/vmx.c | 22 +++++++++++-----------
+ 3 files changed, 53 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -571,4 +571,13 @@ enum vm_instruction_error_number {
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
+ };
+
++enum vmx_l1d_flush_state {
++ VMENTER_L1D_FLUSH_AUTO,
++ VMENTER_L1D_FLUSH_NEVER,
++ VMENTER_L1D_FLUSH_COND,
++ VMENTER_L1D_FLUSH_ALWAYS,
++};
++
++extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
++
+ #endif
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -21,6 +21,7 @@
+ #include <asm/processor-flags.h>
+ #include <asm/fpu/internal.h>
+ #include <asm/msr.h>
++#include <asm/vmx.h>
+ #include <asm/paravirt.h>
+ #include <asm/alternative.h>
+ #include <asm/pgtable.h>
+@@ -211,6 +212,11 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
++EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
++#endif
++
+ static void __init l1tf_select_mitigation(void)
+ {
+ u64 half_pa;
+@@ -705,6 +711,32 @@ void x86_spec_ctrl_setup_ap(void)
+
+ #ifdef CONFIG_SYSFS
+
++#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
++
++#if IS_ENABLED(CONFIG_KVM_INTEL)
++static const char *l1tf_vmx_states[] = {
++ [VMENTER_L1D_FLUSH_AUTO] = "auto",
++ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
++ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
++ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++};
++
++static ssize_t l1tf_show_state(char *buf)
++{
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
++ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++
++ return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
++ cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled",
++ l1tf_vmx_states[l1tf_vmx_mitigation]);
++}
++#else
++static ssize_t l1tf_show_state(char *buf)
++{
++ return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
++}
++#endif
++
+ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
+ {
+@@ -736,7 +768,7 @@ static ssize_t cpu_show_common(struct de
+
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_FIX))
+- return sprintf(buf, "Mitigation: Page Table Inversion\n");
++ return l1tf_show_state(buf);
+ break;
+
+ default:
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -199,19 +199,13 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+
+-/* These MUST be in sync with vmentry_l1d_param order. */
+-enum vmx_l1d_flush_state {
+- VMENTER_L1D_FLUSH_NEVER,
+- VMENTER_L1D_FLUSH_COND,
+- VMENTER_L1D_FLUSH_ALWAYS,
+-};
+-
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
+
+ static const struct {
+ const char *option;
+ enum vmx_l1d_flush_state cmd;
+ } vmentry_l1d_param[] = {
++ {"auto", VMENTER_L1D_FLUSH_AUTO},
+ {"never", VMENTER_L1D_FLUSH_NEVER},
+ {"cond", VMENTER_L1D_FLUSH_COND},
+ {"always", VMENTER_L1D_FLUSH_ALWAYS},
+@@ -12578,8 +12572,12 @@ static int __init vmx_setup_l1d_flush(vo
+ {
+ struct page *page;
+
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return 0;
++
++ l1tf_vmx_mitigation = vmentry_l1d_flush;
++
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- !boot_cpu_has_bug(X86_BUG_L1TF) ||
+ vmx_l1d_use_msr_save_list())
+ return 0;
+
+@@ -12594,12 +12592,14 @@ static int __init vmx_setup_l1d_flush(vo
+ return 0;
+ }
+
+-static void vmx_free_l1d_flush_pages(void)
++static void vmx_cleanup_l1d_flush(void)
+ {
+ if (vmx_l1d_flush_pages) {
+ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
+ vmx_l1d_flush_pages = NULL;
+ }
++ /* Restore state so sysfs ignores VMX */
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+
+ static int __init vmx_init(void)
+@@ -12613,7 +12613,7 @@ static int __init vmx_init(void)
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r) {
+- vmx_free_l1d_flush_pages();
++ vmx_cleanup_l1d_flush();
+ return r;
+ }
+
+@@ -12634,7 +12634,7 @@ static void __exit vmx_exit(void)
+
+ kvm_exit();
+
+- vmx_free_l1d_flush_pages();
++ vmx_cleanup_l1d_flush();
+ }
+
+ module_init(vmx_init)
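[Editor's note: for reference, the VMX state introduced above surfaces to administrators through the generic CPU vulnerabilities interface. A small userspace sketch that reads it:]

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

	if (!f) {
		perror("l1tf");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "Mitigation: PTE Inversion; VMX: ..." */
	fclose(f);
	return 0;
}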
diff --git a/patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch b/patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch
new file mode 100644
index 0000000000..3e40b6c7a2
--- /dev/null
+++ b/patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch
@@ -0,0 +1,113 @@
+From 7ec66a2a22a6fb2e6d1359c23ef223e41ce85517 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:17 +0200
+Subject: [PATCH 02/11] x86/kvm: Drop L1TF MSR list approach
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 2f055947ae5e2741fb2dc5bba1033c417ccf4faa upstream
+
+The VMX module parameter to control the L1D flush should become
+writeable.
+
+The MSR list is set up at VM init per guest VCPU, but the run time
+switching is based on a static key which is global. Toggling the MSR list
+at run time might be feasible, but for now drop this optimization and use
+the regular MSR write to make run-time switching possible.
+
+The default mitigation is the conditional flush anyway, so for extra
+paranoid setups this will add some small overhead, but the extra code
+executed is in the noise compared to the flush itself.
+
+Aside of that the EPT disabled case is not handled correctly at the moment
+and the MSR list magic is in the way for fixing that as well.
+
+If it's really providing a significant advantage, then this needs to be
+revisited after the code is correct and the control is writable.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.516940445@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 43 +++++++------------------------------------
+ 1 file changed, 7 insertions(+), 36 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5710,16 +5710,6 @@ static void ept_set_mmio_spte_mask(void)
+ VMX_EPT_MISCONFIG_WX_VALUE);
+ }
+
+-static bool vmx_l1d_use_msr_save_list(void)
+-{
+- if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
+- static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+- !static_cpu_has(X86_FEATURE_FLUSH_L1D))
+- return false;
+-
+- return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+-}
+-
+ #define VMX_XSS_EXIT_BITMAP 0
+ /*
+ * Sets up the vmcs for emulated real mode.
+@@ -6067,12 +6057,6 @@ static void vmx_set_nmi_mask(struct kvm_
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ }
+- /*
+- * If flushing the L1D cache on every VMENTER is enforced and the
+- * MSR is available, use the MSR save list.
+- */
+- if (vmx_l1d_use_msr_save_list())
+- add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
+ }
+
+ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+@@ -9096,26 +9080,14 @@ static void vmx_l1d_flush(struct kvm_vcp
+ bool always;
+
+ /*
+- * This code is only executed when:
+- * - the flush mode is 'cond'
+- * - the flush mode is 'always' and the flush MSR is not
+- * available
+- *
+- * If the CPU has the flush MSR then clear the flush bit because
+- * 'always' mode is handled via the MSR save list.
+- *
+- * If the MSR is not avaibable then act depending on the mitigation
+- * mode: If 'flush always', keep the flush bit set, otherwise clear
+- * it.
++ * This code is only executed when the the flush mode is 'cond' or
++ * 'always'
+ *
+- * The flush bit gets set again either from vcpu_run() or from one
+- * of the unsafe VMEXIT handlers.
++ * If 'flush always', keep the flush bit set, otherwise clear
++ * it. The flush bit gets set again either from vcpu_run() or from
++ * one of the unsafe VMEXIT handlers.
+ */
+- if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
+- always = false;
+- else
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+-
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -12577,8 +12549,7 @@ static int __init vmx_setup_l1d_flush(vo
+
+ l1tf_vmx_mitigation = vmentry_l1d_flush;
+
+- if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- vmx_l1d_use_msr_save_list())
++ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+ return 0;
+
+ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
diff --git a/patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch b/patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch
new file mode 100644
index 0000000000..5547b1ac8a
--- /dev/null
+++ b/patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch
@@ -0,0 +1,131 @@
+From e7df88f07e31908829a68d29c6da50250e033ffd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:18 +0200
+Subject: [PATCH 03/11] x86/l1tf: Handle EPT disabled state proper
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit a7b9020b06ec6d7c3f3b0d4ef1a9eba12654f4f7 upstream
+
+If Extended Page Tables (EPT) are disabled or not supported, no L1D
+flushing is required. The setup function can just avoid setting up the L1D
+flush for the EPT=n case.
+
+Invoke it after the hardware setup has been done and enable_ept has the
+correct state and expose the EPT disabled state in the mitigation status as
+well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.612160168@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/vmx.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 9 +++++----
+ arch/x86/kvm/vmx.c | 44 ++++++++++++++++++++++++++------------------
+ 3 files changed, 32 insertions(+), 22 deletions(-)
+
+--- a/arch/x86/include/asm/vmx.h
++++ b/arch/x86/include/asm/vmx.h
+@@ -576,6 +576,7 @@ enum vmx_l1d_flush_state {
+ VMENTER_L1D_FLUSH_NEVER,
+ VMENTER_L1D_FLUSH_COND,
+ VMENTER_L1D_FLUSH_ALWAYS,
++ VMENTER_L1D_FLUSH_EPT_DISABLED,
+ };
+
+ extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -715,10 +715,11 @@ void x86_spec_ctrl_setup_ap(void)
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+ static const char *l1tf_vmx_states[] = {
+- [VMENTER_L1D_FLUSH_AUTO] = "auto",
+- [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
+- [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
+- [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++ [VMENTER_L1D_FLUSH_AUTO] = "auto",
++ [VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
++ [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
++ [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
++ [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
+ };
+
+ static ssize_t l1tf_show_state(char *buf)
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -12547,6 +12547,11 @@ static int __init vmx_setup_l1d_flush(vo
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
++ if (!enable_ept) {
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
++ return 0;
++ }
++
+ l1tf_vmx_mitigation = vmentry_l1d_flush;
+
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+@@ -12573,18 +12578,35 @@ static void vmx_cleanup_l1d_flush(void)
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ }
+
++
++static void __exit vmx_exit(void)
++{
++#ifdef CONFIG_KEXEC_CORE
++ RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
++ synchronize_rcu();
++#endif
++
++ kvm_exit();
++
++ vmx_cleanup_l1d_flush();
++}
++module_exit(vmx_exit)
++
+ static int __init vmx_init(void)
+ {
+ int r;
+
+- r = vmx_setup_l1d_flush();
++ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
++ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+ return r;
+
+- r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+- __alignof__(struct vcpu_vmx), THIS_MODULE);
++ /*
++ * Must be called after kvm_init() so enable_ept is properly set up
++ */
++ r = vmx_setup_l1d_flush();
+ if (r) {
+- vmx_cleanup_l1d_flush();
++ vmx_exit();
+ return r;
+ }
+
+@@ -12595,18 +12617,4 @@ static int __init vmx_init(void)
+
+ return 0;
+ }
+-
+-static void __exit vmx_exit(void)
+-{
+-#ifdef CONFIG_KEXEC_CORE
+- RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+- synchronize_rcu();
+-#endif
+-
+- kvm_exit();
+-
+- vmx_cleanup_l1d_flush();
+-}
+-
+ module_init(vmx_init)
+-module_exit(vmx_exit)
diff --git a/patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch b/patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch
new file mode 100644
index 0000000000..dc0abb043b
--- /dev/null
+++ b/patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch
@@ -0,0 +1,208 @@
+From 97b75b4eb58eeeab1422d73a6f275d6e7fe0bd34 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:19 +0200
+Subject: [PATCH 04/11] x86/kvm: Move l1tf setup function
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 7db92e165ac814487264632ab2624e832f20ae38 upstream
+
+In preparation of allowing run time control for L1D flushing, move the
+setup code to the module parameter handler.
+
+In case of pre module init parsing, just store the value and let vmx_init()
+do the actual setup after running kvm_init() so that enable_ept has the
+correct state.
+
+During run-time invoke it directly from the parameter setter to prepare for
+run-time control.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.694063239@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 125 +++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 78 insertions(+), 47 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -199,7 +199,8 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+
+-static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
++/* Storage for pre module init parameter parsing */
++static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+
+ static const struct {
+ const char *option;
+@@ -211,33 +212,85 @@ static const struct {
+ {"always", VMENTER_L1D_FLUSH_ALWAYS},
+ };
+
+-static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++#define L1D_CACHE_ORDER 4
++static void *vmx_l1d_flush_pages;
++
++static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
+ {
+- unsigned int i;
++ struct page *page;
+
+- if (!s)
+- return -EINVAL;
++ /* If set to 'auto' select 'cond' */
++ if (l1tf == VMENTER_L1D_FLUSH_AUTO)
++ l1tf = VMENTER_L1D_FLUSH_COND;
+
+- for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (!strcmp(s, vmentry_l1d_param[i].option)) {
+- vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
+- return 0;
+- }
++ if (!enable_ept) {
++ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
++ return 0;
+ }
+
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
++ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++ vmx_l1d_flush_pages = page_address(page);
++ }
++
++ l1tf_vmx_mitigation = l1tf;
++
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
++ static_branch_enable(&vmx_l1d_should_flush);
++ return 0;
++}
++
++static int vmentry_l1d_flush_parse(const char *s)
++{
++ unsigned int i;
++
++ if (s) {
++ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
++ if (!strcmp(s, vmentry_l1d_param[i].option))
++ return vmentry_l1d_param[i].cmd;
++ }
++ }
+ return -EINVAL;
+ }
+
++static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++{
++ int l1tf;
++
++ if (!boot_cpu_has(X86_BUG_L1TF))
++ return 0;
++
++ l1tf = vmentry_l1d_flush_parse(s);
++ if (l1tf < 0)
++ return l1tf;
++
++ /*
++ * Has vmx_init() run already? If not then this is the pre init
++ * parameter parsing. In that case just store the value and let
++ * vmx_init() do the proper setup after enable_ept has been
++ * established.
++ */
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
++ vmentry_l1d_flush_param = l1tf;
++ return 0;
++ }
++
++ return vmx_setup_l1d_flush(l1tf);
++}
++
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+ {
+- return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
++ return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+ }
+
+ static const struct kernel_param_ops vmentry_l1d_flush_ops = {
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+ };
+-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
+
+ #define NR_AUTOLOAD_MSRS 8
+
+@@ -9087,7 +9140,7 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * it. The flush bit gets set again either from vcpu_run() or from
+ * one of the unsafe VMEXIT handlers.
+ */
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -12540,34 +12593,6 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .enable_smi_window = enable_smi_window,
+ };
+
+-static int __init vmx_setup_l1d_flush(void)
+-{
+- struct page *page;
+-
+- if (!boot_cpu_has_bug(X86_BUG_L1TF))
+- return 0;
+-
+- if (!enable_ept) {
+- l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+- return 0;
+- }
+-
+- l1tf_vmx_mitigation = vmentry_l1d_flush;
+-
+- if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
+- return 0;
+-
+- if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+- if (!page)
+- return -ENOMEM;
+- vmx_l1d_flush_pages = page_address(page);
+- }
+-
+- static_branch_enable(&vmx_l1d_should_flush);
+- return 0;
+-}
+-
+ static void vmx_cleanup_l1d_flush(void)
+ {
+ if (vmx_l1d_flush_pages) {
+@@ -12602,12 +12627,18 @@ static int __init vmx_init(void)
+ return r;
+
+ /*
+- * Must be called after kvm_init() so enable_ept is properly set up
+- */
+- r = vmx_setup_l1d_flush();
+- if (r) {
+- vmx_exit();
+- return r;
++ * Must be called after kvm_init() so enable_ept is properly set
++ * up. Hand the parameter mitigation value in which was stored in
++ * the pre module init parser. If no parameter was given, it will
++ * contain 'auto' which will be turned into the default 'cond'
++ * mitigation mode.
++ */
++ if (boot_cpu_has(X86_BUG_L1TF)) {
++ r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
++ if (r) {
++ vmx_exit();
++ return r;
++ }
+ }
+
+ #ifdef CONFIG_KEXEC_CORE
diff --git a/patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch b/patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch
new file mode 100644
index 0000000000..31aab4bb30
--- /dev/null
+++ b/patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch
@@ -0,0 +1,68 @@
+From 4869e95527728ca067e0111eca899023cd6b3652 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:20 +0200
+Subject: [PATCH 05/11] x86/kvm: Add static key for flush always
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 4c6523ec59fe895ea352a650218a6be0653910b1 upstream
+
+Avoid the conditional in the L1D flush control path.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.790914912@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -198,6 +198,7 @@ module_param(ple_window_max, int, S_IRUG
+ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
+
+ /* Storage for pre module init parameter parsing */
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+@@ -238,8 +239,12 @@ static int vmx_setup_l1d_flush(enum vmx_
+
+ l1tf_vmx_mitigation = l1tf;
+
+- if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+- static_branch_enable(&vmx_l1d_should_flush);
++ if (l1tf == VMENTER_L1D_FLUSH_NEVER)
++ return 0;
++
++ static_branch_enable(&vmx_l1d_should_flush);
++ if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
++ static_branch_enable(&vmx_l1d_flush_always);
+ return 0;
+ }
+
+@@ -9130,7 +9135,6 @@ static void *vmx_l1d_flush_pages;
+ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+- bool always;
+
+ /*
+ * This code is only executed when the the flush mode is 'cond' or
+@@ -9140,8 +9144,10 @@ static void vmx_l1d_flush(struct kvm_vcp
+ * it. The flush bit gets set again either from vcpu_run() or from
+ * one of the unsafe VMEXIT handlers.
+ */
+- always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
+- vcpu->arch.l1tf_flush_l1d = always;
++ if (static_branch_unlikely(&vmx_l1d_flush_always))
++ vcpu->arch.l1tf_flush_l1d = true;
++ else
++ vcpu->arch.l1tf_flush_l1d = false;
+
+ vcpu->stat.l1d_flush++;
+
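[Editor's note: the pattern used above, reduced to a self-contained sketch with invented names. A static key compiles to a patched jump, so once the mode is fixed the hot path carries no conditional load.]

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(example_flush_always);

/* Hot path: becomes a straight-line branch that is patched when the key
 * is enabled, instead of testing a variable on every invocation. */
static void example_prepare_entry(bool *need_flush)
{
	if (static_branch_unlikely(&example_flush_always))
		*need_flush = true;
	else
		*need_flush = false;
}

/* Slow path, e.g. from module parameter handling. */
static void example_set_always_mode(bool on)
{
	if (on)
		static_branch_enable(&example_flush_always);
	else
		static_branch_disable(&example_flush_always);
}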
diff --git a/patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch b/patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch
new file mode 100644
index 0000000000..1930611fa6
--- /dev/null
+++ b/patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch
@@ -0,0 +1,54 @@
+From 7ac0b796e89e597baabffb9a26b173ca557df876 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:21 +0200
+Subject: [PATCH 06/11] x86/kvm: Serialize L1D flush parameter setter
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit dd4bfa739a72508b75760b393d129ed7b431daab upstream
+
+Writes to the parameter files are not serialized at the sysfs core
+level, so local serialization is required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.873642605@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -199,6 +199,7 @@ extern const ulong vmx_return;
+
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
+ static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_always);
++static DEFINE_MUTEX(vmx_l1d_flush_mutex);
+
+ /* Storage for pre module init parameter parsing */
+ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
+@@ -263,7 +264,7 @@ static int vmentry_l1d_flush_parse(const
+
+ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+ {
+- int l1tf;
++ int l1tf, ret;
+
+ if (!boot_cpu_has(X86_BUG_L1TF))
+ return 0;
+@@ -283,7 +284,10 @@ static int vmentry_l1d_flush_set(const c
+ return 0;
+ }
+
+- return vmx_setup_l1d_flush(l1tf);
++ mutex_lock(&vmx_l1d_flush_mutex);
++ ret = vmx_setup_l1d_flush(l1tf);
++ mutex_unlock(&vmx_l1d_flush_mutex);
++ return ret;
+ }
+
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
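[Editor's note: a generic sketch of the serialization pattern applied above, with all names invented. The kernel_param_ops ->set handler is not serialized by the sysfs core, so concurrent writes to the parameter file must be guarded locally.]

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);
static int example_mode;

static int example_set(const char *val, const struct kernel_param *kp)
{
	int new, ret;

	ret = kstrtoint(val, 0, &new);
	if (ret)
		return ret;

	mutex_lock(&example_mutex);	/* writes are not serialized by sysfs */
	example_mode = new;
	mutex_unlock(&example_mutex);
	return 0;
}

static int example_get(char *buf, const struct kernel_param *kp)
{
	return sprintf(buf, "%d\n", example_mode);
}

static const struct kernel_param_ops example_ops = {
	.set = example_set,
	.get = example_get,
};
module_param_cb(example_mode, &example_ops, NULL, 0644);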
diff --git a/patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch b/patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch
new file mode 100644
index 0000000000..e8ee9405a3
--- /dev/null
+++ b/patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch
@@ -0,0 +1,78 @@
+From fa2ecdf1e0d193c282bd8bd81d9af6cbd6e9483e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:22 +0200
+Subject: [PATCH 07/11] x86/kvm: Allow runtime control of L1D flush
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 895ae47f9918833c3a880fbccd41e0692b37e7d9 upstream
+
+All mitigation modes can be switched at run time with a static key now:
+
+ - Use sysfs_streq() instead of strcmp() to handle the trailing new line
+ from sysfs writes correctly.
+ - Make the static key management handle multiple invocations properly.
+ - Set the module parameter file to RW
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142322.954525119@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kernel/cpu/bugs.c | 2 +-
+ arch/x86/kvm/vmx.c | 13 ++++++++-----
+ 2 files changed, 9 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -213,7 +213,7 @@ static void x86_amd_ssb_disable(void)
+ }
+
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
+-enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
++enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+ #endif
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -240,12 +240,15 @@ static int vmx_setup_l1d_flush(enum vmx_
+
+ l1tf_vmx_mitigation = l1tf;
+
+- if (l1tf == VMENTER_L1D_FLUSH_NEVER)
+- return 0;
++ if (l1tf != VMENTER_L1D_FLUSH_NEVER)
++ static_branch_enable(&vmx_l1d_should_flush);
++ else
++ static_branch_disable(&vmx_l1d_should_flush);
+
+- static_branch_enable(&vmx_l1d_should_flush);
+ if (l1tf == VMENTER_L1D_FLUSH_ALWAYS)
+ static_branch_enable(&vmx_l1d_flush_always);
++ else
++ static_branch_disable(&vmx_l1d_flush_always);
+ return 0;
+ }
+
+@@ -255,7 +258,7 @@ static int vmentry_l1d_flush_parse(const
+
+ if (s) {
+ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+- if (!strcmp(s, vmentry_l1d_param[i].option))
++ if (sysfs_streq(s, vmentry_l1d_param[i].option))
+ return vmentry_l1d_param[i].cmd;
+ }
+ }
+@@ -299,7 +302,7 @@ static const struct kernel_param_ops vme
+ .set = vmentry_l1d_flush_set,
+ .get = vmentry_l1d_flush_get,
+ };
+-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
+
+ #define NR_AUTOLOAD_MSRS 8
+
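[Editor's note: one detail from the list above spelled out. A write through the module parameter file arrives with a trailing newline, which is why the parser switches from strcmp() to sysfs_streq(). A tiny sketch with illustrative names:]

#include <linux/errno.h>
#include <linux/string.h>

static int example_parse_mode(const char *s)
{
	/* sysfs_streq() treats "cond" and "cond\n" as equal; strcmp() would
	 * reject the newline-terminated string written via sysfs. */
	if (sysfs_streq(s, "never"))
		return 0;
	if (sysfs_streq(s, "cond"))
		return 1;
	if (sysfs_streq(s, "always"))
		return 2;
	return -EINVAL;
}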
diff --git a/patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch b/patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch
new file mode 100644
index 0000000000..96fcbb7c40
--- /dev/null
+++ b/patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch
@@ -0,0 +1,71 @@
+From 338b2f8743e1c0ee1f4cd1ef8a02499846212733 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 13 Jul 2018 16:23:23 +0200
+Subject: [PATCH 08/11] cpu/hotplug: Expose SMT control init function
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 8e1b706b6e819bed215c0db16345568864660393 upstream
+
+The L1TF mitigation will gain a command line parameter which allows setting
+a combination of hypervisor mitigation and SMT control.
+
+Expose cpu_smt_disable() so the command line parser can tweak SMT settings.
+
+[ tglx: Split out of larger patch and made it preserve an already existing
+ force off state ]
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.039715135@linutronix.de
+---
+ include/linux/cpu.h | 2 ++
+ kernel/cpu.c | 16 +++++++++++++---
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -186,8 +186,10 @@ enum cpuhp_smt_control {
+
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
++extern void cpu_smt_disable(bool force);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
++static inline void cpu_smt_disable(bool force) { }
+ #endif
+
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -349,13 +349,23 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+ EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+-static int __init smt_cmdline_disable(char *str)
++void __init cpu_smt_disable(bool force)
+ {
+- cpu_smt_control = CPU_SMT_DISABLED;
+- if (str && !strcmp(str, "force")) {
++ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
++ cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return;
++
++ if (force) {
+ pr_info("SMT: Force disabled\n");
+ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ } else {
++ cpu_smt_control = CPU_SMT_DISABLED;
+ }
++}
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_disable(str && !strcmp(str, "force"));
+ return 0;
+ }
+ early_param("nosmt", smt_cmdline_disable);
diff --git a/patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch b/patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch
new file mode 100644
index 0000000000..eae9074071
--- /dev/null
+++ b/patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch
@@ -0,0 +1,89 @@
+From 62e81568e375272dc52a649e841b496dd1c12a43 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:24 +0200
+Subject: [PATCH 09/11] cpu/hotplug: Set CPU_SMT_NOT_SUPPORTED early
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit fee0aede6f4739c87179eca76136f83210953b86 upstream
+
+The CPU_SMT_NOT_SUPPORTED state is set (if the processor does not support
+SMT) when the sysfs SMT control file is initialized.
+
+That was fine so far as this was only required to make the output of the
+control file correct and to prevent writes in that case.
+
+With the upcoming l1tf command line parameter, this needs to be set up
+before the L1TF mitigation selection and command line parsing happens.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.121795971@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kernel/cpu/bugs.c | 6 ++++++
+ include/linux/cpu.h | 2 ++
+ kernel/cpu.c | 13 ++++++++++---
+ 3 files changed, 18 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -58,6 +58,12 @@ void __init check_bugs(void)
+ {
+ identify_boot_cpu();
+
++ /*
++ * identify_boot_cpu() initialized SMT support information, let the
++ * core code know.
++ */
++ cpu_smt_check_topology();
++
+ if (!IS_ENABLED(CONFIG_SMP)) {
+ pr_info("CPU: ");
+ print_cpu_info(&boot_cpu_data);
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -187,9 +187,11 @@ enum cpuhp_smt_control {
+ #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+ extern enum cpuhp_smt_control cpu_smt_control;
+ extern void cpu_smt_disable(bool force);
++extern void cpu_smt_check_topology(void);
+ #else
+ # define cpu_smt_control (CPU_SMT_ENABLED)
+ static inline void cpu_smt_disable(bool force) { }
++static inline void cpu_smt_check_topology(void) { }
+ #endif
+
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -363,6 +363,16 @@ void __init cpu_smt_disable(bool force)
+ }
+ }
+
++/*
++ * The decision whether SMT is supported can only be done after the full
++ * CPU identification. Called from architecture code.
++ */
++void __init cpu_smt_check_topology(void)
++{
++ if (!topology_smt_supported())
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++}
++
+ static int __init smt_cmdline_disable(char *str)
+ {
+ cpu_smt_disable(str && !strcmp(str, "force"));
+@@ -1919,9 +1929,6 @@ static const struct attribute_group cpuh
+
+ static int __init cpu_smt_state_init(void)
+ {
+- if (!topology_smt_supported())
+- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
+-
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+ }
diff --git a/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch b/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
new file mode 100644
index 0000000000..28c3e73793
--- /dev/null
+++ b/patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
@@ -0,0 +1,370 @@
+From 2edd564f0dbf8f19717786c31776aa5e06b42253 Mon Sep 17 00:00:00 2001
+From: Jiri Kosina <jkosina@suse.cz>
+Date: Fri, 13 Jul 2018 16:23:25 +0200
+Subject: [PATCH 10/11] x86/bugs, kvm: Introduce boot-time control of L1TF
+ mitigations
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit d90a7a0ec83fb86622cd7dae23255d3c50a99ec8 upstream
+
+Introduce the 'l1tf=' kernel command line option to allow for boot-time
+switching of mitigation that is used on processors affected by L1TF.
+
+The possible values are:
+
+ full
+ Provides all available mitigations for the L1TF vulnerability. Disables
+ SMT and enables all mitigations in the hypervisors. SMT control via
+ /sys/devices/system/cpu/smt/control is still possible after boot.
+ Hypervisors will issue a warning when the first VM is started in
+ a potentially insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ full,force
+ Same as 'full', but disables SMT control. Implies the 'nosmt=force'
+ command line option. sysfs control of SMT and the hypervisor flush
+ control is disabled.
+
+ flush
+ Leaves SMT enabled and enables the conditional hypervisor mitigation.
+ Hypervisors will issue a warning when the first VM is started in a
+ potentially insecure configuration, i.e. SMT enabled or L1D flush
+ disabled.
+
+ flush,nosmt
+ Disables SMT and enables the conditional hypervisor mitigation. SMT
+ control via /sys/devices/system/cpu/smt/control is still possible
+ after boot. If SMT is reenabled or flushing disabled at runtime
+ hypervisors will issue a warning.
+
+ flush,nowarn
+ Same as 'flush', but hypervisors will not warn when
+ a VM is started in a potentially insecure configuration.
+
+ off
+ Disables hypervisor mitigations and doesn't emit any warnings.
+
+Default is 'flush'.
+
+Let KVM adhere to these semantics, which means:
+
+ - 'l1tf=full,force' : Perform L1D flushes. No runtime control
+ possible.
+
+ - 'l1tf=full'
+ - 'l1tf=flush'
+ - 'l1tf=flush,nosmt' : Perform L1D flushes and warn on VM start if
+ SMT has been runtime enabled or L1D flushing
+ has been run-time enabled
+
+ - 'l1tf=flush,nowarn' : Perform L1D flushes and no warnings are emitted.
+
+ - 'l1tf=off' : L1D flushes are not performed and no warnings
+ are emitted.
+
+KVM can always override the L1D flushing behavior using its 'vmentry_l1d_flush'
+module parameter except when l1tf=full,force is set.
+
+This makes KVM's private 'nosmt' option redundant, and as it is a bit
+non-systematic anyway (this is something to control globally, not on
+hypervisor level), remove that option.
+
+Add the missing Documentation entry for the l1tf vulnerability sysfs file
+while at it.
+
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20180713142323.202758176@linutronix.de
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 4 +
+ Documentation/admin-guide/kernel-parameters.txt | 68 +++++++++++++++++++--
+ arch/x86/include/asm/processor.h | 12 +++
+ arch/x86/kernel/cpu/bugs.c | 45 +++++++++++++
+ arch/x86/kvm/vmx.c | 56 +++++++++++++----
+ 5 files changed, 166 insertions(+), 19 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -963,4 +963,16 @@ bool xen_set_default_idle(void);
+
+ void stop_this_cpu(void *dummy);
+ void df_debug(struct pt_regs *regs, long error_code);
++
++enum l1tf_mitigations {
++ L1TF_MITIGATION_OFF,
++ L1TF_MITIGATION_FLUSH_NOWARN,
++ L1TF_MITIGATION_FLUSH,
++ L1TF_MITIGATION_FLUSH_NOSMT,
++ L1TF_MITIGATION_FULL,
++ L1TF_MITIGATION_FULL_FORCE
++};
++
++extern enum l1tf_mitigations l1tf_mitigation;
++
+ #endif /* _ASM_X86_PROCESSOR_H */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -218,7 +218,11 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++/* Default mitigation for L1TF-affected CPUs */
++enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
+ #if IS_ENABLED(CONFIG_KVM_INTEL)
++EXPORT_SYMBOL_GPL(l1tf_mitigation);
++
+ enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
+ EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
+ #endif
+@@ -230,6 +234,20 @@ static void __init l1tf_select_mitigatio
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ case L1TF_MITIGATION_FLUSH:
++ break;
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ case L1TF_MITIGATION_FULL:
++ cpu_smt_disable(false);
++ break;
++ case L1TF_MITIGATION_FULL_FORCE:
++ cpu_smt_disable(true);
++ break;
++ }
++
+ #if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+@@ -249,6 +267,33 @@ static void __init l1tf_select_mitigatio
+ setup_force_cpu_cap(X86_FEATURE_L1TF_FIX);
+ }
+
++
++static int __init l1tf_cmdline(char *str)
++{
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return 0;
++
++ if (!str)
++ return -EINVAL;
++
++ if (!strcmp(str, "off"))
++ l1tf_mitigation = L1TF_MITIGATION_OFF;
++ else if (!strcmp(str, "flush,nowarn"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
++ else if (!strcmp(str, "flush"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH;
++ else if (!strcmp(str, "flush,nosmt"))
++ l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
++ else if (!strcmp(str, "full"))
++ l1tf_mitigation = L1TF_MITIGATION_FULL;
++ else if (!strcmp(str, "full,force"))
++ l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
++
++ return 0;
++}
++early_param("l1tf", l1tf_cmdline);
++
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -70,9 +70,6 @@ static const struct x86_cpu_id vmx_cpu_i
+ };
+ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
+-static bool __read_mostly nosmt;
+-module_param(nosmt, bool, S_IRUGO);
+-
+ static bool __read_mostly enable_vpid = 1;
+ module_param_named(vpid, enable_vpid, bool, 0444);
+
+@@ -221,15 +218,31 @@ static int vmx_setup_l1d_flush(enum vmx_
+ {
+ struct page *page;
+
+- /* If set to 'auto' select 'cond' */
+- if (l1tf == VMENTER_L1D_FLUSH_AUTO)
+- l1tf = VMENTER_L1D_FLUSH_COND;
+-
+ if (!enable_ept) {
+ l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+ return 0;
+ }
+
++ /* If set to auto use the default l1tf mitigation method */
++ if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ l1tf = VMENTER_L1D_FLUSH_NEVER;
++ break;
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ case L1TF_MITIGATION_FLUSH:
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ l1tf = VMENTER_L1D_FLUSH_COND;
++ break;
++ case L1TF_MITIGATION_FULL:
++ case L1TF_MITIGATION_FULL_FORCE:
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
++ break;
++ }
++ } else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
++ l1tf = VMENTER_L1D_FLUSH_ALWAYS;
++ }
++
+ if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+ !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+@@ -10037,16 +10050,33 @@ free_vcpu:
+ return ERR_PTR(err);
+ }
+
+-#define L1TF_MSG "SMT enabled with L1TF CPU bug present. Refer to CVE-2018-3620 for details.\n"
++#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
++#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
+
+ static int vmx_vm_init(struct kvm *kvm)
+ {
+- if (boot_cpu_has(X86_BUG_L1TF) && cpu_smt_control == CPU_SMT_ENABLED) {
+- if (nosmt) {
+- pr_err(L1TF_MSG);
+- return -EOPNOTSUPP;
++ if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
++ switch (l1tf_mitigation) {
++ case L1TF_MITIGATION_OFF:
++ case L1TF_MITIGATION_FLUSH_NOWARN:
++ /* 'I explicitly don't care' is set */
++ break;
++ case L1TF_MITIGATION_FLUSH:
++ case L1TF_MITIGATION_FLUSH_NOSMT:
++ case L1TF_MITIGATION_FULL:
++ /*
++ * Warn upon starting the first VM in a potentially
++ * insecure environment.
++ */
++ if (cpu_smt_control == CPU_SMT_ENABLED)
++ pr_warn_once(L1TF_MSG_SMT);
++ if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
++ pr_warn_once(L1TF_MSG_L1D);
++ break;
++ case L1TF_MITIGATION_FULL_FORCE:
++ /* Flush is enforced */
++ break;
+ }
+- pr_warn(L1TF_MSG);
+ }
+ return 0;
+ }
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -379,6 +379,7 @@ What: /sys/devices/system/cpu/vulnerabi
+ /sys/devices/system/cpu/vulnerabilities/spectre_v1
+ /sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
++ /sys/devices/system/cpu/vulnerabilities/l1tf
+ Date: January 2018
+ Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+ Description: Information about CPU vulnerabilities
+@@ -391,6 +392,9 @@ Description: Information about CPU vulne
+ "Vulnerable" CPU is affected and no mitigation in effect
+ "Mitigation: $M" CPU is affected and mitigation $M is in effect
+
++ Details about the l1tf file can be found in
++ Documentation/admin-guide/l1tf.rst
++
+ What: /sys/devices/system/cpu/smt
+ /sys/devices/system/cpu/smt/active
+ /sys/devices/system/cpu/smt/control
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1860,12 +1860,6 @@
+ [KVM,ARM] Allow use of GICv4 for direct injection of
+ LPIs.
+
+- kvm-intel.nosmt=[KVM,Intel] If the L1TF CPU bug is present (CVE-2018-3620)
+- and the system has SMT (aka Hyper-Threading) enabled then
+- don't allow guests to be created.
+-
+- Default is 0 (allow guests to be created).
+-
+ kvm-intel.ept= [KVM,Intel] Disable extended page tables
+ (virtualized MMU) support on capable Intel chips.
+ Default is 1 (enabled)
+@@ -1907,6 +1901,68 @@
+ Disables the paravirtualized spinlock slowpath
+ optimizations for KVM.
+
++ l1tf= [X86] Control mitigation of the L1TF vulnerability on
++ affected CPUs
++
++ The kernel PTE inversion protection is unconditionally
++ enabled and cannot be disabled.
++
++ full
++ Provides all available mitigations for the
++ L1TF vulnerability. Disables SMT and
++ enables all mitigations in the
++ hypervisors, i.e. unconditional L1D flush.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ full,force
++ Same as 'full', but disables SMT and L1D
++ flush runtime control. Implies the
++ 'nosmt=force' command line option.
++ (i.e. sysfs control of SMT is disabled.)
++
++ flush
++ Leaves SMT enabled and enables the default
++ hypervisor mitigation, i.e. conditional
++ L1D flush.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ flush,nosmt
++
++ Disables SMT and enables the default
++ hypervisor mitigation.
++
++ SMT control and L1D flush control via the
++ sysfs interface is still possible after
++ boot. Hypervisors will issue a warning
++ when the first VM is started in a
++ potentially insecure configuration,
++ i.e. SMT enabled or L1D flush disabled.
++
++ flush,nowarn
++ Same as 'flush', but hypervisors will not
++ warn when a VM is started in a potentially
++ insecure configuration.
++
++ off
++ Disables hypervisor mitigations and doesn't
++ emit any warnings.
++
++ Default is 'flush'.
++
++ For details see: Documentation/admin-guide/l1tf.rst
++
+ l2cr= [PPC]
+
+ l3cr= [PPC]
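
Which of the l1tf= modes above actually took effect can be read back through sysfs. A minimal userspace check (illustrative sketch, not part of the patch; it assumes a kernel with these patches applied so that the files exist):

    #include <stdio.h>

    /* Print one sysfs attribute, or note that it is absent on this kernel. */
    static void show(const char *path)
    {
            char buf[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    printf("%s: <not available>\n", path);
                    return;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("%s: %s", path, buf);
            fclose(f);
    }

    int main(void)
    {
            /* Effective L1TF mitigation chosen via l1tf= (or the default). */
            show("/sys/devices/system/cpu/vulnerabilities/l1tf");
            /* SMT state: on, off, forceoff or notsupported. */
            show("/sys/devices/system/cpu/smt/control");
            show("/sys/devices/system/cpu/smt/active");
            return 0;
    }

Booting with, for example, 'l1tf=flush,nosmt' and running this afterwards shows the resulting SMT and flush state.
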
diff --git a/patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch b/patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch
new file mode 100644
index 0000000000..a6212395ab
--- /dev/null
+++ b/patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch
@@ -0,0 +1,641 @@
+From b4f6a2228077ea61b5944835cc67aba83cc9e82d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Jul 2018 16:23:26 +0200
+Subject: [PATCH 11/11] Documentation: Add section about CPU vulnerabilities
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+
+commit 3ec8ce5d866ec6a08a9cfab82b62acf4a830b35f upstream
+
+Add documentation for the L1TF vulnerability and the mitigation mechanisms:
+
+ - Explain the problem and risks
+ - Document the mitigation mechanisms
+ - Document the command line controls
+ - Document the sysfs files
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lkml.kernel.org/r/20180713142323.287429944@linutronix.de
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ Documentation/admin-guide/index.rst | 9
+ Documentation/admin-guide/l1tf.rst | 591 ++++++++++++++++++++++++++++++++++++
+ 2 files changed, 600 insertions(+)
+ create mode 100644 Documentation/admin-guide/l1tf.rst
+
+--- a/Documentation/admin-guide/index.rst
++++ b/Documentation/admin-guide/index.rst
+@@ -17,6 +17,15 @@ etc.
+ kernel-parameters
+ devices
+
++This section describes CPU vulnerabilities and provides an overview of the
++possible mitigations along with guidance for selecting mitigations if they
++are configurable at compile, boot or run time.
++
++.. toctree::
++ :maxdepth: 1
++
++ l1tf
++
+ Here is a set of documents aimed at users who are trying to track down
+ problems and bugs in particular.
+
+--- /dev/null
++++ b/Documentation/admin-guide/l1tf.rst
+@@ -0,0 +1,591 @@
++L1TF - L1 Terminal Fault
++========================
++
++L1 Terminal Fault is a hardware vulnerability which allows unprivileged
++speculative access to data which is available in the Level 1 Data Cache
++when the page table entry controlling the virtual address, which is used
++for the access, has the Present bit cleared or other reserved bits set.
++
++Affected processors
++-------------------
++
++This vulnerability affects a wide range of Intel processors. The
++vulnerability is not present on:
++
++ - Processors from AMD, Centaur and other non Intel vendors
++
++ - Older processor models, where the CPU family is < 6
++
++ - A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
++ Penwell, Pineview, Silvermont, Airmont, Merrifield)
++
++ - The Intel Core Duo Yonah variants (2006 - 2008)
++
++ - The Intel XEON PHI family
++
++ - Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
++ IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
++ by the Meltdown vulnerability either. These CPUs should become
++ available by end of 2018.
++
++Whether a processor is affected or not can be read out from the L1TF
++vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
++
++Related CVEs
++------------
++
++The following CVE entries are related to the L1TF vulnerability:
++
++ ============= ================= ==============================
++ CVE-2018-3615 L1 Terminal Fault SGX related aspects
++ CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects
++ CVE-2018-3646 L1 Terminal Fault Virtualization related aspects
++ ============= ================= ==============================
++
++Problem
++-------
++
++If an instruction accesses a virtual address for which the relevant page
++table entry (PTE) has the Present bit cleared or other reserved bits set,
++then speculative execution ignores the invalid PTE and loads the referenced
++data if it is present in the Level 1 Data Cache, as if the page referenced
++by the address bits in the PTE was still present and accessible.
++
++While this is a purely speculative mechanism and the instruction will raise
++a page fault when it is retired eventually, the pure act of loading the
++data and making it available to other speculative instructions opens up the
++opportunity for side channel attacks to unprivileged malicious code,
++similar to the Meltdown attack.
++
++While Meltdown breaks the user space to kernel space protection, L1TF
++allows attacking any physical memory address in the system and the attack
++works across all protection domains. It allows an attack of SGX and also
++works from inside virtual machines because the speculation bypasses the
++extended page table (EPT) protection mechanism.
++
++
++Attack scenarios
++----------------
++
++1. Malicious user space
++^^^^^^^^^^^^^^^^^^^^^^^
++
++ Operating Systems store arbitrary information in the address bits of a
++ PTE which is marked non present. This allows a malicious user space
++ application to attack the physical memory to which these PTEs resolve.
++ In some cases user-space can maliciously influence the information
++ encoded in the address bits of the PTE, thus making attacks more
++ deterministic and more practical.
++
++ The Linux kernel contains a mitigation for this attack vector, PTE
++ inversion, which is permanently enabled and has no performance
++ impact. The kernel ensures that the address bits of PTEs, which are not
++ marked present, never point to cacheable physical memory space.
++
++ A system with an up to date kernel is protected against attacks from
++ malicious user space applications.
++
++2. Malicious guest in a virtual machine
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The fact that L1TF breaks all domain protections allows malicious guest
++ OSes, which can control the PTEs directly, and malicious guest user
++ space applications, which run on an unprotected guest kernel lacking the
++ PTE inversion mitigation for L1TF, to attack physical host memory.
++
++ A special aspect of L1TF in the context of virtualization is symmetric
++ multi threading (SMT). The Intel implementation of SMT is called
++ HyperThreading. The fact that Hyperthreads on the affected processors
++ share the L1 Data Cache (L1D) is important for this. As the flaw allows
++ only to attack data which is present in L1D, a malicious guest running
++ on one Hyperthread can attack the data which is brought into the L1D by
++ the context which runs on the sibling Hyperthread of the same physical
++ core. This context can be host OS, host user space or a different guest.
++
++ If the processor does not support Extended Page Tables, the attack is
++ only possible when the hypervisor does not sanitize the content of the
++ effective (shadow) page tables.
++
++ While solutions exist to mitigate these attack vectors fully, these
++ mitigations are not enabled by default in the Linux kernel because they
++ can affect performance significantly. The kernel provides several
++ mechanisms which can be utilized to address the problem depending on the
++ deployment scenario. The mitigations, their protection scope and impact
++ are described in the next sections.
++
++ The default mitigations and the rationale for choosing them are explained
++ at the end of this document. See :ref:`default_mitigations`.
++
++.. _l1tf_sys_info:
++
++L1TF system information
++-----------------------
++
++The Linux kernel provides a sysfs interface to enumerate the current L1TF
++status of the system: whether the system is vulnerable, and which
++mitigations are active. The relevant sysfs file is:
++
++/sys/devices/system/cpu/vulnerabilities/l1tf
++
++The possible values in this file are:
++
++ =========================== ===============================
++ 'Not affected' The processor is not vulnerable
++ 'Mitigation: PTE Inversion' The host protection is active
++ =========================== ===============================
++
++If KVM/VMX is enabled and the processor is vulnerable then the following
++information is appended to the 'Mitigation: PTE Inversion' part:
++
++ - SMT status:
++
++ ===================== ================
++ 'VMX: SMT vulnerable' SMT is enabled
++ 'VMX: SMT disabled' SMT is disabled
++ ===================== ================
++
++ - L1D Flush mode:
++
++ ================================ ====================================
++ 'L1D vulnerable' L1D flushing is disabled
++
++ 'L1D conditional cache flushes' L1D flush is conditionally enabled
++
++ 'L1D cache flushes' L1D flush is unconditionally enabled
++ ================================ ====================================
++
++The resulting grade of protection is discussed in the following sections.
++
++
++Host mitigation mechanism
++-------------------------
++
++The kernel is unconditionally protected against L1TF attacks from malicious
++user space running on the host.
++
++
++Guest mitigation mechanisms
++---------------------------
++
++.. _l1d_flush:
++
++1. L1D flush on VMENTER
++^^^^^^^^^^^^^^^^^^^^^^^
++
++ To make sure that a guest cannot attack data which is present in the L1D
++ the hypervisor flushes the L1D before entering the guest.
++
++ Flushing the L1D evicts not only the data which should not be accessed
++ by a potentially malicious guest, it also flushes the guest
++ data. Flushing the L1D has a performance impact as the processor has to
++ bring the flushed guest data back into the L1D. Depending on the
++ frequency of VMEXIT/VMENTER and the type of computations in the guest,
++ performance degradation in the range of 1% to 50% has been observed. For
++ scenarios where guest VMEXIT/VMENTER are rare the performance impact is
++ minimal. Virtio and mechanisms like posted interrupts are designed to
++ confine the VMEXITs to a bare minimum, but specific configurations and
++ application scenarios might still suffer from a high VMEXIT rate.
++
++ The kernel provides two L1D flush modes:
++ - conditional ('cond')
++ - unconditional ('always')
++
++ The conditional mode avoids L1D flushing after VMEXITs which execute
++ only audited code paths before the corresponding VMENTER. These code
++ paths have been verified not to expose secrets or other
++ interesting data to an attacker, but they can leak information about the
++ address space layout of the hypervisor.
++
++ Unconditional mode flushes L1D on all VMENTER invocations and provides
++ maximum protection. It has a higher overhead than the conditional
++ mode. The overhead cannot be quantified correctly as it depends on the
++ work load scenario and the resulting number of VMEXITs.
++
++ The general recommendation is to enable L1D flush on VMENTER. The kernel
++ defaults to conditional mode on affected processors.
++
++ **Note** that L1D flush does not prevent the SMT problem because the
++ sibling thread will also bring back its data into the L1D which makes it
++ attackable again.
++
++ L1D flush can be controlled by the administrator via the kernel command
++ line and sysfs control files. See :ref:`mitigation_control_command_line`
++ and :ref:`mitigation_control_kvm`.
++
++.. _guest_confinement:
++
++2. Guest VCPU confinement to dedicated physical cores
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ To address the SMT problem, it is possible to make a guest or a group of
++ guests affine to one or more physical cores. The proper mechanism for
++ that is to utilize exclusive cpusets to ensure that no other guest or
++ host tasks can run on these cores.
++
++ If only a single guest or related guests run on sibling SMT threads on
++ the same physical core then they can only attack their own memory and
++ restricted parts of the host memory.
++
++ Host memory is attackable when one of the sibling SMT threads runs in
++ host OS (hypervisor) context and the other in guest context. The amount
++ of valuable information from the host OS context depends on the context
++ which the host OS executes, i.e. interrupts, soft interrupts and kernel
++ threads. The amount of valuable data from these contexts cannot be
++ declared as non-interesting for an attacker without deep inspection of
++ the code.
++
++ **Note** that assigning guests to a fixed set of physical cores affects
++ the ability of the scheduler to do load balancing and might have
++ negative effects on CPU utilization depending on the hosting
++ scenario. Disabling SMT might be a viable alternative for particular
++ scenarios.
++
++ For further information about confining guests to a single or to a group
++ of cores consult the cpusets documentation:
++
++ https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
++
++.. _interrupt_isolation:
++
++3. Interrupt affinity
++^^^^^^^^^^^^^^^^^^^^^
++
++ Interrupts can be made affine to logical CPUs. This is not universally
++ true because there are types of interrupts which are truly per CPU
++ interrupts, e.g. the local timer interrupt. Aside from that, multi-queue
++ devices affine their interrupts to single CPUs or groups of CPUs per
++ queue without allowing the administrator to control the affinities.
++
++ Moving the interrupts, which can be affinity controlled, away from CPUs
++ which run untrusted guests, reduces the attack vector space.
++
++ Whether the interrupts which are affine to CPUs that run untrusted
++ guests provide interesting data for an attacker depends on the system
++ configuration and the scenarios which run on the system. While for some
++ of the interrupts it can be assumed that they won't expose interesting
++ information beyond exposing hints about the host OS memory layout, there
++ is no way to make general assumptions.
++
++ Interrupt affinity can be controlled by the administrator via the
++ /proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
++ available at:
++
++ https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
++
++.. _smt_control:
++
++4. SMT control
++^^^^^^^^^^^^^^
++
++ To prevent the SMT issues of L1TF it might be necessary to disable SMT
++ completely. Disabling SMT can have a significant performance impact, but
++ the impact depends on the hosting scenario and the type of workloads.
++ The impact of disabling SMT also needs to be weighed against the impact
++ of other mitigation solutions like confining guests to dedicated cores.
++
++ The kernel provides a sysfs interface to retrieve the status of SMT and
++ to control it. It also provides a kernel command line interface to
++ control SMT.
++
++ The kernel command line interface consists of the following options:
++
++ =========== ==========================================================
++ nosmt Affects the bring up of the secondary CPUs during boot. The
++ kernel tries to bring all present CPUs online during the
++ boot process. "nosmt" makes sure that from each physical
++ core only one - the so called primary (hyper) thread is
++ activated. Due to a design flaw of Intel processors related
++ to Machine Check Exceptions the non primary siblings have
++ to be brought up at least partially and are then shut down
++ again. "nosmt" can be undone via the sysfs interface.
++
++ nosmt=force Has the same effect as "nosmt" but it does not allow to
++ undo the SMT disable via the sysfs interface.
++ =========== ==========================================================
++
++ The sysfs interface provides two files:
++
++ - /sys/devices/system/cpu/smt/control
++ - /sys/devices/system/cpu/smt/active
++
++ /sys/devices/system/cpu/smt/control:
++
++ This file allows reading out the SMT control state and provides the
++ ability to disable or (re)enable SMT. The possible states are:
++
++ ============== ===================================================
++ on SMT is supported by the CPU and enabled. All
++ logical CPUs can be onlined and offlined without
++ restrictions.
++
++ off SMT is supported by the CPU and disabled. Only
++ the so called primary SMT threads can be onlined
++ and offlined without restrictions. An attempt to
++ online a non-primary sibling is rejected
++
++ forceoff Same as 'off' but the state cannot be controlled.
++ Attempts to write to the control file are rejected.
++
++ notsupported The processor does not support SMT. It's therefore
++ not affected by the SMT implications of L1TF.
++ Attempts to write to the control file are rejected.
++ ============== ===================================================
++
++ The possible states which can be written into this file to control SMT
++ state are:
++
++ - on
++ - off
++ - forceoff
++
++ /sys/devices/system/cpu/smt/active:
++
++ This file reports whether SMT is enabled and active, i.e. if on any
++ physical core two or more sibling threads are online.
++
++ SMT control is also possible at boot time via the l1tf kernel command
++ line parameter in combination with L1D flush control. See
++ :ref:`mitigation_control_command_line`.
++
++5. Disabling EPT
++^^^^^^^^^^^^^^^^
++
++ Disabling EPT for virtual machines provides full mitigation for L1TF even
++ with SMT enabled, because the effective page tables for guests are
++ managed and sanitized by the hypervisor. However, disabling EPT has a
++ significant performance impact especially when the Meltdown mitigation
++ KPTI is enabled.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
++
++There is ongoing research and development for new mitigation mechanisms to
++address the performance impact of disabling SMT or EPT.
++
++.. _mitigation_control_command_line:
++
++Mitigation control on the kernel command line
++---------------------------------------------
++
++The kernel command line allows controlling the L1TF mitigations at boot
++time with the option "l1tf=". The valid arguments for this option are:
++
++ ============ =============================================================
++ full Provides all available mitigations for the L1TF
++ vulnerability. Disables SMT and enables all mitigations in
++ the hypervisors, i.e. unconditional L1D flushing
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ full,force Same as 'full', but disables SMT and L1D flush runtime
++ control. Implies the 'nosmt=force' command line option.
++ (i.e. sysfs control of SMT is disabled.)
++
++ flush Leaves SMT enabled and enables the default hypervisor
++ mitigation, i.e. conditional L1D flushing
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ flush,nosmt Disables SMT and enables the default hypervisor mitigation,
++ i.e. conditional L1D flushing.
++
++ SMT control and L1D flush control via the sysfs interface
++ is still possible after boot. Hypervisors will issue a
++ warning when the first VM is started in a potentially
++ insecure configuration, i.e. SMT enabled or L1D flush
++ disabled.
++
++ flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is
++ started in a potentially insecure configuration.
++
++ off Disables hypervisor mitigations and doesn't emit any
++ warnings.
++ ============ =============================================================
++
++The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
++
++
++.. _mitigation_control_kvm:
++
++Mitigation control for KVM - module parameter
++-------------------------------------------------------------
++
++The KVM hypervisor mitigation mechanism, flushing the L1D cache when
++entering a guest, can be controlled with a module parameter.
++
++The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the
++following arguments:
++
++ ============ ==============================================================
++ always L1D cache flush on every VMENTER.
++
++ cond Flush L1D on VMENTER only when the code between VMEXIT and
++ VMENTER can leak host memory which is considered
++ interesting for an attacker. This still can leak host memory
++ which allows e.g. determining the host's address space layout.
++
++ never Disables the mitigation
++ ============ ==============================================================
++
++The parameter can be provided on the kernel command line, as a module
++parameter when loading the modules and at runtime modified via the sysfs
++file:
++
++/sys/module/kvm_intel/parameters/vmentry_l1d_flush
++
++The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
++line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
++module parameter is ignored and writes to the sysfs file are rejected.
++
++
++Mitigation selection guide
++--------------------------
++
++1. No virtualization in use
++^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ The system is protected by the kernel unconditionally and no further
++ action is required.
++
++2. Virtualization with trusted guests
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++ If the guest comes from a trusted source and the guest OS kernel is
++ guaranteed to have the L1TF mitigations in place the system is fully
++ protected against L1TF and no further action is required.
++
++ To avoid the overhead of the default L1D flushing on VMENTER the
++ administrator can disable the flushing via the kernel command line and
++ sysfs control files. See :ref:`mitigation_control_command_line` and
++ :ref:`mitigation_control_kvm`.
++
++
++3. Virtualization with untrusted guests
++^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
++
++3.1. SMT not supported or disabled
++""""""""""""""""""""""""""""""""""
++
++ If SMT is not supported by the processor or disabled in the BIOS or by
++ the kernel, it's only required to enforce L1D flushing on VMENTER.
++
++ Conditional L1D flushing is the default behaviour and can be tuned. See
++ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
++
++3.2. EPT not supported or disabled
++""""""""""""""""""""""""""""""""""
++
++ If EPT is not supported by the processor or disabled in the hypervisor,
++ the system is fully protected. SMT can stay enabled and L1D flushing on
++ VMENTER is not required.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
++
++3.3. SMT and EPT supported and active
++"""""""""""""""""""""""""""""""""""""
++
++ If SMT and EPT are supported and active then various degrees of
++ mitigations can be employed:
++
++ - L1D flushing on VMENTER:
++
++ L1D flushing on VMENTER is the minimal protection requirement, but it
++ is only potent in combination with other mitigation methods.
++
++ Conditional L1D flushing is the default behaviour and can be tuned. See
++ :ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
++
++ - Guest confinement:
++
++ Confinement of guests to a single or a group of physical cores which
++ are not running any other processes, can reduce the attack surface
++ significantly, but interrupts, soft interrupts and kernel threads can
++ still expose valuable data to a potential attacker. See
++ :ref:`guest_confinement`.
++
++ - Interrupt isolation:
++
++ Isolating the guest CPUs from interrupts can reduce the attack surface
++ further, but still allows a malicious guest to explore a limited amount
++ of host physical memory. This can at least be used to gain knowledge
++ about the host address space layout. The interrupts which have a fixed
++ affinity to the CPUs which run the untrusted guests can depending on
++ the scenario still trigger soft interrupts and schedule kernel threads
++ which might expose valuable information. See
++ :ref:`interrupt_isolation`.
++
++The above three mitigation methods combined can provide protection to a
++certain degree, but the risk of the remaining attack surface has to be
++carefully analyzed. For full protection the following methods are
++available:
++
++ - Disabling SMT:
++
++ Disabling SMT and enforcing the L1D flushing provides the maximum
++ amount of protection. This mitigation does not depend on any of the
++ above mitigation methods.
++
++ SMT control and L1D flushing can be tuned by the command line
++ parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run
++ time with the matching sysfs control files. See :ref:`smt_control`,
++ :ref:`mitigation_control_command_line` and
++ :ref:`mitigation_control_kvm`.
++
++ - Disabling EPT:
++
++ Disabling EPT provides the maximum amount of protection as well. It is
++ not dependent on any of the above mitigation methods. SMT can stay
++ enabled and L1D flushing is not required, but the performance impact is
++ significant.
++
++ EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
++ parameter.
++
++
++.. _default_mitigations:
++
++Default mitigations
++-------------------
++
++ The kernel default mitigations for vulnerable processors are:
++
++ - PTE inversion to protect against malicious user space. This is done
++ unconditionally and cannot be controlled.
++
++ - L1D conditional flushing on VMENTER when EPT is enabled for
++ a guest.
++
++ The kernel does not by default enforce the disabling of SMT, which leaves
++ SMT systems vulnerable when running untrusted guests with EPT enabled.
++
++ The rationale for this choice is:
++
++ - Force disabling SMT can break existing setups, especially with
++ unattended updates.
++
++ - If regular users run untrusted guests on their machine, then L1TF is
++ just an add-on to other malware which might be embedded in an untrusted
++ guest, e.g. spam-bots or attacks on the local network.
++
++ There is no technical way to prevent a user from running untrusted code
++ on their machines blindly.
++
++ - It's technically extremely unlikely and from today's knowledge even
++ impossible that L1TF can be exploited via the most popular attack
++ mechanisms like JavaScript because these mechanisms have no way to
++ control PTEs. If this were possible and no other mitigation were
++ available, then the default might be different.
++
++ - The administrators of cloud and hosting setups have to carefully
++ analyze the risk for their scenarios and make the appropriate
++ mitigation choices, which might even vary across their deployed
++ machines and also result in other changes of their overall setup.
++ There is no way for the kernel to provide a sensible default for this
++ kind of scenario.
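
The "Mitigation control for KVM - module parameter" section above maps directly onto a small amount of userspace code. A hedged sketch (not part of the patch; it assumes kvm_intel is loaded, root privileges for the write, and that 'l1tf=full,force' was not given, since that makes writes to the parameter fail):

    #include <stdio.h>

    #define PARAM "/sys/module/kvm_intel/parameters/vmentry_l1d_flush"

    int main(void)
    {
            char cur[32];
            FILE *f = fopen(PARAM, "r");

            if (!f) {
                    perror(PARAM);  /* kvm_intel not loaded? */
                    return 1;
            }
            if (fgets(cur, sizeof(cur), f))
                    printf("current vmentry_l1d_flush: %s", cur);
            fclose(f);

            /* Switch to unconditional flushing at runtime. */
            f = fopen(PARAM, "w");
            if (!f || fputs("always\n", f) == EOF)
                    perror("switching to 'always' failed");
            if (f)
                    fclose(f);
            return 0;
    }
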
diff --git a/patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch b/patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch
new file mode 100644
index 0000000000..061fa9002e
--- /dev/null
+++ b/patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch
@@ -0,0 +1,49 @@
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+Subject: [PATCH] cpu/hotplug: detect SMT disabled by BIOS
+Patch-mainline: Not yet, under development
+References: bsc#1089343 CVE-2018-3646
+
+commit 73d5e2b472640b1fcdb61ae8be389912ef211bda upstream
+
+If SMT is disabled in BIOS, the CPU code doesn't properly detect it.
+The /sys/devices/system/cpu/smt/control file shows 'on', and the 'l1tf'
+vulnerabilities file shows SMT as vulnerable.
+
+Fix it by forcing 'cpu_smt_control' to CPU_SMT_NOT_SUPPORTED in such a
+case. Unfortunately the detection can only be done after bringing all
+the CPUs online, so we have to overwrite any previous writes to the
+variable.
+
+Reported-by: Joe Mario <jmario@redhat.com>
+Tested-by: Jiri Kosina <jkosina@suse.cz>
+Fixes: f048c399e0f7 ("x86/topology: Provide topology_smt_supported()")
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ kernel/cpu.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 39b9e0c65f12..4547bc72febb 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -2137,6 +2137,15 @@ static const struct attribute_group cpuhp_smt_attr_group = {
+
+ static int __init cpu_smt_state_init(void)
+ {
++ /*
++ * If SMT was disabled by BIOS, detect it here, after the CPUs have
++ * been brought online. This ensures the smt/l1tf sysfs entries are
++ * consistent with reality. Note this may overwrite cpu_smt_control's
++ * previous setting.
++ */
++ if (topology_max_smt_threads() == 1)
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++
+ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_smt_attr_group);
+ }
+--
+2.12.3
+
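
The check added above boils down to how many sibling threads the topology code actually found. A rough userspace cross-check (illustrative only; it uses the standard CPU topology sysfs files, nothing introduced by this patch):

    #include <stdio.h>

    /* Print a one-line sysfs attribute if it exists. */
    static void show(const char *path)
    {
            char buf[128];
            FILE *f = fopen(path, "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s: %s", path, buf);
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            /* A single-entry sibling list means no active SMT on this core... */
            show("/sys/devices/system/cpu/cpu0/topology/thread_siblings_list");
            /* ...and with the fix above, SMT control then reads 'notsupported'. */
            show("/sys/devices/system/cpu/smt/control");
            return 0;
    }
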
diff --git a/patches.arch/01-jump_label-fix-concurrent-static_key_enable-disable.patch b/patches.arch/01-jump_label-fix-concurrent-static_key_enable-disable.patch
new file mode 100644
index 0000000000..4b7032dcf4
--- /dev/null
+++ b/patches.arch/01-jump_label-fix-concurrent-static_key_enable-disable.patch
@@ -0,0 +1,148 @@
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Tue, 1 Aug 2017 17:24:04 +0200
+Subject: jump_label: Fix concurrent static_key_enable/disable()
+Git-commit: 1dbb6704de91b169a58d0c8221624afd6a95cfc7
+Patch-mainline: v4.14-rc1
+References: bsc#1089343
+
+static_key_enable/disable are trying to cap the static key count to
+0/1. However, their use of key->enabled is outside jump_label_lock
+so they do not really ensure that.
+
+Rewrite them to do a quick check for an already enabled (respectively,
+already disabled) key, and then recheck under the jump label lock. Unlike
+static_key_slow_inc/dec, a failed check under the jump label lock does
+not modify key->enabled.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1501601046-35683-2-git-send-email-pbonzini@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ include/linux/jump_label.h | 22 +++++++++--------
+ kernel/jump_label.c | 59 +++++++++++++++++++++++++++++-----------------
+ 2 files changed, 49 insertions(+), 32 deletions(-)
+
+diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
+index 2afd74b9d844..740a42ea7f7f 100644
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -234,22 +234,24 @@ static inline int jump_label_apply_nops(struct module *mod)
+
+ static inline void static_key_enable(struct static_key *key)
+ {
+- int count = static_key_count(key);
+-
+- WARN_ON_ONCE(count < 0 || count > 1);
++ STATIC_KEY_CHECK_USE();
+
+- if (!count)
+- static_key_slow_inc(key);
++ if (atomic_read(&key->enabled) != 0) {
++ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
++ return;
++ }
++ atomic_set(&key->enabled, 1);
+ }
+
+ static inline void static_key_disable(struct static_key *key)
+ {
+- int count = static_key_count(key);
+-
+- WARN_ON_ONCE(count < 0 || count > 1);
++ STATIC_KEY_CHECK_USE();
+
+- if (count)
+- static_key_slow_dec(key);
++ if (atomic_read(&key->enabled) != 1) {
++ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
++ return;
++ }
++ atomic_set(&key->enabled, 0);
+ }
+
+ #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index d11c506a6ac3..833eecae825e 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -79,28 +79,6 @@ int static_key_count(struct static_key *key)
+ }
+ EXPORT_SYMBOL_GPL(static_key_count);
+
+-void static_key_enable(struct static_key *key)
+-{
+- int count = static_key_count(key);
+-
+- WARN_ON_ONCE(count < 0 || count > 1);
+-
+- if (!count)
+- static_key_slow_inc(key);
+-}
+-EXPORT_SYMBOL_GPL(static_key_enable);
+-
+-void static_key_disable(struct static_key *key)
+-{
+- int count = static_key_count(key);
+-
+- WARN_ON_ONCE(count < 0 || count > 1);
+-
+- if (count)
+- static_key_slow_dec(key);
+-}
+-EXPORT_SYMBOL_GPL(static_key_disable);
+-
+ void static_key_slow_inc(struct static_key *key)
+ {
+ int v, v1;
+@@ -139,6 +117,43 @@ void static_key_slow_inc(struct static_key *key)
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+
++void static_key_enable(struct static_key *key)
++{
++ STATIC_KEY_CHECK_USE();
++ if (atomic_read(&key->enabled) > 0) {
++ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
++ return;
++ }
++
++ cpus_read_lock();
++ jump_label_lock();
++ if (atomic_read(&key->enabled) == 0) {
++ atomic_set(&key->enabled, -1);
++ jump_label_update(key);
++ atomic_set(&key->enabled, 1);
++ }
++ jump_label_unlock();
++ cpus_read_unlock();
++}
++EXPORT_SYMBOL_GPL(static_key_enable);
++
++void static_key_disable(struct static_key *key)
++{
++ STATIC_KEY_CHECK_USE();
++ if (atomic_read(&key->enabled) != 1) {
++ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
++ return;
++ }
++
++ cpus_read_lock();
++ jump_label_lock();
++ if (atomic_cmpxchg(&key->enabled, 1, 0))
++ jump_label_update(key);
++ jump_label_unlock();
++ cpus_read_unlock();
++}
++EXPORT_SYMBOL_GPL(static_key_disable);
++
+ static void __static_key_slow_dec(struct static_key *key,
+ unsigned long rate_limit, struct delayed_work *work)
+ {
+
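
For context, this is the API whose corner case the patch fixes; a minimal usage sketch (the key and function names are invented for illustration, not taken from the kernel):

    #include <linux/jump_label.h>
    #include <linux/types.h>

    /* Key starts disabled; the branch below is patched out by default. */
    static DEFINE_STATIC_KEY_FALSE(my_feature_key);

    void my_fast_path(void)
    {
            if (static_branch_unlikely(&my_feature_key)) {
                    /* Only reached once the key has been enabled. */
            }
    }

    void my_feature_setup(bool on)
    {
            /*
             * With the fix above, concurrent callers can no longer push the
             * enable count outside 0/1: the final state is decided under
             * jump_label_lock.
             */
            if (on)
                    static_branch_enable(&my_feature_key);
            else
                    static_branch_disable(&my_feature_key);
    }
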
diff --git a/patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch b/patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch
new file mode 100644
index 0000000000..b727d6851e
--- /dev/null
+++ b/patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch
@@ -0,0 +1,102 @@
+From c038a1e28449578f948cc2ab09758e8798c1c2c6 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 11:29:53 -0400
+Subject: [PATCH 31/40] x86/KVM: Warn user if KVM is loaded SMT and L1TF CPU
+ bug being present
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 26acfb666a473d960f0fd971fe68f3e3ad16c70b upstream
+
+If the L1TF CPU bug is present we allow the KVM module to be loaded as the
+majority of users that use Linux and KVM have trusted guests and do not want a
+broken setup.
+
+Cloud vendors are the ones that are uncomfortable with CVE-2018-3620 and as
+such they are the ones that should set nosmt to one.
+
+Setting 'nosmt' means that the system administrator also needs to disable
+SMT (Hyper-threading) in the BIOS, or via the 'nosmt' command line
+parameter, or via the /sys/devices/system/cpu/smt/control. See commit
+05736e4ac13c ("cpu/hotplug: Provide knobs to control SMT").
+
+Other mitigations are to use task affinity, cpu sets, interrupt binding,
+etc - anything to make sure that _only_ the same guests vCPUs are running
+on sibling threads.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 6 ++++++
+ arch/x86/kvm/vmx.c | 19 +++++++++++++++++++
+ kernel/cpu.c | 1 +
+ 3 files changed, 26 insertions(+)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -70,6 +70,9 @@ static const struct x86_cpu_id vmx_cpu_i
+ };
+ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+
++static bool __read_mostly nosmt;
++module_param(nosmt, bool, S_IRUGO);
++
+ static bool __read_mostly enable_vpid = 1;
+ module_param_named(vpid, enable_vpid, bool, 0444);
+
+@@ -9835,6 +9838,20 @@ free_vcpu:
+ return ERR_PTR(err);
+ }
+
++#define L1TF_MSG "SMT enabled with L1TF CPU bug present. Refer to CVE-2018-3620 for details.\n"
++
++static int vmx_vm_init(struct kvm *kvm)
++{
++ if (boot_cpu_has(X86_BUG_L1TF) && cpu_smt_control == CPU_SMT_ENABLED) {
++ if (nosmt) {
++ pr_err(L1TF_MSG);
++ return -EOPNOTSUPP;
++ }
++ pr_warn(L1TF_MSG);
++ }
++ return 0;
++}
++
+ static void __init vmx_check_processor_compat(void *rtn)
+ {
+ struct vmcs_config vmcs_conf;
+@@ -12264,6 +12281,8 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .cpu_has_accelerated_tpr = report_flexpriority,
+ .has_emulated_msr = vmx_has_emulated_msr,
+
++ .vm_init = vmx_vm_init,
++
+ .vcpu_create = vmx_create_vcpu,
+ .vcpu_free = vmx_free_vcpu,
+ .vcpu_reset = vmx_vcpu_reset,
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1860,6 +1860,12 @@
+ [KVM,ARM] Allow use of GICv4 for direct injection of
+ LPIs.
+
++ kvm-intel.nosmt=[KVM,Intel] If the L1TF CPU bug is present (CVE-2018-3620)
++ and the system has SMT (aka Hyper-Threading) enabled then
++ don't allow guests to be created.
++
++ Default is 0 (allow guests to be created).
++
+ kvm-intel.ept= [KVM,Intel] Disable extended page tables
+ (virtualized MMU) support on capable Intel chips.
+ Default is 1 (enabled)
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -347,6 +347,7 @@ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+
+ #ifdef CONFIG_HOTPLUG_SMT
+ enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++EXPORT_SYMBOL_GPL(cpu_smt_control);
+
+ static int __init smt_cmdline_disable(char *str)
+ {
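
The commit message above lists task affinity as one of the complementary mitigations. As a rough illustration (not from the patch; the CPU numbers are an assumption and must be the sibling threads of one physical core on the actual machine), a VMM process could be confined like this before it starts its vCPU threads:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t set;

            CPU_ZERO(&set);
            /* Hypothetical sibling pair of one physical core, e.g. CPUs 2 and 6. */
            CPU_SET(2, &set);
            CPU_SET(6, &set);

            /* Pin this process; a VMM exec'd from here inherits the mask. */
            if (sched_setaffinity(0, sizeof(set), &set)) {
                    perror("sched_setaffinity");
                    return 1;
            }
            /* ... exec the VMM here ... */
            return 0;
    }
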
diff --git a/patches.arch/02-jump_label-provide-hotplug-context-variants.patch b/patches.arch/02-jump_label-provide-hotplug-context-variants.patch
new file mode 100644
index 0000000000..c287a5db8e
--- /dev/null
+++ b/patches.arch/02-jump_label-provide-hotplug-context-variants.patch
@@ -0,0 +1,147 @@
+From: Marc Zyngier <marc.zyngier@arm.com>
+Date: Tue, 1 Aug 2017 09:02:56 +0100
+Subject: jump_label: Provide hotplug context variants
+Git-commit: 5a40527f8f0798553764fc8db4111d7d9c33ea51
+Patch-mainline: v4.14-rc1
+References: bsc#1089343
+
+As using the normal static key API under the hotplug lock is
+pretty much impossible, let's provide variants of some of them
+that require the hotplug lock to have already been taken.
+
+These functions are only meant to be used in CPU hotplug callbacks.
+
+Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/20170801080257.5056-4-marc.zyngier@arm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/static-keys.txt | 15 +++++++++++++++
+ include/linux/jump_label.h | 11 +++++++++--
+ kernel/jump_label.c | 22 ++++++++++++++++++----
+ 3 files changed, 42 insertions(+), 6 deletions(-)
+
+--- a/Documentation/static-keys.txt
++++ b/Documentation/static-keys.txt
+@@ -142,6 +142,21 @@ static_branch_inc(), will change the bra
+ key is initialized false, a 'static_branch_inc()', will change the branch to
+ true. And then a 'static_branch_dec()', will again make the branch false.
+
++Note that switching branches results in some locks being taken,
++particularly the CPU hotplug lock (in order to avoid races against
++CPUs being brought into the kernel whilst the kernel is getting
++patched). Calling the static key API from within a hotplug notifier is
++thus a sure deadlock recipe. In order to still allow use of the
++functionality, the following functions are provided:
++
++ static_key_enable_cpuslocked()
++ static_key_disable_cpuslocked()
++ static_branch_enable_cpuslocked()
++ static_branch_disable_cpuslocked()
++
++These functions are *not* general purpose, and must only be used when
++you really know that you're in the above context, and no other.
++
+ Where an array of keys is required, it can be defined as:
+
+ DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -163,6 +163,8 @@ extern void jump_label_apply_nops(struct
+ extern int static_key_count(struct static_key *key);
+ extern void static_key_enable(struct static_key *key);
+ extern void static_key_disable(struct static_key *key);
++extern void static_key_enable_cpuslocked(struct static_key *key);
++extern void static_key_disable_cpuslocked(struct static_key *key);
+
+ /*
+ * We should be using ATOMIC_INIT() for initializing .enabled, but
+@@ -254,6 +256,9 @@ static inline void static_key_disable(st
+ atomic_set(&key->enabled, 0);
+ }
+
++#define static_key_enable_cpuslocked(k) static_key_enable((k))
++#define static_key_disable_cpuslocked(k) static_key_disable((k))
++
+ #define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+ #define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
+
+@@ -415,8 +420,10 @@ extern bool ____wrong_branch_error(void)
+ * Normal usage; boolean enable/disable.
+ */
+
+-#define static_branch_enable(x) static_key_enable(&(x)->key)
+-#define static_branch_disable(x) static_key_disable(&(x)->key)
++#define static_branch_enable(x) static_key_enable(&(x)->key)
++#define static_branch_disable(x) static_key_disable(&(x)->key)
++#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
++#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
+
+ #endif /* __ASSEMBLY__ */
+
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -117,15 +117,15 @@ void static_key_slow_inc(struct static_k
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+
+-void static_key_enable(struct static_key *key)
++void static_key_enable_cpuslocked(struct static_key *key)
+ {
+ STATIC_KEY_CHECK_USE();
++
+ if (atomic_read(&key->enabled) > 0) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+ return;
+ }
+
+- cpus_read_lock();
+ jump_label_lock();
+ if (atomic_read(&key->enabled) == 0) {
+ atomic_set(&key->enabled, -1);
+@@ -133,23 +133,37 @@ void static_key_enable(struct static_key
+ atomic_set(&key->enabled, 1);
+ }
+ jump_label_unlock();
++}
++EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
++
++void static_key_enable(struct static_key *key)
++{
++ cpus_read_lock();
++ static_key_enable_cpuslocked(key);
+ cpus_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(static_key_enable);
+
+-void static_key_disable(struct static_key *key)
++void static_key_disable_cpuslocked(struct static_key *key)
+ {
+ STATIC_KEY_CHECK_USE();
++
+ if (atomic_read(&key->enabled) != 1) {
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+ return;
+ }
+
+- cpus_read_lock();
+ jump_label_lock();
+ if (atomic_cmpxchg(&key->enabled, 1, 0))
+ jump_label_update(key);
+ jump_label_unlock();
++}
++EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
++
++void static_key_disable(struct static_key *key)
++{
++ cpus_read_lock();
++ static_key_disable_cpuslocked(key);
+ cpus_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(static_key_disable);
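
A hedged sketch of how the new *_cpuslocked() variants are meant to be used — from a CPU hotplug callback, where the hotplug lock is already held (the state name, callback and key below are invented for illustration):

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>
    #include <linux/jump_label.h>

    static DEFINE_STATIC_KEY_FALSE(my_hotplug_key);

    static int my_cpu_online(unsigned int cpu)
    {
            /*
             * Runs with the hotplug lock held, so plain static_branch_enable()
             * would be the deadlock recipe described above; use the
             * cpuslocked variant instead.
             */
            static_branch_enable_cpuslocked(&my_hotplug_key);
            return 0;
    }

    static int __init my_init(void)
    {
            int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/my:online",
                                        my_cpu_online, NULL);
            return ret < 0 ? ret : 0;
    }
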
diff --git a/patches.arch/02-sched-smt-update-sched_smt_present-at-runtime.patch b/patches.arch/02-sched-smt-update-sched_smt_present-at-runtime.patch
new file mode 100644
index 0000000000..71836d3f31
--- /dev/null
+++ b/patches.arch/02-sched-smt-update-sched_smt_present-at-runtime.patch
@@ -0,0 +1,87 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 29 May 2018 16:43:46 +0200
+Subject: sched/smt: Update sched_smt_present at runtime
+Git-commit: ba2591a5993eabcc8e874e30f361d8ffbb10d6d4
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+The static key sched_smt_present is only updated at boot time when SMT
+siblings have been detected. Booting with maxcpus=1 and bringing the
+siblings online after boot rebuilds the scheduling domains correctly but
+does not update the static key, so the SMT code is not enabled.
+
+Let the key be updated in the scheduler CPU hotplug code to fix this.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/sched/core.c | 30 ++++++++++++------------------
+ kernel/sched/fair.c | 1 +
+ 2 files changed, 13 insertions(+), 18 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5832,6 +5832,18 @@ int sched_cpu_activate(unsigned int cpu)
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+
++#ifdef CONFIG_SCHED_SMT
++ /*
++ * The sched_smt_present static key needs to be evaluated on every
++ * hotplug event because at boot time SMT might be disabled when
++ * the number of booted CPUs is limited.
++ *
++ * If then later a sibling gets hotplugged, then the key would stay
++ * off and SMT scheduling would never be functional.
++ */
++ if (cpumask_weight(cpu_smt_mask(cpu)) > 1)
++ static_branch_enable_cpuslocked(&sched_smt_present);
++#endif
+ set_cpu_active(cpu, true);
+
+ if (sched_smp_initialized) {
+@@ -5933,22 +5945,6 @@ int sched_cpu_dying(unsigned int cpu)
+ }
+ #endif
+
+-#ifdef CONFIG_SCHED_SMT
+-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+-
+-static void sched_init_smt(void)
+-{
+- /*
+- * We've enumerated all CPUs and will assume that if any CPU
+- * has SMT siblings, CPU0 will too.
+- */
+- if (cpumask_weight(cpu_smt_mask(0)) > 1)
+- static_branch_enable(&sched_smt_present);
+-}
+-#else
+-static inline void sched_init_smt(void) { }
+-#endif
+-
+ void __init sched_init_smp(void)
+ {
+ cpumask_var_t non_isolated_cpus;
+@@ -5978,8 +5974,6 @@ void __init sched_init_smp(void)
+ init_sched_rt_class();
+ init_sched_dl_class();
+
+- sched_init_smt();
+-
+ sched_smp_initialized = true;
+ }
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -6044,6 +6044,7 @@ find_idlest_cpu(struct sched_group *grou
+ }
+
+ #ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+
+ static inline void set_idle_cores(int cpu, int val)
+ {
diff --git a/patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch b/patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch
new file mode 100644
index 0000000000..8c2425829a
--- /dev/null
+++ b/patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch
@@ -0,0 +1,132 @@
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Subject: [PATCH 32/40] x86/KVM/VMX: Add module argument for L1TF mitigation
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit a399477e52c17e148746d3ce9a483f681c2aa9a0 upstream
+
+Add a mitigation mode parameter "vmentry_l1d_flush" for CVE-2018-3620, aka
+L1 terminal fault. The valid arguments are:
+
+ - "always" L1D cache flush on every VMENTER.
+ - "cond" Conditional L1D cache flush, explained below
+ - "never" Disable the L1D cache flush mitigation
+
+"cond" is trying to avoid L1D cache flushes on VMENTER if the code executed
+between VMEXIT and VMENTER is considered safe, i.e. is not bringing any
+interesting information into L1D which might be exploited.
+
+[ tglx: Split out from a larger patch ]
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 12 ++++
+ arch/x86/kvm/vmx.c | 65 +++++++++++++++++++++++-
+ 2 files changed, 75 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -197,6 +197,54 @@ module_param(ple_window_max, int, S_IRUG
+
+ extern const ulong vmx_return;
+
++static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
++
++/* These MUST be in sync with vmentry_l1d_param order. */
++enum vmx_l1d_flush_state {
++ VMENTER_L1D_FLUSH_NEVER,
++ VMENTER_L1D_FLUSH_COND,
++ VMENTER_L1D_FLUSH_ALWAYS,
++};
++
++static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
++
++static const struct {
++ const char *option;
++ enum vmx_l1d_flush_state cmd;
++} vmentry_l1d_param[] = {
++ {"never", VMENTER_L1D_FLUSH_NEVER},
++ {"cond", VMENTER_L1D_FLUSH_COND},
++ {"always", VMENTER_L1D_FLUSH_ALWAYS},
++};
++
++static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
++{
++ unsigned int i;
++
++ if (!s)
++ return -EINVAL;
++
++ for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
++ if (!strcmp(s, vmentry_l1d_param[i].option)) {
++ vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
++ return 0;
++ }
++ }
++
++ return -EINVAL;
++}
++
++static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
++{
++ return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
++}
++
++static const struct kernel_param_ops vmentry_l1d_flush_ops = {
++ .set = vmentry_l1d_flush_set,
++ .get = vmentry_l1d_flush_get,
++};
++module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
++
+ #define NR_AUTOLOAD_MSRS 8
+
+ struct vmcs {
+@@ -12404,10 +12452,23 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .enable_smi_window = enable_smi_window,
+ };
+
++static void __init vmx_setup_l1d_flush(void)
++{
++ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
++ !boot_cpu_has_bug(X86_BUG_L1TF))
++ return;
++
++ static_branch_enable(&vmx_l1d_should_flush);
++}
++
+ static int __init vmx_init(void)
+ {
+- int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+- __alignof__(struct vcpu_vmx), THIS_MODULE);
++ int r;
++
++ vmx_setup_l1d_flush();
++
++ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
++ __alignof__(struct vcpu_vmx), THIS_MODULE);
+ if (r)
+ return r;
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -1887,6 +1887,18 @@
+ (virtualized real and unpaged mode) on capable
+ Intel chips. Default is 1 (enabled)
+
++ kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
++ CVE-2018-3620.
++
++ Valid arguments: never, cond, always
++
++ always: L1D cache flush on every VMENTER.
++ cond: Flush L1D on VMENTER only when the code between
++ VMEXIT and VMENTER can leak host memory.
++ never: Disables the mitigation
++
++ Default is cond (do L1 cache flush in specific instances)
++
+ kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification
+ feature (tagged TLBs) on capable Intel chips.
+ Default is 1 (enabled)
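The setter behind this module parameter is essentially a table-driven string-to-enum lookup wired up through kernel_param_ops. A minimal userspace sketch of that logic (the names parse_flush_param and params are invented for illustration; only the option table order and the EINVAL-style failure mode mirror the patch) could look like this:

    #include <stdio.h>
    #include <string.h>

    /* Mirrors the three vmentry_l1d_flush states, in vmentry_l1d_param order. */
    enum l1d_flush_state { FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

    static const struct {
        const char *option;
        enum l1d_flush_state cmd;
    } params[] = {
        { "never",  FLUSH_NEVER  },
        { "cond",   FLUSH_COND   },
        { "always", FLUSH_ALWAYS },
    };

    /* Return the matching state, or -1 where the kernel setter returns -EINVAL. */
    static int parse_flush_param(const char *s)
    {
        unsigned int i;

        if (!s)
            return -1;
        for (i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
            if (!strcmp(s, params[i].option))
                return params[i].cmd;
        }
        return -1;
    }

    int main(void)
    {
        printf("cond  -> %d\n", parse_flush_param("cond"));  /* 1 */
        printf("bogus -> %d\n", parse_flush_param("bogus")); /* -1 */
        return 0;
    }

At boot the parameter surfaces as kvm-intel.vmentry_l1d_flush=never|cond|always, matching the kernel-parameters.txt hunk above.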
diff --git a/patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch b/patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch
new file mode 100644
index 0000000000..1100950301
--- /dev/null
+++ b/patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch
@@ -0,0 +1,137 @@
+From b4d3b3f62cfa91e976c7bc1042ef81ca6801bfd1 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 12:47:38 +0200
+Subject: [PATCH 33/40] x86/KVM/VMX: Add L1D flush algorithm
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit a47dd5f06714c844b33f3b5f517b6f3e81ce57b5 upstream
+
+To mitigate the L1 Terminal Fault vulnerability it's required to flush L1D
+on VMENTER to prevent rogue guests from snooping host memory.
+
+CPUs will have a new control MSR via a microcode update to flush L1D with a
+single MSR write, but in the absence of microcode a fallback to a software
+based flush algorithm is required.
+
+Add a software flush loop which is based on code from Intel.
+
+[ tglx: Split out from combo patch ]
+[ bpetkov: Polish the asm code ]
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 66 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9042,6 +9042,46 @@ static int vmx_handle_exit(struct kvm_vc
+ }
+ }
+
++/*
++ * Software based L1D cache flush which is used when microcode providing
++ * the cache control MSR is not loaded.
++ *
++ * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
++ * flush it is required to read in 64 KiB because the replacement algorithm
++ * is not exactly LRU. This could be sized at runtime via topology
++ * information but as all relevant affected CPUs have 32KiB L1D cache size
++ * there is no point in doing so.
++ */
++#define L1D_CACHE_ORDER 4
++static void *vmx_l1d_flush_pages;
++
++static void __maybe_unused vmx_l1d_flush(void)
++{
++ int size = PAGE_SIZE << L1D_CACHE_ORDER;
++
++ asm volatile(
++ /* First ensure the pages are in the TLB */
++ "xorl %%eax, %%eax\n"
++ ".Lpopulate_tlb:\n\t"
++ "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "addl $4096, %%eax\n\t"
++ "cmpl %%eax, %[size]\n\t"
++ "jne .Lpopulate_tlb\n\t"
++ "xorl %%eax, %%eax\n\t"
++ "cpuid\n\t"
++ /* Now fill the cache */
++ "xorl %%eax, %%eax\n"
++ ".Lfill_cache:\n"
++ "movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
++ "addl $64, %%eax\n\t"
++ "cmpl %%eax, %[size]\n\t"
++ "jne .Lfill_cache\n\t"
++ "lfence\n"
++ :: [empty_zp] "r" (vmx_l1d_flush_pages),
++ [size] "r" (size)
++ : "eax", "ebx", "ecx", "edx");
++}
++
+ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
+ {
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+@@ -12452,25 +12492,45 @@ static struct kvm_x86_ops vmx_x86_ops __
+ .enable_smi_window = enable_smi_window,
+ };
+
+-static void __init vmx_setup_l1d_flush(void)
++static int __init vmx_setup_l1d_flush(void)
+ {
++ struct page *page;
++
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+ !boot_cpu_has_bug(X86_BUG_L1TF))
+- return;
++ return 0;
+
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++
++ vmx_l1d_flush_pages = page_address(page);
+ static_branch_enable(&vmx_l1d_should_flush);
++ return 0;
++}
++
++static void vmx_free_l1d_flush_pages(void)
++{
++ if (vmx_l1d_flush_pages) {
++ free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
++ vmx_l1d_flush_pages = NULL;
++ }
+ }
+
+ static int __init vmx_init(void)
+ {
+ int r;
+
+- vmx_setup_l1d_flush();
++ r = vmx_setup_l1d_flush();
++ if (r)
++ return r;
+
+ r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+ __alignof__(struct vcpu_vmx), THIS_MODULE);
+- if (r)
++ if (r) {
++ vmx_free_l1d_flush_pages();
+ return r;
++ }
+
+ #ifdef CONFIG_KEXEC_CORE
+ rcu_assign_pointer(crash_vmclear_loaded_vmcss,
+@@ -12488,6 +12548,8 @@ static void __exit vmx_exit(void)
+ #endif
+
+ kvm_exit();
++
++ vmx_free_l1d_flush_pages();
+ }
+
+ module_init(vmx_init)
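For readers who find the inline assembly above hard to follow: the fallback flush is conceptually a strided read over a 64 KiB buffer (PAGE_SIZE << L1D_CACHE_ORDER), first touching every page to populate the TLB and then touching one byte per 64-byte cache line. The sketch below reproduces only that access pattern in userspace C; it illustrates the sizing arithmetic, it is not a substitute for the mitigation, which needs the exact asm sequence with the cpuid serialization and the trailing lfence.

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE       4096
    #define L1D_CACHE_ORDER 4        /* 16 pages = 64 KiB, as in the patch */
    #define CACHE_LINE      64

    int main(void)
    {
        size_t size = (size_t)PAGE_SIZE << L1D_CACHE_ORDER;
        volatile unsigned char *buf = calloc(1, size);
        unsigned long sum = 0;
        size_t off;

        if (!buf)
            return 1;

        /* Pass 1: one access per page so the translations are cached. */
        for (off = 0; off < size; off += PAGE_SIZE)
            sum += buf[off];

        /* Pass 2: one access per cache line to displace the L1D contents. */
        for (off = 0; off < size; off += CACHE_LINE)
            sum += buf[off];

        printf("read %zu bytes in %d-byte steps (sum %lu)\n",
               size, CACHE_LINE, sum);
        free((void *)buf);
        return 0;
    }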
diff --git a/patches.arch/03-x86-smp-provide-topology_is_primary_thread.patch b/patches.arch/03-x86-smp-provide-topology_is_primary_thread.patch
new file mode 100644
index 0000000000..7f599a5893
--- /dev/null
+++ b/patches.arch/03-x86-smp-provide-topology_is_primary_thread.patch
@@ -0,0 +1,104 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 17:50:22 +0200
+Subject: x86/smp: Provide topology_is_primary_thread()
+Git-commit: 6a4d2657e048f096c7ffcad254010bd94891c8c0
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+If the CPU is supporting SMT then the primary thread can be found by
+checking the lower APIC ID bits for zero. smp_num_siblings is used to build
+the mask for the APIC ID bits which need to be taken into account.
+
+This uses the MPTABLE or ACPI/MADT supplied APIC ID, which can be different
+than the initial APIC ID in CPUID. But according to AMD the lower bits have
+to be consistent. Intel gave a tentative confirmation as well.
+
+Preparatory patch to support disabling SMT at boot/runtime.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/apic.h | 6 ++++++
+ arch/x86/include/asm/topology.h | 4 +++-
+ arch/x86/kernel/apic/apic.c | 15 +++++++++++++++
+ arch/x86/kernel/smpboot.c | 9 +++++++++
+ 4 files changed, 33 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -627,6 +627,12 @@ extern int default_check_phys_apicid_pre
+ #endif
+
+ #endif /* CONFIG_X86_LOCAL_APIC */
++#ifdef CONFIG_SMP
++bool apic_id_is_primary_thread(unsigned int id);
++#else
++static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
++#endif
++
+ extern void irq_enter(void);
+ extern void irq_exit(void);
+
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -129,13 +129,15 @@ static inline int topology_max_smt_threa
+ }
+
+ int topology_update_package_map(unsigned int apicid, unsigned int cpu);
+-extern int topology_phys_to_logical_pkg(unsigned int pkg);
++int topology_phys_to_logical_pkg(unsigned int pkg);
++bool topology_is_primary_thread(unsigned int cpu);
+ #else
+ #define topology_max_packages() (1)
+ static inline int
+ topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
+ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+ static inline int topology_max_smt_threads(void) { return 1; }
++static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
+ #endif
+
+ static inline void arch_fix_phys_package_id(int num, u32 slot)
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2038,6 +2038,21 @@ static int cpuid_to_apicid[] = {
+ [0 ... NR_CPUS - 1] = -1,
+ };
+
++/**
++ * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
++ * @id: APIC ID to check
++ */
++bool apic_id_is_primary_thread(unsigned int apicid)
++{
++ u32 mask;
++
++ if (smp_num_siblings == 1)
++ return true;
++ /* Isolate the SMT bit(s) in the APICID and check for 0 */
++ mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
++ return !(apicid & mask);
++}
++
+ /*
+ * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
+ * and cpuid_to_apicid[] synchronized.
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -272,6 +272,15 @@ static void notrace start_secondary(void
+ }
+
+ /**
++ * topology_is_primary_thread - Check whether CPU is the primary SMT thread
++ * @cpu: CPU to check
++ */
++bool topology_is_primary_thread(unsigned int cpu)
++{
++ return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
++}
++
++/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical
+ *
+ * Returns logical package id or -1 if not found
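The heart of this patch is the observation that, with smp_num_siblings threads per core, the low fls(smp_num_siblings) - 1 bits of the APIC ID identify the thread within the core, so the primary thread is the one with those bits all zero. A standalone sketch of that bit manipulation (fls_u32 and is_primary_thread are invented names for illustration; the mask computation is the same as in apic_id_is_primary_thread() above):

    #include <stdio.h>

    /* Minimal fls(): position of the most significant set bit, 1-based. */
    static int fls_u32(unsigned int x)
    {
        int pos = 0;

        while (x) {
            pos++;
            x >>= 1;
        }
        return pos;
    }

    /* Same check as apic_id_is_primary_thread(): low SMT bits must be zero. */
    static int is_primary_thread(unsigned int apicid, unsigned int siblings)
    {
        unsigned int mask;

        if (siblings == 1)
            return 1;
        mask = (1U << (fls_u32(siblings) - 1)) - 1;
        return !(apicid & mask);
    }

    int main(void)
    {
        /* Two threads per core: even APIC IDs are primary, odd are siblings. */
        printf("apicid 4 -> %d\n", is_primary_thread(4, 2)); /* 1 */
        printf("apicid 5 -> %d\n", is_primary_thread(5, 2)); /* 0 */
        return 0;
    }

With two siblings the mask is 1, so even APIC IDs are the primary threads and odd ones are the siblings that the later SMT control patches refuse to online.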
diff --git a/patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch b/patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch
new file mode 100644
index 0000000000..8b08e6bede
--- /dev/null
+++ b/patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch
@@ -0,0 +1,84 @@
+From ab38da8677245ccbb76767a4ed7e39b5106e00cf Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 13:03:48 +0200
+Subject: [PATCH 34/40] x86/KVM/VMX: Add L1D MSR based flush
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 3fa045be4c720146b18a19cea7a767dc6ad5df94 upstream
+
+336996-Speculative-Execution-Side-Channel-Mitigations.pdf defines a new MSR
+(IA32_FLUSH_CMD aka 0x10B) which has similar write-only semantics to other
+MSRs defined in the document.
+
+The semantics of this MSR is to allow "finer granularity invalidation of
+caching structures than existing mechanisms like WBINVD. It will writeback
+and invalidate the L1 data cache, including all cachelines brought in by
+preceding instructions, without invalidating all caches (eg. L2 or
+LLC). Some processors may also invalidate the first level instruction
+cache on a L1D_FLUSH command. The L1 data and instruction caches may be
+shared across the logical processors of a core."
+
+Use it instead of the loop based L1 flush algorithm.
+
+A copy of this document is available at
+ https://bugzilla.kernel.org/show_bug.cgi?id=199511
+
+[ tglx: Avoid allocating pages when the MSR is available ]
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/msr-index.h | 6 ++++++
+ arch/x86/kvm/vmx.c | 15 +++++++++++----
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -75,6 +75,12 @@
+ * control required.
+ */
+
++#define MSR_IA32_FLUSH_CMD 0x0000010b
++#define L1D_FLUSH (1 << 0) /*
++ * Writeback and invalidate the
++ * L1 data cache.
++ */
++
+ #define MSR_IA32_BBL_CR_CTL 0x00000119
+ #define MSR_IA32_BBL_CR_CTL3 0x0000011e
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9059,6 +9059,11 @@ static void __maybe_unused vmx_l1d_flush
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
+
++ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
++ return;
++ }
++
+ asm volatile(
+ /* First ensure the pages are in the TLB */
+ "xorl %%eax, %%eax\n"
+@@ -12500,11 +12505,13 @@ static int __init vmx_setup_l1d_flush(vo
+ !boot_cpu_has_bug(X86_BUG_L1TF))
+ return 0;
+
+- page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+- if (!page)
+- return -ENOMEM;
++ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
++ page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
++ if (!page)
++ return -ENOMEM;
++ vmx_l1d_flush_pages = page_address(page);
++ }
+
+- vmx_l1d_flush_pages = page_address(page);
+ static_branch_enable(&vmx_l1d_should_flush);
+ return 0;
+ }
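Taken together with the previous patch, the setup logic now has three outcomes: do nothing when the mitigation is not wanted, rely on the IA32_FLUSH_CMD MSR when the FLUSH_L1D feature is present, and only otherwise allocate the 64 KiB buffer for the software loop. A toy model of that decision (userspace, invented names; flush_enabled stands in for the 'never'/X86_BUG_L1TF checks):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE       4096
    #define L1D_CACHE_ORDER 4

    /*
     * Toy model of vmx_setup_l1d_flush() at this point in the series:
     * flush pages are only allocated when the mitigation is wanted and
     * the IA32_FLUSH_CMD MSR is not available.
     */
    static void *setup_l1d_flush(bool flush_enabled, bool has_flush_msr)
    {
        if (!flush_enabled)
            return NULL;            /* 'never' or CPU not affected */
        if (has_flush_msr)
            return NULL;            /* MSR write path, no buffer needed */
        return calloc(1, (size_t)PAGE_SIZE << L1D_CACHE_ORDER);
    }

    int main(void)
    {
        void *pages = setup_l1d_flush(true, false);

        printf("software fallback buffer: %s\n", pages ? "allocated" : "none");
        free(pages);
        return 0;
    }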
diff --git a/patches.arch/04-x86-topology-provide-topology_smt_supported.patch b/patches.arch/04-x86-topology-provide-topology_smt_supported.patch
new file mode 100644
index 0000000000..f1c9a73172
--- /dev/null
+++ b/patches.arch/04-x86-topology-provide-topology_smt_supported.patch
@@ -0,0 +1,54 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 21 Jun 2018 10:37:20 +0200
+Subject: x86/topology: Provide topology_smt_supported()
+Git-commit: f048c399e0f7490ab7296bc2c255d37eb14a9675
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+Provide information on whether SMT is supported by the CPUs. Preparatory patch
+for the SMT control mechanism.
+
+Suggested-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/topology.h | 2 ++
+ arch/x86/kernel/smpboot.c | 8 ++++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -131,6 +131,7 @@ static inline int topology_max_smt_threa
+ int topology_update_package_map(unsigned int apicid, unsigned int cpu);
+ int topology_phys_to_logical_pkg(unsigned int pkg);
+ bool topology_is_primary_thread(unsigned int cpu);
++bool topology_smt_supported(void);
+ #else
+ #define topology_max_packages() (1)
+ static inline int
+@@ -138,6 +139,7 @@ topology_update_package_map(unsigned int
+ static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+ static inline int topology_max_smt_threads(void) { return 1; }
+ static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
++static inline bool topology_smt_supported(void) { return false; }
+ #endif
+
+ static inline void arch_fix_phys_package_id(int num, u32 slot)
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -281,6 +281,14 @@ bool topology_is_primary_thread(unsigned
+ }
+
+ /**
++ * topology_smt_supported - Check whether SMT is supported by the CPUs
++ */
++bool topology_smt_supported(void)
++{
++ return smp_num_siblings > 1;
++}
++
++/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical
+ *
+ * Returns logical package id or -1 if not found
diff --git a/patches.arch/05-cpu-hotplug-make-bringup-teardown-of-smp-threads-symmetric.patch b/patches.arch/05-cpu-hotplug-make-bringup-teardown-of-smp-threads-symmetric.patch
new file mode 100644
index 0000000000..32f226ba34
--- /dev/null
+++ b/patches.arch/05-cpu-hotplug-make-bringup-teardown-of-smp-threads-symmetric.patch
@@ -0,0 +1,39 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 19:05:25 +0200
+Subject: cpu/hotplug: Make bringup/teardown of smp threads symmetric
+Git-commit: c4de65696d865c225fda3b9913b31284ea65ea96
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+The asymmetry caused a warning to trigger if the bootup was stopped in state
+CPUHP_AP_ONLINE_IDLE. The warning no longer triggers as kthread_park() can
+now be invoked on already or still parked threads. But there is still no
+reason to have this be asymmetric.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/cpu.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -679,7 +679,6 @@ static int takedown_cpu(unsigned int cpu
+
+ /* Park the smpboot threads */
+ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
+- smpboot_park_threads(cpu);
+
+ /*
+ * Prevent irq alloc/free while the dying cpu reorganizes the
+@@ -1228,7 +1227,7 @@ static struct cpuhp_step cpuhp_ap_states
+ [CPUHP_AP_SMPBOOT_THREADS] = {
+ .name = "smpboot/threads:online",
+ .startup.single = smpboot_unpark_threads,
+- .teardown.single = NULL,
++ .teardown.single = smpboot_park_threads,
+ },
+ [CPUHP_AP_PERF_ONLINE] = {
+ .name = "perf:online",
diff --git a/patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch b/patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch
new file mode 100644
index 0000000000..1aacc17bb1
--- /dev/null
+++ b/patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch
@@ -0,0 +1,178 @@
+From c441e50d54c697c6b5e309efd9de3740972a6975 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Mon, 2 Jul 2018 13:07:14 +0200
+Subject: [PATCH 35/40] x86/KVM/VMX: Add L1D flush logic
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit c595ceee45707f00f64f61c54fb64ef0cc0b4e85 upstream
+
+Add the logic for flushing L1D on VMENTER. The flush depends on the static
+key being enabled and the new l1tf_flush_l1d flag being set.
+
+The flag is set:
+ - Always, if the flush module parameter is 'always'
+
+ - Conditionally at:
+ - Entry to vcpu_run(), i.e. after executing user space
+
+ - From the sched_in notifier, i.e. when switching to a vCPU thread.
+
+ - From vmexit handlers which are considered unsafe, i.e. where
+ sensitive data can be brought into L1D:
+
+ - The emulator, which could be a good target for other speculative
+ execution-based threats,
+
+ - The MMU, which can bring host page tables in the L1 cache.
+
+ - External interrupts
+
+ - Nested operations that require the MMU (see above). That is
+ vmptrld, vmptrst, vmclear,vmwrite,vmread.
+
+ - When handling invept,invvpid
+
+[ tglx: Split out from combo patch and reduced to a single flag ]
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/include/asm/kvm_host.h | 4 ++++
+ arch/x86/kvm/mmu.c | 1 +
+ arch/x86/kvm/vmx.c | 23 ++++++++++++++++++++++-
+ arch/x86/kvm/x86.c | 8 ++++++++
+ 4 files changed, 35 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -706,6 +706,9 @@ struct kvm_vcpu_arch {
+
+ /* be preempted when it's in kernel-mode(cpl=0) */
+ bool preempted_in_kernel;
++
++ /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
++ bool l1tf_flush_l1d;
+ };
+
+ struct kvm_lpage_info {
+@@ -886,6 +889,7 @@ struct kvm_vcpu_stat {
+ u64 signal_exits;
+ u64 irq_window_exits;
+ u64 nmi_window_exits;
++ u64 l1d_flush;
+ u64 halt_exits;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3836,6 +3836,7 @@ int kvm_handle_page_fault(struct kvm_vcp
+ {
+ int r = 1;
+
++ vcpu->arch.l1tf_flush_l1d = true;
+ switch (vcpu->arch.apf.host_apf_reason) {
+ default:
+ trace_kvm_page_fault(fault_address, error_code);
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9001,9 +9001,20 @@ static int vmx_handle_exit(struct kvm_vc
+ #define L1D_CACHE_ORDER 4
+ static void *vmx_l1d_flush_pages;
+
+-static void __maybe_unused vmx_l1d_flush(void)
++static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
+ {
+ int size = PAGE_SIZE << L1D_CACHE_ORDER;
++ bool always;
++
++ /*
++ * If the mitigation mode is 'flush always', keep the flush bit
++ * set, otherwise clear it. It gets set again either from
++ * vcpu_run() or from one of the unsafe VMEXIT handlers.
++ */
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ vcpu->arch.l1tf_flush_l1d = always;
++
++ vcpu->stat.l1d_flush++;
+
+ if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+ wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
+@@ -9274,6 +9285,7 @@ static void vmx_handle_external_intr(str
+ [ss]"i"(__KERNEL_DS),
+ [cs]"i"(__KERNEL_CS)
+ );
++ vcpu->arch.l1tf_flush_l1d = true;
+ }
+ }
+ STACK_FRAME_NON_STANDARD(vmx_handle_external_intr);
+@@ -9528,6 +9540,12 @@ static void __noclone vmx_vcpu_run(struc
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
++
++ if (static_branch_unlikely(&vmx_l1d_should_flush)) {
++ if (vcpu->arch.l1tf_flush_l1d)
++ vmx_l1d_flush(vcpu);
++ }
++
+ asm(
+ /* Store host registers */
+ "push %%" _ASM_DX "; push %%" _ASM_BP ";"
+@@ -11241,6 +11259,9 @@ static int nested_vmx_run(struct kvm_vcp
+ return 1;
+ }
+
++ /* Hide L1D cache contents from the nested guest. */
++ vmx->vcpu.arch.l1tf_flush_l1d = true;
++
+ /*
+ * We're finally done with prerequisite checking, and can start with
+ * the nested entry.
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -184,6 +184,7 @@ struct kvm_stats_debugfs_item debugfs_en
+ { "irq_injections", VCPU_STAT(irq_injections) },
+ { "nmi_injections", VCPU_STAT(nmi_injections) },
+ { "req_event", VCPU_STAT(req_event) },
++ { "l1d_flush", VCPU_STAT(l1d_flush) },
+ { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
+ { "mmu_pte_write", VM_STAT(mmu_pte_write) },
+ { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
+@@ -4594,6 +4595,9 @@ static int kvm_write_guest_virt_helper(g
+ void *data = val;
+ int r = X86EMUL_CONTINUE;
+
++ /* kvm_write_guest_virt_system can pull in tons of pages. */
++ vcpu->arch.l1tf_flush_l1d = true;
++
+ while (bytes) {
+ gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
+ access,
+@@ -5756,6 +5760,8 @@ int x86_emulate_instruction(struct kvm_v
+ bool writeback = true;
+ bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
+
++ vcpu->arch.l1tf_flush_l1d = true;
++
+ /*
+ * Clear write_fault_to_shadow_pgtable here to ensure it is
+ * never reused.
+@@ -7204,6 +7210,7 @@ static int vcpu_run(struct kvm_vcpu *vcp
+ struct kvm *kvm = vcpu->kvm;
+
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
++ vcpu->arch.l1tf_flush_l1d = true;
+
+ for (;;) {
+ if (kvm_vcpu_running(vcpu)) {
+@@ -8217,6 +8224,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcp
+
+ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
+ {
++ vcpu->arch.l1tf_flush_l1d = true;
+ kvm_x86_ops->sched_in(vcpu, cpu);
+ }
+
diff --git a/patches.arch/06-cpu-hotplug-split-do_cpu_down.patch b/patches.arch/06-cpu-hotplug-split-do_cpu_down.patch
new file mode 100644
index 0000000000..47af1c5bf8
--- /dev/null
+++ b/patches.arch/06-cpu-hotplug-split-do_cpu_down.patch
@@ -0,0 +1,51 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 17:49:05 +0200
+Subject: cpu/hotplug: Split do_cpu_down()
+Git-commit: cc1fe215e1efa406b03aa4389e6269b61342dec5
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+Split out the inner workings of do_cpu_down() to allow reuse of that
+function for the upcoming SMT disabling mechanism.
+
+No functional change.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ kernel/cpu.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -803,20 +803,19 @@ out:
+ return ret;
+ }
+
++static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
++{
++ if (cpu_hotplug_disabled)
++ return -EBUSY;
++ return _cpu_down(cpu, 0, target);
++}
++
+ static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
+ {
+ int err;
+
+ cpu_maps_update_begin();
+-
+- if (cpu_hotplug_disabled) {
+- err = -EBUSY;
+- goto out;
+- }
+-
+- err = _cpu_down(cpu, 0, target);
+-
+-out:
++ err = cpu_down_maps_locked(cpu, target);
+ cpu_maps_update_done();
+ return err;
+ }
diff --git a/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch b/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
new file mode 100644
index 0000000000..90daafe417
--- /dev/null
+++ b/patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
@@ -0,0 +1,149 @@
+From 31d19a2332560749924b844557db2042e490433e Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 13:58:37 -0400
+Subject: [PATCH 36/40] x86/KVM/VMX: Split the VMX MSR LOAD structures to have
+ an host/guest numbers
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 33966dd6b2d2c352fae55412db2ea8cfff5df13a upstream
+
+There is no semantic change, but this allows an unbalanced number of
+MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or
+restore on VMEXIT or VMENTER may be different.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 65 ++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 35 insertions(+), 30 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -629,6 +629,11 @@ static inline int pi_test_sn(struct pi_d
+ (unsigned long *)&pi_desc->control);
+ }
+
++struct vmx_msrs {
++ unsigned int nr;
++ struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
++};
++
+ struct vcpu_vmx {
+ struct kvm_vcpu vcpu;
+ unsigned long host_rsp;
+@@ -662,9 +667,8 @@ struct vcpu_vmx {
+ struct loaded_vmcs *loaded_vmcs;
+ bool __launched; /* temporary, used in vmx_vcpu_run */
+ struct msr_autoload {
+- unsigned nr;
+- struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+- struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
++ struct vmx_msrs guest;
++ struct vmx_msrs host;
+ } msr_autoload;
+ struct {
+ int loaded;
+@@ -2046,18 +2050,18 @@ static void clear_atomic_switch_msr(stru
+ }
+ break;
+ }
+-
+- for (i = 0; i < m->nr; ++i)
+- if (m->guest[i].index == msr)
++ for (i = 0; i < m->guest.nr; ++i)
++ if (m->guest.val[i].index == msr)
+ break;
+
+- if (i == m->nr)
++ if (i == m->guest.nr)
+ return;
+- --m->nr;
+- m->guest[i] = m->guest[m->nr];
+- m->host[i] = m->host[m->nr];
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
++ --m->guest.nr;
++ --m->host.nr;
++ m->guest.val[i] = m->guest.val[m->guest.nr];
++ m->host.val[i] = m->host.val[m->host.nr];
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+@@ -2109,24 +2113,25 @@ static void add_atomic_switch_msr(struct
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ }
+
+- for (i = 0; i < m->nr; ++i)
+- if (m->guest[i].index == msr)
++ for (i = 0; i < m->guest.nr; ++i)
++ if (m->guest.val[i].index == msr)
+ break;
+
+ if (i == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i == m->nr) {
+- ++m->nr;
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
++ } else if (i == m->guest.nr) {
++ ++m->guest.nr;
++ ++m->host.nr;
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+- m->guest[i].index = msr;
+- m->guest[i].value = guest_val;
+- m->host[i].index = msr;
+- m->host[i].value = host_val;
++ m->guest.val[i].index = msr;
++ m->guest.val[i].value = guest_val;
++ m->host.val[i].index = msr;
++ m->host.val[i].value = host_val;
+ }
+
+ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
+@@ -5767,9 +5772,9 @@ static void vmx_vcpu_setup(struct vcpu_v
+
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
++ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
++ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+ if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+ vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
+@@ -10892,10 +10897,10 @@ static int prepare_vmcs02(struct kvm_vcp
+ * Set the MSR load/store lists to match L0's settings.
+ */
+ vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+- vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+- vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
++ vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
++ vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
+
+ /*
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
+@@ -11833,8 +11838,8 @@ static void nested_vmx_vmexit(struct kvm
+ vmx_segment_cache_clear(vmx);
+
+ /* Update any VMCS fields that might have changed while L2 ran */
+- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
++ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
++ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
+ if (vmx->hv_deadline_tsc == -1)
+ vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
diff --git a/patches.arch/07-cpu-hotplug-provide-knobs-to-control-smt.patch b/patches.arch/07-cpu-hotplug-provide-knobs-to-control-smt.patch
new file mode 100644
index 0000000000..e0d9620157
--- /dev/null
+++ b/patches.arch/07-cpu-hotplug-provide-knobs-to-control-smt.patch
@@ -0,0 +1,341 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 29 May 2018 17:48:27 +0200
+Subject: cpu/hotplug: Provide knobs to control SMT
+Git-commit: 05736e4ac13c08a4a9b1ef2de26dd31a32cbee57
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+Provide a command line and a sysfs knob to control SMT.
+
+The command line options are:
+
+ 'nosmt': Enumerate secondary threads, but do not online them
+
+ 'nosmt=force': Ignore secondary threads completely during enumeration
+ via MP table and ACPI/MADT.
+
+The sysfs control file has the following states (read/write):
+
+ 'on': SMT is enabled. Secondary threads can be freely onlined
+ 'off': SMT is disabled. Secondary threads, even if enumerated
+ cannot be onlined
+ 'forceoff': SMT is permanently disabled. Writes to the control
+ file are rejected.
+ 'notsupported': SMT is not supported by the CPU
+
+The command line option 'nosmt' sets the sysfs control to 'off'. This
+can be changed to 'on' to reenable SMT during runtime.
+
+The command line option 'nosmt=force' sets the sysfs control to
+'forceoff'. This cannot be changed during runtime.
+
+When SMT is 'on' and the control file is changed to 'off' then all online
+secondary threads are offlined and attempts to online a secondary thread
+later on are rejected.
+
+When SMT is 'off' and the control file is changed to 'on' then secondary
+threads can be onlined again. The 'off' -> 'on' transition does not
+automatically online the secondary threads.
+
+When the control file is set to 'forceoff', the behaviour is the same as
+setting it to 'off', but the operation is irreversible and later writes to
+the control file are rejected.
+
+When the control status is 'notsupported' then writes to the control file
+are rejected.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ Documentation/ABI/testing/sysfs-devices-system-cpu | 20 ++
+ Documentation/admin-guide/kernel-parameters.txt | 8
+ arch/Kconfig | 3
+ arch/x86/Kconfig | 1
+ include/linux/cpu.h | 13 +
+ kernel/cpu.c | 170 +++++++++++++++++++++
+ 6 files changed, 215 insertions(+)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -12,6 +12,9 @@ config KEXEC_CORE
+ config HAVE_IMA_KEXEC
+ bool
+
++config HOTPLUG_SMT
++ bool
++
+ config OPROFILE
+ tristate "OProfile system profiling"
+ depends on PROFILING
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -167,6 +167,7 @@ config X86
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UNSTABLE_SCHED_CLOCK
+ select HAVE_USER_RETURN_NOTIFIER
++ select HOTPLUG_SMT if SMP
+ select IRQ_FORCED_THREADING
+ select PERF_EVENTS
+ select RTC_LIB
+--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
++++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
+@@ -390,3 +390,23 @@ Description: Information about CPU vulne
+ "Not affected" CPU is not affected by the vulnerability
+ "Vulnerable" CPU is affected and no mitigation in effect
+ "Mitigation: $M" CPU is affected and mitigation $M is in effect
++
++What: /sys/devices/system/cpu/smt
++ /sys/devices/system/cpu/smt/active
++ /sys/devices/system/cpu/smt/control
++Date: June 2018
++Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
++Description: Control Symmetric Multi Threading (SMT)
++
++ active: Tells whether SMT is active (enabled and siblings online)
++
++ control: Read/write interface to control SMT. Possible
++ values:
++
++ "on" SMT is enabled
++ "off" SMT is disabled
++ "forceoff" SMT is force disabled. Cannot be changed.
++ "notsupported" SMT is not supported by the CPU
++
++ If control status is "forceoff" or "notsupported" writes
++ are rejected.
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2589,6 +2589,14 @@
+ nosmt [KNL,S390] Disable symmetric multithreading (SMT).
+ Equivalent to smt=1.
+
++ [KNL,x86] Disable symmetric multithreading (SMT).
++ nosmt=force: Force disable SMT, similar to disabling
++ it in the BIOS except that some of the
++ resource partitioning effects which are
++ caused by having SMT enabled in the BIOS
++ cannot be undone. Depending on the CPU
++ type this might have a performance impact.
++
+ nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
+ (indirect branch prediction) vulnerability. System may
+ allow data leaks with this option, which is equivalent
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -175,4 +175,17 @@ void cpuhp_report_idle_dead(void);
+ static inline void cpuhp_report_idle_dead(void) { }
+ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
++enum cpuhp_smt_control {
++ CPU_SMT_ENABLED,
++ CPU_SMT_DISABLED,
++ CPU_SMT_FORCE_DISABLED,
++ CPU_SMT_NOT_SUPPORTED,
++};
++
++#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
++extern enum cpuhp_smt_control cpu_smt_control;
++#else
++# define cpu_smt_control (CPU_SMT_ENABLED)
++#endif
++
+ #endif /* _LINUX_CPU_H_ */
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -826,6 +826,29 @@ int cpu_down(unsigned int cpu)
+ EXPORT_SYMBOL(cpu_down);
+ #endif /*CONFIG_HOTPLUG_CPU*/
+
++#ifdef CONFIG_HOTPLUG_SMT
++enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_control = CPU_SMT_DISABLED;
++ if (str && !strcmp(str, "force")) {
++ pr_info("SMT: Force disabled\n");
++ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ }
++ return 0;
++}
++early_param("nosmt", smt_cmdline_disable);
++
++static inline bool cpu_smt_allowed(unsigned int cpu)
++{
++ return cpu_smt_control == CPU_SMT_ENABLED ||
++ topology_is_primary_thread(cpu);
++}
++#else
++static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++#endif
++
+ /**
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
+ * @cpu: cpu that just started
+@@ -944,6 +967,10 @@ static int do_cpu_up(unsigned int cpu, e
+ err = -EBUSY;
+ goto out;
+ }
++ if (!cpu_smt_allowed(cpu)) {
++ err = -EPERM;
++ goto out;
++ }
+
+ err = _cpu_up(cpu, 0, target);
+ out:
+@@ -1708,10 +1735,153 @@ static struct attribute_group cpuhp_cpu_
+ NULL
+ };
+
++#ifdef CONFIG_HOTPLUG_SMT
++
++static const char *smt_states[] = {
++ [CPU_SMT_ENABLED] = "on",
++ [CPU_SMT_DISABLED] = "off",
++ [CPU_SMT_FORCE_DISABLED] = "forceoff",
++ [CPU_SMT_NOT_SUPPORTED] = "notsupported",
++};
++
++static ssize_t
++show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
++}
++
++static void cpuhp_offline_cpu_device(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
++
++ dev->offline = true;
++ /* Tell user space about the state change */
++ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
++}
++
++static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
++{
++ int cpu, ret = 0;
++
++ cpu_maps_update_begin();
++ for_each_online_cpu(cpu) {
++ if (topology_is_primary_thread(cpu))
++ continue;
++ ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
++ if (ret)
++ break;
++ /*
++ * As this needs to hold the cpu maps lock it's impossible
++ * to call device_offline() because that ends up calling
++ * cpu_down() which takes cpu maps lock. cpu maps lock
++ * needs to be held as this might race against in kernel
++ * abusers of the hotplug machinery (thermal management).
++ *
++ * So nothing would update device:offline state. That would
++ * leave the sysfs entry stale and prevent onlining after
++ * smt control has been changed to 'off' again. This is
++ * called under the sysfs hotplug lock, so it is properly
++ * serialized against the regular offline usage.
++ */
++ cpuhp_offline_cpu_device(cpu);
++ }
++ if (!ret)
++ cpu_smt_control = ctrlval;
++ cpu_maps_update_done();
++ return ret;
++}
++
++static void cpuhp_smt_enable(void)
++{
++ cpu_maps_update_begin();
++ cpu_smt_control = CPU_SMT_ENABLED;
++ cpu_maps_update_done();
++}
++
++static ssize_t
++store_smt_control(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ int ctrlval, ret;
++
++ if (sysfs_streq(buf, "on"))
++ ctrlval = CPU_SMT_ENABLED;
++ else if (sysfs_streq(buf, "off"))
++ ctrlval = CPU_SMT_DISABLED;
++ else if (sysfs_streq(buf, "forceoff"))
++ ctrlval = CPU_SMT_FORCE_DISABLED;
++ else
++ return -EINVAL;
++
++ if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
++ return -EPERM;
++
++ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
++ return -ENODEV;
++
++ ret = lock_device_hotplug_sysfs();
++ if (ret)
++ return ret;
++
++ if (ctrlval != cpu_smt_control) {
++ switch (ctrlval) {
++ case CPU_SMT_ENABLED:
++ cpuhp_smt_enable();
++ break;
++ case CPU_SMT_DISABLED:
++ case CPU_SMT_FORCE_DISABLED:
++ ret = cpuhp_smt_disable(ctrlval);
++ break;
++ }
++ }
++
++ unlock_device_hotplug();
++ return ret ? ret : count;
++}
++static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
++
++static ssize_t
++show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ bool active = topology_max_smt_threads() > 1;
++
++ return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
++}
++static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
++
++static struct attribute *cpuhp_smt_attrs[] = {
++ &dev_attr_control.attr,
++ &dev_attr_active.attr,
++ NULL
++};
++
++static const struct attribute_group cpuhp_smt_attr_group = {
++ .attrs = cpuhp_smt_attrs,
++ .name = "smt",
++ NULL
++};
++
++static int __init cpu_smt_state_init(void)
++{
++ if (!topology_smt_supported())
++ cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
++
++ return sysfs_create_group(&cpu_subsys.dev_root->kobj,
++ &cpuhp_smt_attr_group);
++}
++
++#else
++static inline int cpu_smt_state_init(void) { return 0; }
++#endif
++
+ static int __init cpuhp_sysfs_init(void)
+ {
+ int cpu, ret;
+
++ ret = cpu_smt_state_init();
++ if (ret)
++ return ret;
++
+ ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
+ &cpuhp_cpu_root_attr_group);
+ if (ret)
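The sysfs side of this patch reduces to a four-state string interface with two sticky terminal states. A small sketch of the accepted inputs and the rejection rules (smt_store and cur are invented names; the state names and the EINVAL/EPERM/ENODEV behaviour follow store_smt_control() above, with -1 standing in for all error codes):

    #include <stdio.h>
    #include <string.h>

    enum smt_control {
        SMT_ENABLED, SMT_DISABLED, SMT_FORCE_DISABLED, SMT_NOT_SUPPORTED
    };

    static const char *smt_states[] = {
        [SMT_ENABLED]        = "on",
        [SMT_DISABLED]       = "off",
        [SMT_FORCE_DISABLED] = "forceoff",
        [SMT_NOT_SUPPORTED]  = "notsupported",
    };

    /* Input checks of store_smt_control(): new state on success, -1 on error. */
    static int smt_store(enum smt_control cur, const char *buf)
    {
        int ctrlval;

        if (!strcmp(buf, "on"))
            ctrlval = SMT_ENABLED;
        else if (!strcmp(buf, "off"))
            ctrlval = SMT_DISABLED;
        else if (!strcmp(buf, "forceoff"))
            ctrlval = SMT_FORCE_DISABLED;
        else
            return -1;                       /* -EINVAL */

        if (cur == SMT_FORCE_DISABLED || cur == SMT_NOT_SUPPORTED)
            return -1;                       /* -EPERM / -ENODEV */
        return ctrlval;
    }

    int main(void)
    {
        printf("off from on       -> %d\n", smt_store(SMT_ENABLED, "off"));
        printf("on from forceoff  -> %d\n", smt_store(SMT_FORCE_DISABLED, "on"));
        printf("state 1 is named  -> %s\n", smt_states[SMT_DISABLED]);
        return 0;
    }

At runtime the same strings travel through /sys/devices/system/cpu/smt/control, e.g. writing "off" offlines all secondary threads, while a later "on" permits, but does not perform, onlining them again.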
diff --git a/patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch b/patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch
new file mode 100644
index 0000000000..ebf684b01c
--- /dev/null
+++ b/patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch
@@ -0,0 +1,85 @@
+From 0b1d2e2aae1ab6c0980e099d18604445780cc8bd Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 20:11:39 -0400
+Subject: [PATCH 37/40] x86/KVM/VMX: Add find_msr() helper function
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit ca83b4a7f2d068da79a029d323024aa45decb250 upstream
+
+.. to help find the MSR on either the guest or host MSR list.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2027,9 +2027,20 @@ static void clear_atomic_switch_msr_spec
+ vm_exit_controls_clearbit(vmx, exit);
+ }
+
++static int find_msr(struct vmx_msrs *m, unsigned int msr)
++{
++ unsigned int i;
++
++ for (i = 0; i < m->nr; ++i) {
++ if (m->val[i].index == msr)
++ return i;
++ }
++ return -ENOENT;
++}
++
+ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+ {
+- unsigned i;
++ int i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2050,11 +2061,8 @@ static void clear_atomic_switch_msr(stru
+ }
+ break;
+ }
+- for (i = 0; i < m->guest.nr; ++i)
+- if (m->guest.val[i].index == msr)
+- break;
+-
+- if (i == m->guest.nr)
++ i = find_msr(&m->guest, msr);
++ if (i < 0)
+ return;
+ --m->guest.nr;
+ --m->host.nr;
+@@ -2078,7 +2086,7 @@ static void add_atomic_switch_msr_specia
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+ {
+- unsigned i;
++ int i;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2113,16 +2121,13 @@ static void add_atomic_switch_msr(struct
+ wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
+ }
+
+- for (i = 0; i < m->guest.nr; ++i)
+- if (m->guest.val[i].index == msr)
+- break;
+-
++ i = find_msr(&m->guest, msr);
+ if (i == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i == m->guest.nr) {
+- ++m->guest.nr;
++ } else if (i < 0) {
++ i = m->guest.nr++;
+ ++m->host.nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
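find_msr() is a plain linear search over the small autoload array, returning the slot index or -ENOENT so that callers can distinguish "update in place" from "append". A self-contained sketch of the same shape (struct names shortened for illustration; the array size matches NR_AUTOLOAD_MSRS, which is 8 in the VMX code):

    #include <errno.h>
    #include <stdio.h>

    struct msr_entry { unsigned int index; unsigned long long value; };
    struct msr_list  { unsigned int nr; struct msr_entry val[8]; };

    /* Same contract as the new find_msr(): index on hit, -ENOENT on miss. */
    static int find_msr(const struct msr_list *m, unsigned int msr)
    {
        unsigned int i;

        for (i = 0; i < m->nr; i++) {
            if (m->val[i].index == msr)
                return (int)i;
        }
        return -ENOENT;
    }

    int main(void)
    {
        struct msr_list m = { .nr = 2, .val = { { 0x48, 0 }, { 0x10b, 1 } } };

        printf("0x10b -> %d\n", find_msr(&m, 0x10b));  /* slot 1 */
        printf("0x123 -> %d\n", find_msr(&m, 0x123));  /* -ENOENT */
        return 0;
    }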
diff --git a/patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch b/patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch
new file mode 100644
index 0000000000..b9f46b15d0
--- /dev/null
+++ b/patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch
@@ -0,0 +1,84 @@
+From e50b30378d2e96fc136bb545aa8b0769417422f5 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 22:00:47 -0400
+Subject: [PATCH 38/40] x86/KVM/VMX: Separate the VMX AUTOLOAD guest/host
+ number accounting
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 3190709335dd31fe1aeeebfe4ffb6c7624ef971f upstream
+
+This allows loading a different number of MSRs depending on the context:
+VMEXIT or VMENTER.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 29 +++++++++++++++++++----------
+ 1 file changed, 19 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2063,12 +2063,18 @@ static void clear_atomic_switch_msr(stru
+ }
+ i = find_msr(&m->guest, msr);
+ if (i < 0)
+- return;
++ goto skip_guest;
+ --m->guest.nr;
+- --m->host.nr;
+ m->guest.val[i] = m->guest.val[m->guest.nr];
+- m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++
++skip_guest:
++ i = find_msr(&m->host, msr);
++ if (i < 0)
++ return;
++
++ --m->host.nr;
++ m->host.val[i] = m->host.val[m->host.nr];
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+
+@@ -2086,7 +2092,7 @@ static void add_atomic_switch_msr_specia
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+ u64 guest_val, u64 host_val)
+ {
+- int i;
++ int i, j;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2122,21 +2128,24 @@ static void add_atomic_switch_msr(struct
+ }
+
+ i = find_msr(&m->guest, msr);
+- if (i == NR_AUTOLOAD_MSRS) {
++ j = find_msr(&m->host, msr);
++ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+ return;
+- } else if (i < 0) {
++ }
++ if (i < 0) {
+ i = m->guest.nr++;
+- ++m->host.nr;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
++ }
++ if (j < 0) {
++ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+-
+ m->guest.val[i].index = msr;
+ m->guest.val[i].value = guest_val;
+- m->host.val[i].index = msr;
+- m->host.val[i].value = host_val;
++ m->host.val[j].index = msr;
++ m->host.val[j].value = host_val;
+ }
+
+ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
diff --git a/patches.arch/08-x86-cpu-remove-the-pointless-cpu-printout.patch b/patches.arch/08-x86-cpu-remove-the-pointless-cpu-printout.patch
new file mode 100644
index 0000000000..06aa6a6ba1
--- /dev/null
+++ b/patches.arch/08-x86-cpu-remove-the-pointless-cpu-printout.patch
@@ -0,0 +1,99 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:36:15 +0200
+Subject: x86/cpu: Remove the pointless CPU printout
+Git-commit: 55e6d279abd92cfd7576bba031e7589be8475edb
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+The value of this printout is dubious at best and there is no point in
+having it in two different places along with convoluted ways to reach it.
+
+Remove it completely.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 20 +++++---------------
+ arch/x86/kernel/cpu/topology.c | 10 ----------
+ 2 files changed, 5 insertions(+), 25 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -619,13 +619,12 @@ void detect_ht(struct cpuinfo_x86 *c)
+ #ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+ int index_msb, core_bits;
+- static bool printed;
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+ return;
+
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+- goto out;
++ return;
+
+ if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
+ return;
+@@ -634,14 +633,14 @@ void detect_ht(struct cpuinfo_x86 *c)
+
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
+
++ if (!smp_num_siblings)
++ smp_num_siblings = 1;
++
+ if (smp_num_siblings == 1) {
+ pr_info_once("CPU0: Hyper-Threading is disabled\n");
+- goto out;
++ return;
+ }
+
+- if (smp_num_siblings <= 1)
+- goto out;
+-
+ index_msb = get_count_order(smp_num_siblings);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+
+@@ -653,15 +652,6 @@ void detect_ht(struct cpuinfo_x86 *c)
+
+ c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+ ((1 << core_bits) - 1);
+-
+-out:
+- if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
+- pr_info("CPU: Physical Processor ID: %d\n",
+- c->phys_proc_id);
+- pr_info("CPU: Processor Core ID: %d\n",
+- c->cpu_core_id);
+- printed = 1;
+- }
+ #endif
+ }
+
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -32,7 +32,6 @@ void detect_extended_topology(struct cpu
+ unsigned int eax, ebx, ecx, edx, sub_index;
+ unsigned int ht_mask_width, core_plus_mask_width;
+ unsigned int core_select_mask, core_level_siblings;
+- static bool printed;
+
+ if (c->cpuid_level < 0xb)
+ return;
+@@ -85,15 +84,6 @@ void detect_extended_topology(struct cpu
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
+ c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+-
+- if (!printed) {
+- pr_info("CPU: Physical Processor ID: %d\n",
+- c->phys_proc_id);
+- if (c->x86_max_cores > 1)
+- pr_info("CPU: Processor Core ID: %d\n",
+- c->cpu_core_id);
+- printed = 1;
+- }
+ return;
+ #endif
+ }
diff --git a/patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch b/patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch
new file mode 100644
index 0000000000..f93b251ebe
--- /dev/null
+++ b/patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch
@@ -0,0 +1,92 @@
+From d2f78e25d473d98dd1703f1d07cdcdd3bb49dd24 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Wed, 20 Jun 2018 22:01:22 -0400
+Subject: [PATCH 39/40] x86/KVM/VMX: Extend add_atomic_switch_msr() to allow
+ VMENTER only MSRs
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 989e3992d2eca32c3f1404f2bc91acda3aa122d8 upstream
+
+The IA32_FLUSH_CMD MSR needs only to be written on VMENTER. Extend
+add_atomic_switch_msr() with an entry_only parameter to allow storing the
+MSR only in the guest (ENTRY) MSR array.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -2090,9 +2090,9 @@ static void add_atomic_switch_msr_specia
+ }
+
+ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+- u64 guest_val, u64 host_val)
++ u64 guest_val, u64 host_val, bool entry_only)
+ {
+- int i, j;
++ int i, j = 0;
+ struct msr_autoload *m = &vmx->msr_autoload;
+
+ switch (msr) {
+@@ -2128,7 +2128,9 @@ static void add_atomic_switch_msr(struct
+ }
+
+ i = find_msr(&m->guest, msr);
+- j = find_msr(&m->host, msr);
++ if (!entry_only)
++ j = find_msr(&m->host, msr);
++
+ if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ printk_once(KERN_WARNING "Not enough msr switch entries. "
+ "Can't add msr %x\n", msr);
+@@ -2138,12 +2140,16 @@ static void add_atomic_switch_msr(struct
+ i = m->guest.nr++;
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+ }
++ m->guest.val[i].index = msr;
++ m->guest.val[i].value = guest_val;
++
++ if (entry_only)
++ return;
++
+ if (j < 0) {
+ j = m->host.nr++;
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
+ }
+- m->guest.val[i].index = msr;
+- m->guest.val[i].value = guest_val;
+ m->host.val[j].index = msr;
+ m->host.val[j].value = host_val;
+ }
+@@ -2189,7 +2195,7 @@ static bool update_transition_efer(struc
+ guest_efer &= ~EFER_LME;
+ if (guest_efer != host_efer)
+ add_atomic_switch_msr(vmx, MSR_EFER,
+- guest_efer, host_efer);
++ guest_efer, host_efer, false);
+ return false;
+ } else {
+ guest_efer &= ~ignore_bits;
+@@ -3594,7 +3600,7 @@ static int vmx_set_msr(struct kvm_vcpu *
+ vcpu->arch.ia32_xss = data;
+ if (vcpu->arch.ia32_xss != host_xss)
+ add_atomic_switch_msr(vmx, MSR_IA32_XSS,
+- vcpu->arch.ia32_xss, host_xss);
++ vcpu->arch.ia32_xss, host_xss, false);
+ else
+ clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
+ break;
+@@ -9521,7 +9527,7 @@ static void atomic_switch_perf_msrs(stru
+ clear_atomic_switch_msr(vmx, msrs[i].msr);
+ else
+ add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
+- msrs[i].host);
++ msrs[i].host, false);
+ }
+
+ static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
diff --git a/patches.arch/09-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch b/patches.arch/09-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch
new file mode 100644
index 0000000000..dab64d31e4
--- /dev/null
+++ b/patches.arch/09-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch
@@ -0,0 +1,31 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:47:10 +0200
+Subject: x86/cpu/AMD: Remove the pointless detect_ht() call
+Git-commit: 44ca36de56d1bf196dca2eb67cd753a46961ffe6
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+Real 32bit AMD CPUs do not have SMT and the only value of the call was to
+reach the magic printout which got removed.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/amd.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -860,10 +860,6 @@ static void init_amd(struct cpuinfo_x86
+ srat_detect_node(c);
+ }
+
+-#ifdef CONFIG_X86_32
+- detect_ht(c);
+-#endif
+-
+ init_amd_cacheinfo(c);
+
+ if (c->x86 >= 0xf)
diff --git a/patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch b/patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch
new file mode 100644
index 0000000000..361aa7db82
--- /dev/null
+++ b/patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch
@@ -0,0 +1,93 @@
+From 3661874b1c4810195c070e05abe9b9504ead59e2 Mon Sep 17 00:00:00 2001
+From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Date: Thu, 28 Jun 2018 17:10:36 -0400
+Subject: [PATCH 40/40] x86/KVM/VMX: Use MSR save list for IA32_FLUSH_CMD if
+ required
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 390d975e0c4e60ce70d4157e0dd91ede37824603 upstream
+
+If the L1D flush module parameter is set to 'always' and the IA32_FLUSH_CMD
+MSR is available, optimize the VMENTER code with the MSR save list.
+
+Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ arch/x86/kvm/vmx.c | 42 +++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 37 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -5716,6 +5716,16 @@ static void ept_set_mmio_spte_mask(void)
+ VMX_EPT_MISCONFIG_WX_VALUE);
+ }
+
++static bool vmx_l1d_use_msr_save_list(void)
++{
++ if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
++ static_cpu_has(X86_FEATURE_HYPERVISOR) ||
++ !static_cpu_has(X86_FEATURE_FLUSH_L1D))
++ return false;
++
++ return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++}
++
+ #define VMX_XSS_EXIT_BITMAP 0
+ /*
+ * Sets up the vmcs for emulated real mode.
+@@ -6063,6 +6073,12 @@ static void vmx_set_nmi_mask(struct kvm_
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ }
++ /*
++ * If flushing the L1D cache on every VMENTER is enforced and the
++ * MSR is available, use the MSR save list.
++ */
++ if (vmx_l1d_use_msr_save_list())
++ add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
+ }
+
+ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+@@ -9086,11 +9102,26 @@ static void vmx_l1d_flush(struct kvm_vcp
+ bool always;
+
+ /*
+- * If the mitigation mode is 'flush always', keep the flush bit
+- * set, otherwise clear it. It gets set again either from
+- * vcpu_run() or from one of the unsafe VMEXIT handlers.
++ * This code is only executed when:
++ * - the flush mode is 'cond'
++ * - the flush mode is 'always' and the flush MSR is not
++ * available
++ *
++ * If the CPU has the flush MSR then clear the flush bit because
++ * 'always' mode is handled via the MSR save list.
++ *
++ * If the MSR is not available then act depending on the mitigation
++ * mode: If 'flush always', keep the flush bit set, otherwise clear
++ * it.
++ *
++ * The flush bit gets set again either from vcpu_run() or from one
++ * of the unsafe VMEXIT handlers.
+ */
+- always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++ if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
++ always = false;
++ else
++ always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
++
+ vcpu->arch.l1tf_flush_l1d = always;
+
+ vcpu->stat.l1d_flush++;
+@@ -12548,7 +12579,8 @@ static int __init vmx_setup_l1d_flush(vo
+ struct page *page;
+
+ if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
+- !boot_cpu_has_bug(X86_BUG_L1TF))
++ !boot_cpu_has_bug(X86_BUG_L1TF) ||
++ vmx_l1d_use_msr_save_list())
+ return 0;
+
+ if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
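
The combined effect of the hunks above is a three-way decision: software flush in vmx_l1d_flush(), software flush re-armed on every entry, or no software flush at all because the VM-entry MSR save list writes IA32_FLUSH_CMD automatically. A stand-alone C sketch of that decision logic; the struct and enum names are illustrative stand-ins for the patch's cpufeature tests, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the vmentry_l1d_flush modes used by the series (illustrative). */
enum l1d_flush_mode { FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

struct cpu_caps {            /* hypothetical stand-ins for the cpufeature tests */
	bool has_flush_l1d;  /* IA32_FLUSH_CMD MSR present (X86_FEATURE_FLUSH_L1D) */
	bool bug_l1tf;       /* CPU affected by L1TF (X86_BUG_L1TF) */
	bool hypervisor;     /* running as a guest (X86_FEATURE_HYPERVISOR) */
	bool ept_enabled;
};

/* 'always' + flush MSR available => let the VM-entry MSR save list do the work */
static bool use_msr_save_list(enum l1d_flush_mode mode, const struct cpu_caps *c)
{
	if (!c->ept_enabled || !c->bug_l1tf || c->hypervisor || !c->has_flush_l1d)
		return false;
	return mode == FLUSH_ALWAYS;
}

/* What the software flush path sets l1tf_flush_l1d back to after flushing */
static bool flush_flag_after_sw_flush(enum l1d_flush_mode mode, const struct cpu_caps *c)
{
	if (c->has_flush_l1d)
		return false;            /* 'always' is handled by the MSR list */
	return mode == FLUSH_ALWAYS;     /* no MSR: keep flushing on every entry */
}

int main(void)
{
	struct cpu_caps c = { .has_flush_l1d = true, .bug_l1tf = true,
			      .hypervisor = false, .ept_enabled = true };

	printf("always + MSR: use save list = %d\n",
	       use_msr_save_list(FLUSH_ALWAYS, &c));
	printf("cond   + MSR: re-arm flush  = %d\n",
	       flush_flag_after_sw_flush(FLUSH_COND, &c));
	return 0;
}
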
diff --git a/patches.arch/10-x86-cpu-common-provide-detect_ht_early.patch b/patches.arch/10-x86-cpu-common-provide-detect_ht_early.patch
new file mode 100644
index 0000000000..4e9cbaf97f
--- /dev/null
+++ b/patches.arch/10-x86-cpu-common-provide-detect_ht_early.patch
@@ -0,0 +1,78 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:53:57 +0200
+Subject: x86/cpu/common: Provide detect_ht_early()
+Git-commit: 545401f4448a807b963ff17b575e0a393e68b523
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. detect_ht() cannot be called before the APIC driver
+is selected, so split out the part which initializes smp_num_siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/common.c | 24 ++++++++++++++----------
+ arch/x86/kernel/cpu/cpu.h | 1 +
+ 2 files changed, 15 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -614,32 +614,36 @@ static void cpu_detect_tlb(struct cpuinf
+ tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
+ }
+
+-void detect_ht(struct cpuinfo_x86 *c)
++int detect_ht_early(struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+ u32 eax, ebx, ecx, edx;
+- int index_msb, core_bits;
+
+ if (!cpu_has(c, X86_FEATURE_HT))
+- return;
++ return -1;
+
+ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+- return;
++ return -1;
+
+ if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
+- return;
++ return -1;
+
+ cpuid(1, &eax, &ebx, &ecx, &edx);
+
+ smp_num_siblings = (ebx & 0xff0000) >> 16;
++ if (smp_num_siblings == 1)
++ pr_info_once("CPU0: Hyper-Threading is disabled\n");
++#endif
++ return 0;
++}
+
+- if (!smp_num_siblings)
+- smp_num_siblings = 1;
++void detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ int index_msb, core_bits;
+
+- if (smp_num_siblings == 1) {
+- pr_info_once("CPU0: Hyper-Threading is disabled\n");
++ if (detect_ht_early(c) < 0)
+ return;
+- }
+
+ index_msb = get_count_order(smp_num_siblings);
+ c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,5 +47,6 @@ extern const struct cpu_dev *const __x86
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+ extern void x86_spec_ctrl_setup_ap(void);
++extern int detect_ht_early(struct cpuinfo_x86 *c);
+
+ #endif /* ARCH_X86_CPU_H */
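
detect_ht_early() above boils down to reading the legacy sibling count from CPUID leaf 1, EBX bits 23:16, before the APIC driver is selected. A minimal user-space sketch of the same decoding, using the GCC/clang <cpuid.h> helpers (an illustration, not kernel code):

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* EDX bit 28 is the HTT flag; without it the field below is meaningless */
	if (!(edx & (1u << 28))) {
		puts("CPU does not advertise HTT");
		return 0;
	}

	/* Same field detect_ht_early() uses: logical processor count per package */
	printf("smp_num_siblings (leaf 1, EBX[23:16]) = %u\n", (ebx >> 16) & 0xff);
	return 0;
}
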
diff --git a/patches.arch/11-x86-cpu-topology-provide-detect_extended_topology_early.patch b/patches.arch/11-x86-cpu-topology-provide-detect_extended_topology_early.patch
new file mode 100644
index 0000000000..c9347e5d98
--- /dev/null
+++ b/patches.arch/11-x86-cpu-topology-provide-detect_extended_topology_early.patch
@@ -0,0 +1,117 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:55:39 +0200
+Subject: x86/cpu/topology: Provide detect_extended_topology_early()
+Git-commit: 95f3d39ccf7aaea79d1ffdac1c887c2e100ec1b6
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. detect_extended_topology() cannot be called before
+the APIC driver is selected, so split out the part which initializes
+smp_num_siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/include/asm/processor.h | 1 -
+ arch/x86/kernel/cpu/cpu.h | 2 ++
+ arch/x86/kernel/cpu/topology.c | 37 +++++++++++++++++++++++++------------
+ 3 files changed, 27 insertions(+), 13 deletions(-)
+
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -194,7 +194,6 @@ extern u32 get_scattered_cpuid_leaf(unsi
+ extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+ extern void init_amd_cacheinfo(struct cpuinfo_x86 *c);
+
+-extern void detect_extended_topology(struct cpuinfo_x86 *c);
+ extern void detect_ht(struct cpuinfo_x86 *c);
+
+ #ifdef CONFIG_X86_32
+--- a/arch/x86/kernel/cpu/cpu.h
++++ b/arch/x86/kernel/cpu/cpu.h
+@@ -47,6 +47,8 @@ extern const struct cpu_dev *const __x86
+ extern void get_cpu_cap(struct cpuinfo_x86 *c);
+ extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+ extern void x86_spec_ctrl_setup_ap(void);
++extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
++extern int detect_extended_topology(struct cpuinfo_x86 *c);
+ extern int detect_ht_early(struct cpuinfo_x86 *c);
+
+ #endif /* ARCH_X86_CPU_H */
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -21,20 +21,13 @@
+ #define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
+ #define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
+
+-/*
+- * Check for extended topology enumeration cpuid leaf 0xb and if it
+- * exists, use it for populating initial_apicid and cpu topology
+- * detection.
+- */
+-void detect_extended_topology(struct cpuinfo_x86 *c)
++int detect_extended_topology_early(struct cpuinfo_x86 *c)
+ {
+ #ifdef CONFIG_SMP
+- unsigned int eax, ebx, ecx, edx, sub_index;
+- unsigned int ht_mask_width, core_plus_mask_width;
+- unsigned int core_select_mask, core_level_siblings;
++ unsigned int eax, ebx, ecx, edx;
+
+ if (c->cpuid_level < 0xb)
+- return;
++ return -1;
+
+ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+
+@@ -42,7 +35,7 @@ void detect_extended_topology(struct cpu
+ * check if the cpuid leaf 0xb is actually implemented.
+ */
+ if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
+- return;
++ return -1;
+
+ set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
+
+@@ -50,10 +43,30 @@ void detect_extended_topology(struct cpu
+ * initial apic id, which also represents 32-bit extended x2apic id.
+ */
+ c->initial_apicid = edx;
++ smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
++#endif
++ return 0;
++}
++
++/*
++ * Check for extended topology enumeration cpuid leaf 0xb and if it
++ * exists, use it for populating initial_apicid and cpu topology
++ * detection.
++ */
++int detect_extended_topology(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ unsigned int eax, ebx, ecx, edx, sub_index;
++ unsigned int ht_mask_width, core_plus_mask_width;
++ unsigned int core_select_mask, core_level_siblings;
++
++ if (detect_extended_topology_early(c) < 0)
++ return -1;
+
+ /*
+ * Populate HT related information from sub-leaf level 0.
+ */
++ cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
+ core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+ core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
+
+@@ -84,6 +97,6 @@ void detect_extended_topology(struct cpu
+ c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
+ c->x86_max_cores = (core_level_siblings / smp_num_siblings);
+- return;
+ #endif
++ return 0;
+ }
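
The early helper split out above only consumes sub-leaf 0 (the SMT level) of CPUID leaf 0xB. A small user-space sketch of that sub-leaf, reusing the patch's field macros; again an illustration, not kernel code:

#include <cpuid.h>
#include <stdio.h>

#define SMT_LEVEL		0	/* sub-leaf 0 describes the SMT level */
#define SMT_TYPE		1	/* level type 1 == SMT */
#define LEAFB_SUBTYPE(ecx)	(((ecx) >> 8) & 0xff)
#define LEVEL_MAX_SIBLINGS(ebx)	((ebx) & 0xffff)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid_count() fails if leaf 0xb is above the CPU's max leaf */
	if (!__get_cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx))
		return 1;

	if (ebx == 0 || LEAFB_SUBTYPE(ecx) != SMT_TYPE) {
		puts("leaf 0xB present but not populated");
		return 1;
	}

	printf("threads per core (leaf 0xB, sub-leaf 0): %u\n",
	       LEVEL_MAX_SIBLINGS(ebx));
	printf("initial x2APIC id: %u\n", edx);
	return 0;
}
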
diff --git a/patches.arch/12-x86-cpu-intel-evaluate-smp_num_siblings-early.patch b/patches.arch/12-x86-cpu-intel-evaluate-smp_num_siblings-early.patch
new file mode 100644
index 0000000000..15b699618d
--- /dev/null
+++ b/patches.arch/12-x86-cpu-intel-evaluate-smp_num_siblings-early.patch
@@ -0,0 +1,35 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 01:00:55 +0200
+Subject: x86/cpu/intel: Evaluate smp_num_siblings early
+Git-commit: 1910ad5624968f93be48e8e265513c54d66b897c
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+Make use of the new early detection function to initialize smp_num_siblings
+on the boot cpu before the MP-Table or ACPI/MADT scan happens. That's
+required for force disabling SMT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/intel.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -315,6 +315,13 @@ static void early_init_intel(struct cpui
+ }
+
+ check_mpx_erratum(c);
++
++ /*
++ * Get the number of SMT siblings early from the extended topology
++ * leaf, if available. Otherwise try the legacy SMT detection.
++ */
++ if (detect_extended_topology_early(c) < 0)
++ detect_ht_early(c);
+ }
+
+ #ifdef CONFIG_X86_32
diff --git a/patches.arch/13-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch b/patches.arch/13-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch
new file mode 100644
index 0000000000..ec63d6d9f4
--- /dev/null
+++ b/patches.arch/13-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch
@@ -0,0 +1,36 @@
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 15 Jun 2018 20:48:39 +0200
+Subject: x86/CPU/AMD: Do not check CPUID max ext level before parsing SMP info
+Git-commit: 119bff8a9c9bb00116a844ec68be7bc4b1c768f5
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+Old code used to check whether CPUID ext max level is >= 0x80000008 because
+that last leaf contains the number of cores of the physical CPU. The three
+functions called there now do not depend on that leaf anymore so the check
+can go.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/x86/kernel/cpu/amd.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -854,12 +854,8 @@ static void init_amd(struct cpuinfo_x86
+
+ cpu_detect_cache_sizes(c);
+
+- /* Multi core CPU? */
+- if (c->extended_cpuid_level >= 0x80000008) {
+- amd_detect_cmp(c);
+- srat_detect_node(c);
+- }
+-
++ amd_detect_cmp(c);
++ srat_detect_node(c);
+ init_amd_cacheinfo(c);
+
+ if (c->x86 >= 0xf)
diff --git a/patches.arch/14-x86-cpu-amd-evaluate-smp_num_siblings-early.patch b/patches.arch/14-x86-cpu-amd-evaluate-smp_num_siblings-early.patch
new file mode 100644
index 0000000000..7a5a1f6cc2
--- /dev/null
+++ b/patches.arch/14-x86-cpu-amd-evaluate-smp_num_siblings-early.patch
@@ -0,0 +1,48 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 6 Jun 2018 00:57:38 +0200
+Subject: x86/cpu/AMD: Evaluate smp_num_siblings early
+Git-commit: 1e1d7e25fd759eddf96d8ab39d0a90a1979b2d8c
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+To support force disabling of SMT it's required to know the number of
+thread siblings early. amd_get_topology() cannot be called before the APIC
+driver is selected, so split out the part which initializes
+smp_num_siblings and invoke it from amd_early_init().
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+---
+ arch/x86/kernel/cpu/amd.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -313,6 +313,17 @@ static void legacy_fixup_core_id(struct
+ c->cpu_core_id %= cus_per_node;
+ }
+
++
++static void amd_get_topology_early(struct cpuinfo_x86 *c)
++{
++ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
++ u32 eax, ebx, ecx, edx;
++
++ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
++ smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
++ }
++}
++
+ /*
+ * Fixup core topology information for
+ * (1) AMD multi-node processors
+@@ -693,6 +704,8 @@ static void early_init_amd(struct cpuinf
+ set_cpu_bug(c, X86_BUG_AMD_E400);
+
+ early_detect_mem_encrypt(c);
++
++ amd_get_topology_early(c);
+ }
+
+ static void init_amd_k8(struct cpuinfo_x86 *c)
diff --git a/patches.arch/16-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch b/patches.arch/16-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch
new file mode 100644
index 0000000000..494051f3ad
--- /dev/null
+++ b/patches.arch/16-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch
@@ -0,0 +1,95 @@
+From: Borislav Petkov <bp@suse.de>
+Date: Fri, 22 Jun 2018 11:34:11 +0200
+Subject: x86/CPU/AMD: Move TOPOEXT reenablement before reading smp_num_siblings
+Git-commit: 7ce2f0393ea2396142b7faf6ee9b1f3676d08a5f
+Patch-mainline: v4.18 or v4.18-rc3 (next release)
+References: bsc#1089343
+
+The TOPOEXT reenablement is a workaround for broken BIOSen which didn't
+enable the CPUID bit. amd_get_topology_early(), however, relies on
+that bit being set so that it can read out the CPUID leaf and set
+smp_num_siblings properly.
+
+Move the reenablement up to early_init_amd(). While at it, simplify
+amd_get_topology_early().
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/kernel/cpu/amd.c | 37 +++++++++++++++++--------------------
+ 1 file changed, 17 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -316,12 +316,8 @@ static void legacy_fixup_core_id(struct
+
+ static void amd_get_topology_early(struct cpuinfo_x86 *c)
+ {
+- if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
+- u32 eax, ebx, ecx, edx;
+-
+- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+- smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+- }
++ if (cpu_has(c, X86_FEATURE_TOPOEXT))
++ smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
+ }
+
+ /*
+@@ -343,7 +339,6 @@ static void amd_get_topology(struct cpui
+ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
+
+ node_id = ecx & 0xff;
+- smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
+
+ if (c->x86 == 0x15)
+ c->cu_id = ebx & 0xff;
+@@ -634,6 +629,7 @@ clear_sev:
+
+ static void early_init_amd(struct cpuinfo_x86 *c)
+ {
++ u64 value;
+ u32 dummy;
+
+ early_init_amd_mc(c);
+@@ -705,6 +701,20 @@ static void early_init_amd(struct cpuinf
+
+ early_detect_mem_encrypt(c);
+
++ /* Re-enable TopologyExtensions if switched off by BIOS */
++ if (c->x86 == 0x15 &&
++ (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
++ !cpu_has(c, X86_FEATURE_TOPOEXT)) {
++
++ if (msr_set_bit(0xc0011005, 54) > 0) {
++ rdmsrl(0xc0011005, value);
++ if (value & BIT_64(54)) {
++ set_cpu_cap(c, X86_FEATURE_TOPOEXT);
++ pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
++ }
++ }
++ }
++
+ amd_get_topology_early(c);
+ }
+
+@@ -797,19 +807,6 @@ static void init_amd_bd(struct cpuinfo_x
+ {
+ u64 value;
+
+- /* re-enable TopologyExtensions if switched off by BIOS */
+- if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
+- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+-
+- if (msr_set_bit(0xc0011005, 54) > 0) {
+- rdmsrl(0xc0011005, value);
+- if (value & BIT_64(54)) {
+- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+- }
+- }
+- }
+-
+ /*
+ * The way access filter has a performance penalty on some workloads.
+ * Disable it on the affected CPUs.
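
The re-enable path moved above flips bit 54 of MSR 0xc0011005 on the affected family 0x15 models. For inspection, that bit can also be read from user space through the msr character device (CONFIG_X86_MSR, run as root); a hedged sketch, where MSR_TOPOEXT_CTL is only a local label for the raw MSR number:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_TOPOEXT_CTL 0xc0011005	/* AMD MSR carrying the TOPOEXT override bit */

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* The msr driver maps the file offset to the MSR number. */
	if (pread(fd, &val, sizeof(val), MSR_TOPOEXT_CTL) != sizeof(val)) {
		perror("rdmsr 0xc0011005");
		close(fd);
		return 1;
	}
	printf("MSR 0xc0011005 = 0x%016llx, TOPOEXT override bit 54 = %llu\n",
	       (unsigned long long)val, (unsigned long long)((val >> 54) & 1));
	close(fd);
	return 0;
}
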
diff --git a/patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch b/patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch
new file mode 100644
index 0000000000..81913b3dec
--- /dev/null
+++ b/patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch
@@ -0,0 +1,142 @@
+From 69a72a13d40494f12684450ec6cdb1e1450185e6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 29 Jun 2018 16:05:48 +0200
+Subject: [PATCH 30/40] cpu/hotplug: Boot HT siblings at least once
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 0cc3cd21657be04cb0559fe8063f2130493f92cf upstream
+
+Due to the way Machine Check Exceptions work on X86 hyperthreads it's
+required to boot up _all_ logical cores at least once in order to set the
+CR4.MCE bit.
+
+So instead of ignoring the sibling threads right away, let them boot up
+once so they can configure themselves. After they came out of the initial
+boot stage check whether its a "secondary" sibling and cancel the operation
+which puts the CPU back into offline state.
+
+Reported-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ kernel/cpu.c | 72 +++++++++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 48 insertions(+), 24 deletions(-)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -56,6 +56,7 @@ struct cpuhp_cpu_state {
+ bool rollback;
+ bool single;
+ bool bringup;
++ bool booted_once;
+ struct hlist_node *node;
+ enum cpuhp_state cb_state;
+ int result;
+@@ -344,6 +345,40 @@ void cpu_hotplug_enable(void)
+ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ #endif /* CONFIG_HOTPLUG_CPU */
+
++#ifdef CONFIG_HOTPLUG_SMT
++enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
++
++static int __init smt_cmdline_disable(char *str)
++{
++ cpu_smt_control = CPU_SMT_DISABLED;
++ if (str && !strcmp(str, "force")) {
++ pr_info("SMT: Force disabled\n");
++ cpu_smt_control = CPU_SMT_FORCE_DISABLED;
++ }
++ return 0;
++}
++early_param("nosmt", smt_cmdline_disable);
++
++static inline bool cpu_smt_allowed(unsigned int cpu)
++{
++ if (cpu_smt_control == CPU_SMT_ENABLED)
++ return true;
++
++ if (topology_is_primary_thread(cpu))
++ return true;
++
++ /*
++ * On x86 it's required to boot all logical CPUs at least once so
++ * that the init code can get a chance to set CR4.MCE on each
++ * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
++ * core will shutdown the machine.
++ */
++ return !per_cpu(cpuhp_state, cpu).booted_once;
++}
++#else
++static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
++#endif
++
+ /* Notifier wrappers for transitioning to state machine */
+
+ static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
+@@ -361,6 +396,16 @@ static int bringup_wait_for_ap(unsigned
+ stop_machine_unpark(cpu);
+ kthread_unpark(st->thread);
+
++ /*
++ * SMT soft disabling on X86 requires to bring the CPU out of the
++ * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
++ * CPU marked itself as booted_once in cpu_notify_starting() so the
++ * cpu_smt_allowed() check will now return false if this is not the
++ * primary sibling.
++ */
++ if (!cpu_smt_allowed(cpu))
++ return -ECANCELED;
++
+ /* Should we go further up ? */
+ if (st->target > CPUHP_AP_ONLINE_IDLE) {
+ __cpuhp_kick_ap_work(st);
+@@ -826,29 +871,6 @@ int cpu_down(unsigned int cpu)
+ EXPORT_SYMBOL(cpu_down);
+ #endif /*CONFIG_HOTPLUG_CPU*/
+
+-#ifdef CONFIG_HOTPLUG_SMT
+-enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
+-
+-static int __init smt_cmdline_disable(char *str)
+-{
+- cpu_smt_control = CPU_SMT_DISABLED;
+- if (str && !strcmp(str, "force")) {
+- pr_info("SMT: Force disabled\n");
+- cpu_smt_control = CPU_SMT_FORCE_DISABLED;
+- }
+- return 0;
+-}
+-early_param("nosmt", smt_cmdline_disable);
+-
+-static inline bool cpu_smt_allowed(unsigned int cpu)
+-{
+- return cpu_smt_control == CPU_SMT_ENABLED ||
+- topology_is_primary_thread(cpu);
+-}
+-#else
+-static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
+-#endif
+-
+ /**
+ * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
+ * @cpu: cpu that just started
+@@ -862,6 +884,7 @@ void notify_cpu_starting(unsigned int cp
+ enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
+
+ rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
++ st->booted_once = true;
+ while (st->state < target) {
+ st->state++;
+ cpuhp_invoke_callback(cpu, st->state, true, NULL);
+@@ -1984,7 +2007,8 @@ void __init boot_cpu_init(void)
+ */
+ void __init boot_cpu_hotplug_init(void)
+ {
+- per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
++ this_cpu_write(cpuhp_state.booted_once, true);
++ this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
+ }
+
+ /* kabi */
diff --git a/patches.arch/18-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch b/patches.arch/18-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch
new file mode 100644
index 0000000000..ca6a3f693c
--- /dev/null
+++ b/patches.arch/18-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch
@@ -0,0 +1,77 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Subject: [PATCH] cpu/hotplug: Online siblings when SMT control is turned on
+Patch-mainline: not yet, under discussion
+References: bsc#1089343 CVE-2018-3646
+
+commit 215af5499d9e2b55f111d2431ea20218115f29b3 upstream
+
+Writing 'off' to /sys/devices/system/cpu/smt/control offlines all SMT
+siblings. Writing 'on' merely enables the ability to online them, but does
+not online them automatically.
+
+Make 'on' more useful by onlining all offline siblings.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+---
+ kernel/cpu.c | 26 ++++++++++++++++++++++++--
+ 1 file changed, 24 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 5a00ebdf98c6..d79e24df2420 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1979,6 +1979,15 @@ static void cpuhp_offline_cpu_device(unsigned int cpu)
+ kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+ }
+
++static void cpuhp_online_cpu_device(unsigned int cpu)
++{
++ struct device *dev = get_cpu_device(cpu);
++
++ dev->offline = false;
++ /* Tell user space about the state change */
++ kobject_uevent(&dev->kobj, KOBJ_ONLINE);
++}
++
+ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ {
+ int cpu, ret = 0;
+@@ -2011,11 +2020,24 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+ return ret;
+ }
+
+-static void cpuhp_smt_enable(void)
++static int cpuhp_smt_enable(void)
+ {
++ int cpu, ret = 0;
++
+ cpu_maps_update_begin();
+ cpu_smt_control = CPU_SMT_ENABLED;
++ for_each_present_cpu(cpu) {
++ /* Skip online CPUs and CPUs on offline nodes */
++ if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
++ continue;
++ ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
++ if (ret)
++ break;
++ /* See comment in cpuhp_smt_disable() */
++ cpuhp_online_cpu_device(cpu);
++ }
+ cpu_maps_update_done();
++ return ret;
+ }
+
+ static ssize_t
+@@ -2046,7 +2068,7 @@ store_smt_control(struct device *dev, struct device_attribute *attr,
+ if (ctrlval != cpu_smt_control) {
+ switch (ctrlval) {
+ case CPU_SMT_ENABLED:
+- cpuhp_smt_enable();
++ ret = cpuhp_smt_enable();
+ break;
+ case CPU_SMT_DISABLED:
+ case CPU_SMT_FORCE_DISABLED:
+--
+2.12.3
+
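
After this change, writing 'on' to the SMT control file onlines the parked siblings again instead of merely permitting it. A small C sketch that exercises the interface the series adds under /sys/devices/system/cpu/smt (assumes root and a kernel carrying these patches):

#include <stdio.h>

static int write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(s, f);
	return fclose(f);
}

int main(void)
{
	char buf[32] = "";
	FILE *f;

	/* "off" offlines all secondary siblings, "on" now onlines them again */
	if (write_str("/sys/devices/system/cpu/smt/control", "on") != 0)
		perror("write smt/control");

	f = fopen("/sys/devices/system/cpu/smt/control", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("smt/control: %s", buf);
		fclose(f);
	}

	f = fopen("/sys/devices/system/cpu/smt/active", "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("smt/active: %s", buf);
		fclose(f);
	}
	return 0;
}
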
diff --git a/patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch b/patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch
new file mode 100644
index 0000000000..71a53daec6
--- /dev/null
+++ b/patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch
@@ -0,0 +1,72 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 3 May 2018 08:35:42 -0700
+Subject: [PATCH 1/8] x86, l1tf: Increase 32bit PAE __PHYSICAL_PAGE_MASK
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+We need to protect memory inside the guest's memory against L1TF
+by inverting the right bits to point to non existing memory.
+
+The hypervisor should already protect itself against the guest by flushing
+the caches as needed, but pages inside the guest are not protected against
+attacks from other processes in that guest.
+
+Our inverted PTE mask has to match the host to provide the full
+protection for all pages the host could possibly map into our guest.
+The host is likely 64bit and may use more than 43 bits of
+memory. We want to set all possible bits to be safe here.
+
+On 32bit PAE the max PTE mask is currently set to 44 bit because that is
+the limit imposed by 32bit unsigned long PFNs in the VMs. This limits
+the mask to be below what the host could possible use for physical
+pages.
+
+The L1TF PROT_NONE protection code uses the PTE masks to determine
+what bits to invert to make sure the higher bits are set for unmapped
+entries to prevent L1TF speculation attacks against EPT inside guests.
+
+We want to invert all bits that could be used by the host.
+
+So increase the mask on 32bit PAE to 52 to match 64bit.
+
+The real limit for a 32bit OS is still 44 bits.
+
+All Linux PTEs are created from unsigned long PFNs, so cannot be
+higher than 44 bits on a 32bit kernel. So these extra PFN
+bits should be never set. The only users of this macro are using
+it to look at PTEs, so it's safe.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+
+v2: Improve commit message.
+---
+ arch/x86/include/asm/page_32_types.h | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
+index aa30c3241ea7..0d5c739eebd7 100644
+--- a/arch/x86/include/asm/page_32_types.h
++++ b/arch/x86/include/asm/page_32_types.h
+@@ -29,8 +29,13 @@
+ #define N_EXCEPTION_STACKS 1
+
+ #ifdef CONFIG_X86_PAE
+-/* 44=32+12, the limit we can fit into an unsigned long pfn */
+-#define __PHYSICAL_MASK_SHIFT 44
++/*
++ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
++ * but we need the full mask to make sure inverted PROT_NONE
++ * entries have all the host bits set in a guest.
++ * The real limit is still 44 bits.
++ */
++#define __PHYSICAL_MASK_SHIFT 52
+ #define __VIRTUAL_MASK_SHIFT 32
+
+ #else /* !CONFIG_X86_PAE */
+--
+2.14.4
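
The only functional change above is a wider physical page mask on 32-bit PAE, so that the inversion added later can set every bit the host might use. A tiny stand-alone comparison of the two mask values (plain C, not kernel code):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

static uint64_t phys_page_mask(unsigned int shift)
{
	/* __PHYSICAL_PAGE_MASK: physical-address mask with the page offset cleared */
	return (((uint64_t)1 << shift) - 1) & ~(((uint64_t)1 << PAGE_SHIFT) - 1);
}

int main(void)
{
	printf("shift 44: PFN mask = 0x%016llx\n",
	       (unsigned long long)phys_page_mask(44));
	printf("shift 52: PFN mask = 0x%016llx\n",
	       (unsigned long long)phys_page_mask(52));
	/* The extra bits 44..51 are exactly the ones the inversion now covers. */
	return 0;
}
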
diff --git a/patches.arch/x86-l1tf-02-change-order-of-offset-type.patch b/patches.arch/x86-l1tf-02-change-order-of-offset-type.patch
new file mode 100644
index 0000000000..1d5fe1d2ca
--- /dev/null
+++ b/patches.arch/x86-l1tf-02-change-order-of-offset-type.patch
@@ -0,0 +1,89 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 27 Apr 2018 09:06:34 -0700
+Subject: [PATCH 2/8] x86/speculation/l1tf: Change order of offset/type in swap
+ entry
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+Here's a patch that switches the order of "type" and
+"offset" in the x86-64 encoding in preparation of the next
+patch which inverts the swap entry to protect against L1TF.
+
+That means that now the offset is bits 9-58 in the page table, and that
+the type is in the bits that hardware generally doesn't care about.
+
+That, in turn, means that if you have a desktop chip with only 40 bits of
+physical addressing, now that the offset starts at bit 9, you still have
+to have 30 bits of offset actually *in use* until bit 39 ends up being
+clear.
+
+So that's 4 terabyte of swap space (because the offset is counted in
+pages, so 30 bits of offset is 42 bits of actual coverage). With bigger
+physical addressing, that obviously grows further, until you hit the limit
+of the offset (at 50 bits of offset - 62 bits of actual swap file
+coverage).
+
+[updated description and minor tweaks by AK]
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-by: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+ arch/x86/include/asm/pgtable_64.h | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -274,7 +274,7 @@ static inline int pgd_large(pgd_t pgd) {
+ *
+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
+- * | OFFSET (14->63) | TYPE (9-13) |0|X|X|X| X| X|X|X|0| <- swp entry
++ * | TYPE (59-63) | OFFSET (9-58) |0|X|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+@@ -282,19 +282,28 @@ static inline int pgd_large(pgd_t pgd) {
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
+ */
+-#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+-#define SWP_TYPE_BITS 5
+-/* Place the offset above the type: */
+-#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
++#define SWP_TYPE_BITS 5
++
++#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
++
++/* We always extract/encode the offset by shifting it all the way up, and then down again */
++#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
+
+ #define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
+
+-#define __swp_type(x) (((x).val >> (SWP_TYPE_FIRST_BIT)) \
+- & ((1U << SWP_TYPE_BITS) - 1))
+-#define __swp_offset(x) ((x).val >> SWP_OFFSET_FIRST_BIT)
+-#define __swp_entry(type, offset) ((swp_entry_t) { \
+- ((type) << (SWP_TYPE_FIRST_BIT)) \
+- | ((offset) << SWP_OFFSET_FIRST_BIT) })
++/* Extract the high bits for type */
++#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
++
++/* Shift up (to get rid of type), then down to get value */
++#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
++
++/*
++ * Shift the offset up "too far" by TYPE bits, then down again
++ */
++#define __swp_entry(type, offset) ((swp_entry_t) { \
++ ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
++ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
++
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
+ #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+
diff --git a/patches.arch/x86-l1tf-03-protect-swap-entries.patch b/patches.arch/x86-l1tf-03-protect-swap-entries.patch
new file mode 100644
index 0000000000..3ae6bcd01b
--- /dev/null
+++ b/patches.arch/x86-l1tf-03-protect-swap-entries.patch
@@ -0,0 +1,78 @@
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Fri, 27 Apr 2018 09:06:34 -0700
+Subject: [PATCH 2/8] x86, l1tf: Protect swap entries against L1TF
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+With L1 terminal fault the CPU speculates into unmapped PTEs, and
+resulting side effects allow to read the memory the PTE is pointing
+too, if its values are still in the L1 cache.
+
+For swapped out pages Linux uses unmapped PTEs and stores a swap entry
+into them.
+
+We need to make sure the swap entry is not pointing to valid memory,
+which requires setting higher bits (between bit 36 and bit 45) that
+are inside the CPUs physical address space, but outside any real
+memory.
+
+To do this we invert the offset to make sure the higher bits are always
+set, as long as the swap file is not too big.
+
+Note there is no workaround for 32bit !PAE, or on systems which
+have more than MAX_PA/2 worth of memory. The latter case is very unlikely
+to happen on real systems.
+
+[updated description and minor tweaks by AK]
+
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Tested-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+---
+v2: Split out patch that swaps fields.
+---
+ arch/x86/include/asm/pgtable_64.h | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -274,13 +274,16 @@ static inline int pgd_large(pgd_t pgd) {
+ *
+ * | ... | 11| 10| 9|8|7|6|5| 4| 3|2|1|0| <- bit number
+ * | ... |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
+- * | TYPE (59-63) | OFFSET (9-58) |0|X|X|X| X| X|X|SD|0| <- swp entry
++ * | TYPE (59-63) | ~OFFSET (9-58) |0|X|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+ * there. We also need to avoid using A and D because of an
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
++ *
++ * The offset is inverted by a binary not operation to make the high
++ * physical bits set.
+ */
+ #define SWP_TYPE_BITS 5
+
+@@ -295,13 +298,15 @@ static inline int pgd_large(pgd_t pgd) {
+ #define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+ /* Shift up (to get rid of type), then down to get value */
+-#define __swp_offset(x) ((x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
++#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+ /*
+ * Shift the offset up "too far" by TYPE bits, then down again
++ * The offset is inverted by a binary not operation to make the high
++ * physical bits set.
+ */
+ #define __swp_entry(type, offset) ((swp_entry_t) { \
+- ((unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
++ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
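
The encode/decode macros above can be exercised stand-alone to see the invariant they establish: with the offset stored inverted, a small (realistic) swap offset leaves the high physical-address bits of the entry set. A self-contained sketch using the same shifts on a plain 64-bit value rather than the kernel's pte type:

#include <stdio.h>
#include <stdint.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	9			/* _PAGE_BIT_PROTNONE + 1 */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

/* Same structure as the patched __swp_entry()/__swp_offset()/__swp_type() */
static uint64_t swp_entry(unsigned type, uint64_t offset)
{
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
	       ((uint64_t)type << (64 - SWP_TYPE_BITS));
}

static unsigned swp_type(uint64_t val)   { return val >> (64 - SWP_TYPE_BITS); }
static uint64_t swp_offset(uint64_t val) { return ~val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT; }

int main(void)
{
	uint64_t e = swp_entry(3, 0x12345);	/* small offset: well below MAX_PA/2 */

	printf("entry  = 0x%016llx\n", (unsigned long long)e);
	printf("type   = %u (expect 3)\n", swp_type(e));
	printf("offset = 0x%llx (expect 0x12345)\n",
	       (unsigned long long)swp_offset(e));
	/* Bits 9..58 of the raw entry hold the *inverted* offset, so for small
	 * offsets the high physical-address bits are set and never point at RAM. */
	return 0;
}
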
diff --git a/patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch b/patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
new file mode 100644
index 0000000000..5c723b2295
--- /dev/null
+++ b/patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
@@ -0,0 +1,254 @@
+From 73a8594bdc5d88bdb125e458a4147669b8ff1cd1 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 27 Apr 2018 09:47:37 -0700
+Subject: [PATCH 3/8] x86, l1tf: Protect PROT_NONE PTEs against speculation
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+We also need to protect PTEs that are set to PROT_NONE against
+L1TF speculation attacks.
+
+This is important inside guests, because L1TF speculation
+bypasses physical page remapping. While the VM has its own
+migitations preventing leaking data from other VMs into
+the guest, this would still risk leaking the wrong page
+inside the current guest.
+
+This uses the same technique as Linus' swap entry patch:
+while an entry is in PROTNONE state we invert the
+complete PFN part of it. This ensures that the
+highest bit will point to non-existing memory.
+
+The invert is done by pte/pmd_modify and pfn/pmd/pud_pte for
+PROTNONE and pte/pmd/pud_pfn undo it.
+
+We assume that no one tries to touch the PFN part of
+a PTE without using these primitives.
+
+This doesn't handle the case that MMIO is on the top
+of the CPU physical memory. If such an MMIO region
+was exposed by an unprivileged driver for mmap
+it would be possible to attack some real memory.
+However this situation is all rather unlikely.
+
+For 32bit non PAE we don't try inversion because
+there are really not enough bits to protect anything.
+
+Q: Why does the guest need to be protected when the
+HyperVisor already has L1TF mitigations?
+A: Here's an example:
+You have physical pages 1 2. They get mapped into a guest as
+GPA 1 -> PA 2
+GPA 2 -> PA 1
+through EPT.
+
+The L1TF speculation ignores the EPT remapping.
+
+Now the guest kernel maps GPA 1 to process A and GPA 2 to process B,
+and they belong to different users and should be isolated.
+
+A sets the GPA 1 PA 2 PTE to PROT_NONE to bypass the EPT remapping
+and gets read access to the underlying physical page. Which
+in this case points to PA 2, so it can read process B's data,
+if it happened to be in L1.
+
+So we broke isolation inside the guest.
+
+There's nothing the hypervisor can do about this. This
+mitigation has to be done in the guest.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+v2: Use new helper to generate XOR mask to invert (Linus)
+v3: Use inline helper for protnone mask checking
+v4: Use inline helpers to check for PROT_NONE changes
+---
+ arch/x86/include/asm/pgtable-2level.h | 17 +++++++++++++
+ arch/x86/include/asm/pgtable-3level.h | 2 +
+ arch/x86/include/asm/pgtable-invert.h | 32 ++++++++++++++++++++++++
+ arch/x86/include/asm/pgtable.h | 44 +++++++++++++++++++++++-----------
+ arch/x86/include/asm/pgtable_64.h | 2 +
+ 5 files changed, 84 insertions(+), 13 deletions(-)
+ create mode 100644 arch/x86/include/asm/pgtable-invert.h
+
+--- a/arch/x86/include/asm/pgtable-2level.h
++++ b/arch/x86/include/asm/pgtable-2level.h
+@@ -94,4 +94,21 @@ static inline unsigned long pte_bitop(un
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
+ #define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+
++/* No inverted PFNs on 2 level page tables */
++
++static inline u64 protnone_mask(u64 val)
++{
++ return 0;
++}
++
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
++{
++ return val;
++}
++
++static inline bool __pte_needs_invert(u64 val)
++{
++ return false;
++}
++
+ #endif /* _ASM_X86_PGTABLE_2LEVEL_H */
+--- a/arch/x86/include/asm/pgtable-3level.h
++++ b/arch/x86/include/asm/pgtable-3level.h
+@@ -212,4 +212,6 @@ static inline pud_t native_pudp_get_and_
+ #define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
+ #define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
+
++#include <asm/pgtable-invert.h>
++
+ #endif /* _ASM_X86_PGTABLE_3LEVEL_H */
+--- /dev/null
++++ b/arch/x86/include/asm/pgtable-invert.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_PGTABLE_INVERT_H
++#define _ASM_PGTABLE_INVERT_H 1
++
++#ifndef __ASSEMBLY__
++
++static inline bool __pte_needs_invert(u64 val)
++{
++ return (val & (_PAGE_PRESENT|_PAGE_PROTNONE)) == _PAGE_PROTNONE;
++}
++
++/* Get a mask to xor with the page table entry to get the correct pfn. */
++static inline u64 protnone_mask(u64 val)
++{
++ return __pte_needs_invert(val) ? ~0ull : 0;
++}
++
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
++{
++ /*
++ * When a PTE transitions from NONE to !NONE or vice-versa
++ * invert the PFN part to stop speculation.
++ * pte_pfn undoes this when needed.
++ */
++ if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
++ val = (val & ~mask) | (~val & mask);
++ return val;
++}
++
++#endif /* __ASSEMBLY__ */
++
++#endif
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -188,19 +188,29 @@ static inline int pte_special(pte_t pte)
+ return pte_flags(pte) & _PAGE_SPECIAL;
+ }
+
++/* Entries that were set to PROT_NONE are inverted */
++
++static inline u64 protnone_mask(u64 val);
++
+ static inline unsigned long pte_pfn(pte_t pte)
+ {
+- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
++ unsigned long pfn = pte_val(pte);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pmd_pfn(pmd_t pmd)
+ {
+- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
++ unsigned long pfn = pmd_val(pmd);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long pud_pfn(pud_t pud)
+ {
+- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
++ unsigned long pfn = pud_val(pud);
++ pfn ^= protnone_mask(pfn);
++ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ }
+
+ static inline unsigned long p4d_pfn(p4d_t p4d)
+@@ -526,25 +536,33 @@ static inline pgprotval_t massage_pgprot
+
+ static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- massage_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PTE_PFN_MASK;
++ return __pte(pfn | massage_pgprot(pgprot));
+ }
+
+ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- massage_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PHYSICAL_PMD_PAGE_MASK;
++ return __pmd(pfn | massage_pgprot(pgprot));
+ }
+
+ static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+ {
+- return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
+- massage_pgprot(pgprot));
++ phys_addr_t pfn = page_nr << PAGE_SHIFT;
++ pfn ^= protnone_mask(pgprot_val(pgprot));
++ pfn &= PHYSICAL_PUD_PAGE_MASK;
++ return __pud(pfn | massage_pgprot(pgprot));
+ }
+
++static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
++
+ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+ {
+- pteval_t val = pte_val(pte);
++ pteval_t val = pte_val(pte), oldval = val;
+
+ /*
+ * Chop off the NX bit (if present), and add the NX portion of
+@@ -552,17 +570,17 @@ static inline pte_t pte_modify(pte_t pte
+ */
+ val &= _PAGE_CHG_MASK;
+ val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
+-
++ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
+ return __pte(val);
+ }
+
+ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+ {
+- pmdval_t val = pmd_val(pmd);
++ pmdval_t val = pmd_val(pmd), oldval = val;
+
+ val &= _HPAGE_CHG_MASK;
+ val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
+-
++ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
+ return __pmd(val);
+ }
+
+--- a/arch/x86/include/asm/pgtable_64.h
++++ b/arch/x86/include/asm/pgtable_64.h
+@@ -335,6 +335,8 @@ extern void cleanup_highmap(void);
+ extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+ extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
++#include <asm/pgtable-invert.h>
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_X86_PGTABLE_64_H */
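
The helpers introduced above are small enough to model outside the kernel: a PROT_NONE entry gets its PFN field XOR-inverted on the way in and un-inverted by pte_pfn() on the way out. A compact stand-alone model, with simplified flag and mask constants that only approximate the real pgtable definitions:

#include <stdio.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1ULL << 0)
#define _PAGE_PROTNONE	(1ULL << 8)		/* global bit reused for PROT_NONE */
#define PTE_PFN_MASK	0x000ffffffffff000ULL	/* PFN field, bits 12..51 */

static int pte_needs_invert(uint64_t val)
{
	return (val & (_PAGE_PRESENT | _PAGE_PROTNONE)) == _PAGE_PROTNONE;
}

static uint64_t protnone_mask(uint64_t val)
{
	return pte_needs_invert(val) ? ~0ULL : 0;
}

static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
	/* Invert the PFN when the entry transitions NONE <-> !NONE */
	if (pte_needs_invert(oldval) != pte_needs_invert(val))
		val = (val & ~mask) | (~val & mask);
	return val;
}

int main(void)
{
	uint64_t pfn = 0x12345;			/* ~300 MB into RAM */
	uint64_t present = (pfn << 12) | _PAGE_PRESENT;
	uint64_t prot_none = flip_protnone_guard(present,
				(pfn << 12) | _PAGE_PROTNONE, PTE_PFN_MASK);

	printf("present   pte: 0x%016llx\n", (unsigned long long)present);
	printf("PROT_NONE pte: 0x%016llx (stored PFN now points above RAM)\n",
	       (unsigned long long)prot_none);
	/* Decoding undoes the inversion, exactly like pte_pfn() in the patch: */
	printf("decoded PFN  : 0x%llx\n", (unsigned long long)
	       (((prot_none ^ protnone_mask(prot_none)) & PTE_PFN_MASK) >> 12));
	return 0;
}
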
diff --git a/patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch b/patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch
new file mode 100644
index 0000000000..507e3fc447
--- /dev/null
+++ b/patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch
@@ -0,0 +1,46 @@
+From 17df1843b8d59783742f2c0becad3eb9f275b76a Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Mon, 23 Apr 2018 15:57:54 -0700
+Subject: [PATCH 4/8] x86, l1tf: Make sure the first page is always reserved
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+The L1TF workaround doesn't make any attempt to mitigate speculate
+accesses to the first physical page for zeroed PTEs. Normally
+it only contains some data from the early real mode BIOS.
+
+I couldn't convince myself we always reserve the first page in
+all configurations, so add an extra reservation call to
+make sure it is really reserved. In most configurations (e.g.
+with the standard reservations) it's likely a nop.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+
+---
+v2: improve comment
+---
+ arch/x86/kernel/setup.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index 5c623dfe39d1..89fd35349412 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -823,6 +823,12 @@ void __init setup_arch(char **cmdline_p)
+ memblock_reserve(__pa_symbol(_text),
+ (unsigned long)__bss_stop - (unsigned long)_text);
+
++ /*
++ * Make sure page 0 is always reserved because on systems with
++ * L1TF its contents can be leaked to user processes.
++ */
++ memblock_reserve(0, PAGE_SIZE);
++
+ early_reserve_initrd();
+
+ /*
+--
+2.14.4
diff --git a/patches.arch/x86-l1tf-06-add-sysfs-report.patch b/patches.arch/x86-l1tf-06-add-sysfs-report.patch
new file mode 100644
index 0000000000..057526863a
--- /dev/null
+++ b/patches.arch/x86-l1tf-06-add-sysfs-report.patch
@@ -0,0 +1,230 @@
+From 8865a468fa92e1e507b820f74e8d051c50ef49dc Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 27 Apr 2018 14:44:53 -0700
+Subject: [PATCH 5/8] x86, l1tf: Add sysfs report for l1tf
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+L1TF core kernel workarounds are cheap and normally always enabled,
+However we still want to report in sysfs if the system is vulnerable
+or mitigated. Add the necessary checks.
+
+- We extend the existing checks for Meltdowns to determine if the system is
+vulnerable. This excludes some Atom CPUs which don't have this
+problem.
+- We check for 32bit non PAE and warn
+- If the system has more than MAX_PA/2 physical memory the
+invert page workarounds don't protect the system against
+the L1TF attack anymore, because an inverted physical address
+will point to valid memory. Print a warning in this case
+and report that the system is vulnerable.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+
+---
+v2: Use positive instead of negative flag for WA. Fix override
+reporting.
+v3: Fix L1TF_WA flag settting
+v4: Rebase to SSB tree
+v5: Minor cleanups. No functional changes.
+Don't mark atoms and knights as vulnerable
+v6: Change _WA to _FIX
+v7: Use common sysfs function
+v8: Improve commit message
+Move mitigation check into check_bugs.
+Integrate memory size checking into this patch
+White space changes. Move l1tf_pfn_limit here.
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +
+ arch/x86/include/asm/processor.h | 5 ++++
+ arch/x86/kernel/cpu/bugs.c | 40 +++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/cpu/common.c | 20 ++++++++++++++++++
+ drivers/base/cpu.c | 8 +++++++
+ include/linux/cpu.h | 2 +
+ 6 files changed, 77 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -221,6 +221,7 @@
+ #define X86_FEATURE_IBPB ( 7*32+27) /* Indirect Branch Prediction Barrier */
+ #define X86_FEATURE_STIBP ( 7*32+28) /* Single Thread Indirect Branch Predictors */
+ #define X86_FEATURE_ZEN ( 7*32+29) /* "" CPU is AMD family 0x17 (Zen) */
++#define X86_FEATURE_L1TF_FIX ( 7*32+30) /* "" L1TF workaround used */
+
+ /* Virtualization flags: Linux defined, word 8 */
+ #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+@@ -369,4 +370,5 @@
+ #define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+ #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+ #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
++#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+ #endif /* _ASM_X86_CPUFEATURES_H */
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -182,6 +182,11 @@ extern const struct seq_operations cpuin
+
+ extern void cpu_detect(struct cpuinfo_x86 *c);
+
++static inline unsigned long l1tf_pfn_limit(void)
++{
++ return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
++}
++
+ extern void early_cpu_init(void);
+ extern void identify_boot_cpu(void);
+ extern void identify_secondary_cpu(struct cpuinfo_x86 *);
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -27,9 +27,11 @@
+ #include <asm/set_memory.h>
+ #include <asm/intel-family.h>
+ #include <asm/hypervisor.h>
++#include <asm/e820/api.h>
+
+ static void __init spectre_v2_select_mitigation(void);
+ static void __init ssb_select_mitigation(void);
++static void __init l1tf_select_mitigation(void);
+
+ /*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+@@ -81,6 +83,8 @@ void __init check_bugs(void)
+ */
+ ssb_select_mitigation();
+
++ l1tf_select_mitigation();
++
+ #ifdef CONFIG_X86_32
+ /*
+ * Check whether we are able to run this kernel safely on SMP.
+@@ -207,6 +211,32 @@ static void x86_amd_ssb_disable(void)
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+ }
+
++static void __init l1tf_select_mitigation(void)
++{
++ u64 half_pa;
++
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return;
++
++#if CONFIG_PGTABLE_LEVELS == 2
++ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
++ return;
++#endif
++
++ /*
++ * This is extremely unlikely to happen because almost all
++ * systems have far more MAX_PA/2 than RAM can be fit into
++ * DIMM slots.
++ */
++ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
++ if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
++ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
++ return;
++ }
++
++ setup_force_cpu_cap(X86_FEATURE_L1TF_FIX);
++}
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+
+@@ -704,6 +734,11 @@ static ssize_t cpu_show_common(struct de
+ case X86_BUG_SPEC_STORE_BYPASS:
+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
++ case X86_BUG_L1TF:
++ if (boot_cpu_has(X86_FEATURE_L1TF_FIX))
++ return sprintf(buf, "Mitigation: Page Table Inversion\n");
++ break;
++
+ default:
+ break;
+ }
+@@ -730,4 +765,9 @@ ssize_t cpu_show_spec_store_bypass(struc
+ {
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
+ }
++
++ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
++}
+ #endif
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -933,6 +933,21 @@ static const __initconst struct x86_cpu_
+ {}
+ };
+
++static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
++ /* in addition to cpu_no_speculation */
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
++ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
++ {}
++};
++
+ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ {
+ u64 ia32_cap = 0;
+@@ -958,6 +973,11 @@ static void __init cpu_set_bug_bits(stru
+ return;
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
++
++ if (x86_match_cpu(cpu_no_l1tf))
++ return;
++
++ setup_force_cpu_bug(X86_BUG_L1TF);
+ }
+
+ /*
+--- a/drivers/base/cpu.c
++++ b/drivers/base/cpu.c
+@@ -527,16 +527,24 @@ ssize_t __weak cpu_show_spec_store_bypas
+ return sprintf(buf, "Not affected\n");
+ }
+
++ssize_t __weak cpu_show_l1tf(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "Not affected\n");
++}
++
+ static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
+ static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
+ static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+ static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
++static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
+
+ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
+ &dev_attr_meltdown.attr,
+ &dev_attr_spectre_v1.attr,
+ &dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
++ &dev_attr_l1tf.attr,
+ NULL
+ };
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -54,6 +54,8 @@ extern ssize_t cpu_show_spectre_v2(struc
+ struct device_attribute *attr, char *buf);
+ extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);
++extern ssize_t cpu_show_l1tf(struct device *dev,
++ struct device_attribute *attr, char *buf);
+
+ extern __printf(4, 5)
+ struct device *cpu_device_create(struct device *parent, void *drvdata,
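
With this patch the mitigation state becomes visible as a sysfs attribute. A trivial reader for the new file, equivalent to running cat on /sys/devices/system/cpu/vulnerabilities/l1tf:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

	if (!f) {
		perror("l1tf sysfs attribute");	/* pre-patch kernels lack it */
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		printf("l1tf: %s", line);	/* e.g. "Mitigation: Page Table Inversion" */
	fclose(f);
	return 0;
}
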
diff --git a/patches.arch/x86-l1tf-07-limit-swap-file-size.patch b/patches.arch/x86-l1tf-07-limit-swap-file-size.patch
new file mode 100644
index 0000000000..b35b4aba63
--- /dev/null
+++ b/patches.arch/x86-l1tf-07-limit-swap-file-size.patch
@@ -0,0 +1,140 @@
+From aaedeb15cb5c75e44b29e895b60c2dbffa1a7e14 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <ak@linux.intel.com>
+Date: Fri, 27 Apr 2018 15:29:17 -0700
+Subject: [PATCH 7/8] x86, l1tf: Limit swap file size to MAX_PA/2
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+For the L1TF workaround we want to limit the swap file size to below
+MAX_PA/2, so that the higher bits of the swap offset inverted never
+point to valid memory.
+
+Add a way for the architecture to override the swap file
+size check in swapfile.c and add a x86 specific max swapfile check
+function that enforces that limit.
+
+The check is only enabled if the CPU is vulnerable to L1TF.
+
+In VMs with 42bit MAX_PA the typical limit is 2TB now,
+on a native system with 46bit PA it is 32TB. The limit
+is only per individual swap file, so it's always possible
+to exceed these limits with multiple swap files or
+partitions.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+
+---
+v2: Use new helper for maxpa_mask computation.
+v3: Use l1tf_pfn_limit (Thomas)
+Reformat comment
+v4: Use boot_cpu_has_bug
+v5: Move l1tf_pfn_limit to earlier patch
+---
+ arch/x86/mm/init.c | 15 +++++++++++++++
+ include/linux/swapfile.h | 2 ++
+ mm/swapfile.c | 46 ++++++++++++++++++++++++++++++----------------
+ 3 files changed, 47 insertions(+), 16 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -4,6 +4,8 @@
+ #include <linux/swap.h>
+ #include <linux/memblock.h>
+ #include <linux/bootmem.h> /* for max_low_pfn */
++#include <linux/swapfile.h>
++#include <linux/swapops.h>
+
+ #include <asm/set_memory.h>
+ #include <asm/e820/api.h>
+@@ -880,3 +882,16 @@ void update_cache_mode_entry(unsigned en
+ __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
+ __pte2cachemode_tbl[entry] = cache;
+ }
++
++unsigned long max_swapfile_size(void)
++{
++ unsigned long pages;
++
++ pages = generic_max_swapfile_size();
++
++ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
++ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
++ pages = min_t(unsigned long, l1tf_pfn_limit() + 1, pages);
++ }
++ return pages;
++}
+--- a/include/linux/swapfile.h
++++ b/include/linux/swapfile.h
+@@ -9,5 +9,7 @@ extern spinlock_t swap_lock;
+ extern struct plist_head swap_active_head;
+ extern struct swap_info_struct *swap_info[];
+ extern int try_to_unuse(unsigned int, bool, unsigned long);
++extern unsigned long generic_max_swapfile_size(void);
++extern unsigned long max_swapfile_size(void);
+
+ #endif /* _LINUX_SWAPFILE_H */
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2519,6 +2519,35 @@ static int claim_swapfile(struct swap_in
+ return 0;
+ }
+
++
++/*
++ * Find out how many pages are allowed for a single swap device. There
++ * are two limiting factors:
++ * 1) the number of bits for the swap offset in the swp_entry_t type, and
++ * 2) the number of bits in the swap pte, as defined by the different
++ * architectures.
++ *
++ * In order to find the largest possible bit mask, a swap entry with
++ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
++ * decoded to a swp_entry_t again, and finally the swap offset is
++ * extracted.
++ *
++ * This will mask all the bits from the initial ~0UL mask that can't
++ * be encoded in either the swp_entry_t or the architecture definition
++ * of a swap pte.
++ */
++unsigned long generic_max_swapfile_size(void)
++{
++ return swp_offset(pte_to_swp_entry(
++ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
++}
++
++/* Can be overridden by an architecture for additional checks. */
++__weak unsigned long max_swapfile_size(void)
++{
++ return generic_max_swapfile_size();
++}
++
+ static unsigned long read_swap_header(struct swap_info_struct *p,
+ union swap_header *swap_header,
+ struct inode *inode)
+@@ -2554,22 +2583,7 @@ static unsigned long read_swap_header(st
+ p->cluster_next = 1;
+ p->cluster_nr = 0;
+
+- /*
+- * Find out how many pages are allowed for a single swap
+- * device. There are two limiting factors: 1) the number
+- * of bits for the swap offset in the swp_entry_t type, and
+- * 2) the number of bits in the swap pte as defined by the
+- * different architectures. In order to find the
+- * largest possible bit mask, a swap entry with swap type 0
+- * and swap offset ~0UL is created, encoded to a swap pte,
+- * decoded to a swp_entry_t again, and finally the swap
+- * offset is extracted. This will mask all the bits from
+- * the initial ~0UL mask that can't be encoded in either
+- * the swp_entry_t or the architecture definition of a
+- * swap pte.
+- */
+- maxpages = swp_offset(pte_to_swp_entry(
+- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
++ maxpages = max_swapfile_size();
+ last_page = swap_header->info.last_page;
+ if (!last_page) {
+ pr_warn("Empty swap-file\n");
diff --git a/patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch b/patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch
new file mode 100644
index 0000000000..30ce17f45b
--- /dev/null
+++ b/patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch
@@ -0,0 +1,301 @@
+From: Andi Kleen <ak@linux.intel.com>
+Date: Thu, 3 May 2018 16:39:51 -0700
+Subject: [PATCH 8/8] mm, l1tf: Disallow non privileged high MMIO PROT_NONE
+ mappings
+Patch-mainline: not yet (under discussion)
+References: bnc#1087081, CVE-2018-3620
+
+For L1TF, PROT_NONE mappings are protected by inverting the PFN in the
+page table entry. This sets the high bits in the CPU's address space,
+thus making sure an unmapped entry does not point to valid
+cached memory.
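+
+As a toy illustration of the inversion idea (a sketch only; the 46-bit
+width, page shift and PFN value below are made-up examples, not values
+taken from this patch):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        const int pa_bits = 46, page_shift = 12;   /* assumed */
+        uint64_t pfn = 0x1234;                     /* arbitrary low PFN */
+        uint64_t pfn_mask = (1ULL << (pa_bits - page_shift)) - 1;
+        /* Inverting a small PFN sets the top PFN bits, so the
+         * non-present PTE targets addresses above any cached RAM. */
+        uint64_t inverted = ~pfn & pfn_mask;
+
+        printf("0x%llx -> 0x%llx\n",
+               (unsigned long long)pfn,
+               (unsigned long long)inverted);
+        return 0;
+    }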
+
+Some server system BIOSes put the MMIO mappings high up in the physical
+address space. If such a high mapping were exposed to an unprivileged
+user, they could attack low memory by setting such a mapping to
+PROT_NONE. This could happen through a special device driver
+which is not access protected. Normal /dev/mem is of course
+access protected.
+
+To avoid this we forbid PROT_NONE mappings or mprotect for high MMIO
+mappings.
+
+Valid page mappings are allowed because the system is then unsafe
+anyway.
+
+We don't expect users to commonly use PROT_NONE on MMIO. But
+to minimize any impact here we only do this if the mapping actually
+refers to a high MMIO address (defined as the MAX_PA-1 bit being set),
+and also skip the check for root.
+
+For mmaps this is straightforward and can be handled in vm_insert_pfn()
+and in remap_pfn_range().
+
+For mprotect it's a bit trickier. At the point we're looking at the
+actual PTEs a lot of state has been changed and would be difficult
+to undo on an error. Since this is an uncommon case we use a separate
+early page table walk pass for MMIO PROT_NONE mappings that
+checks for this condition early. For non-MMIO and non-PROT_NONE
+mappings there are no changes.
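+
+A minimal user-space sketch of the resulting behaviour (illustration
+only; "/dev/example_mmio" is a hypothetical device node standing in for
+any driver that maps a high MMIO BAR):
+
+    #include <errno.h>
+    #include <fcntl.h>
+    #include <stdio.h>
+    #include <string.h>
+    #include <sys/mman.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+        int fd = open("/dev/example_mmio", O_RDWR); /* hypothetical */
+        void *p;
+
+        if (fd < 0)
+            return 1;
+        p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+        if (p == MAP_FAILED)
+            return 1;
+        /* With this patch a non-root caller fails here with EACCES
+         * instead of silently creating an invertible PROT_NONE PTE. */
+        if (mprotect(p, 4096, PROT_NONE) < 0)
+            printf("mprotect: %s\n", strerror(errno));
+        munmap(p, 4096);
+        close(fd);
+        return 0;
+    }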
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Acked-By: Dave Hansen <dave.hansen@intel.com>
+Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+
+---
+v2: Use new helpers added earlier
+v3: Fix inverted check added in v3
+v4: Use l1tf_pfn_limit (Thomas)
+Add comment for locked down kernels
+v5: Use boot_cpu_has_bug. Check bug early in arch_has_pfn_modify_check
+---
+ arch/x86/include/asm/pgtable.h | 8 ++++++
+ arch/x86/mm/mmap.c | 21 +++++++++++++++++
+ include/asm-generic/pgtable.h | 12 ++++++++++
+ mm/memory.c | 37 ++++++++++++++++++++++--------
+ mm/mprotect.c | 49 +++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 117 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -1245,6 +1245,14 @@ static inline u16 pte_flags_pkey(unsigne
+ #endif
+ }
+
++#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
++extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
++
++static inline bool arch_has_pfn_modify_check(void)
++{
++ return boot_cpu_has_bug(X86_BUG_L1TF);
++}
++
+ #include <asm-generic/pgtable.h>
+ #endif /* __ASSEMBLY__ */
+
+--- a/arch/x86/mm/mmap.c
++++ b/arch/x86/mm/mmap.c
+@@ -172,3 +172,24 @@ const char *arch_vma_name(struct vm_area
+ return "[mpx]";
+ return NULL;
+ }
++
++/*
++ * Only allow root to set high MMIO mappings to PROT_NONE.
++ * This prevents an unprivileged user from setting them to PROT_NONE and
++ * inverting them, then pointing at valid memory for L1TF speculation.
++ *
++ * Note: locked-down kernels may want to disable the root override.
++ */
++bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
++{
++ if (!boot_cpu_has_bug(X86_BUG_L1TF))
++ return true;
++ if (!__pte_needs_invert(pgprot_val(prot)))
++ return true;
++ /* If it's real memory always allow */
++ if (pfn_valid(pfn))
++ return true;
++ if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
++ return false;
++ return true;
++}
+--- a/include/asm-generic/pgtable.h
++++ b/include/asm-generic/pgtable.h
+@@ -995,4 +995,16 @@ static inline void init_espfix_bsp(void)
+ #endif
+ #endif
+
++#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
++static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
++{
++ return true;
++}
++
++static inline bool arch_has_pfn_modify_check(void)
++{
++ return false;
++}
++#endif
++
+ #endif /* _ASM_GENERIC_PGTABLE_H */
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1874,6 +1874,9 @@ int vm_insert_pfn_prot(struct vm_area_st
+ if (addr < vma->vm_start || addr >= vma->vm_end)
+ return -EFAULT;
+
++ if (!pfn_modify_allowed(pfn, pgprot))
++ return -EACCES;
++
+ track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
+
+ ret = insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
+@@ -1895,6 +1898,9 @@ static int __vm_insert_mixed(struct vm_a
+
+ track_pfn_insert(vma, &pgprot, pfn);
+
++ if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
++ return -EACCES;
++
+ /*
+ * If we don't have pte special, then we have to use the pfn_valid()
+ * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+@@ -1942,6 +1948,7 @@ static int remap_pte_range(struct mm_str
+ {
+ pte_t *pte;
+ spinlock_t *ptl;
++ int err = 0;
+
+ pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ if (!pte)
+@@ -1949,12 +1956,16 @@ static int remap_pte_range(struct mm_str
+ arch_enter_lazy_mmu_mode();
+ do {
+ BUG_ON(!pte_none(*pte));
++ if (!pfn_modify_allowed(pfn, prot)) {
++ err = -EACCES;
++ break;
++ }
+ set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(pte - 1, ptl);
+- return 0;
++ return err;
+ }
+
+ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
+@@ -1963,6 +1974,7 @@ static inline int remap_pmd_range(struct
+ {
+ pmd_t *pmd;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ pmd = pmd_alloc(mm, pud, addr);
+@@ -1971,9 +1983,10 @@ static inline int remap_pmd_range(struct
+ VM_BUG_ON(pmd_trans_huge(*pmd));
+ do {
+ next = pmd_addr_end(addr, end);
+- if (remap_pte_range(mm, pmd, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pte_range(mm, pmd, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (pmd++, addr = next, addr != end);
+ return 0;
+ }
+@@ -1984,6 +1997,7 @@ static inline int remap_pud_range(struct
+ {
+ pud_t *pud;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ pud = pud_alloc(mm, p4d, addr);
+@@ -1991,9 +2005,10 @@ static inline int remap_pud_range(struct
+ return -ENOMEM;
+ do {
+ next = pud_addr_end(addr, end);
+- if (remap_pmd_range(mm, pud, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pmd_range(mm, pud, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (pud++, addr = next, addr != end);
+ return 0;
+ }
+@@ -2004,6 +2019,7 @@ static inline int remap_p4d_range(struct
+ {
+ p4d_t *p4d;
+ unsigned long next;
++ int err;
+
+ pfn -= addr >> PAGE_SHIFT;
+ p4d = p4d_alloc(mm, pgd, addr);
+@@ -2011,9 +2027,10 @@ static inline int remap_p4d_range(struct
+ return -ENOMEM;
+ do {
+ next = p4d_addr_end(addr, end);
+- if (remap_pud_range(mm, p4d, addr, next,
+- pfn + (addr >> PAGE_SHIFT), prot))
+- return -ENOMEM;
++ err = remap_pud_range(mm, p4d, addr, next,
++ pfn + (addr >> PAGE_SHIFT), prot);
++ if (err)
++ return err;
+ } while (p4d++, addr = next, addr != end);
+ return 0;
+ }
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -307,6 +307,42 @@ unsigned long change_protection(struct v
+ return pages;
+ }
+
++static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
++ unsigned long next, struct mm_walk *walk)
++{
++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
++ 0 : -EACCES;
++}
++
++static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
++ unsigned long addr, unsigned long next,
++ struct mm_walk *walk)
++{
++ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
++ 0 : -EACCES;
++}
++
++static int prot_none_test(unsigned long addr, unsigned long next,
++ struct mm_walk *walk)
++{
++ return 0;
++}
++
++static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
++ unsigned long end, unsigned long newflags)
++{
++ pgprot_t new_pgprot = vm_get_page_prot(newflags);
++ struct mm_walk prot_none_walk = {
++ .pte_entry = prot_none_pte_entry,
++ .hugetlb_entry = prot_none_hugetlb_entry,
++ .test_walk = prot_none_test,
++ .mm = current->mm,
++ .private = &new_pgprot,
++ };
++
++ return walk_page_range(start, end, &prot_none_walk);
++}
++
+ int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned long newflags)
+@@ -325,6 +361,19 @@ mprotect_fixup(struct vm_area_struct *vm
+ }
+
+ /*
++ * Do PROT_NONE PFN permission checks here when we can still
++ * bail out without undoing a lot of state. This is a rather
++ * uncommon case, so doesn't need to be very optimized.
++ */
++ if (arch_has_pfn_modify_check() &&
++ (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
++ (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
++ error = prot_none_walk(vma, start, end, newflags);
++ if (error)
++ return error;
++ }
++
++ /*
+ * If we make a private mapping writable we increase our commit;
+ * but (without finer accounting) cannot reduce our commit if we
+ * make it unwritable again. hugetlb mapping were accounted for
diff --git a/patches.drivers/ACPI-PCI-Bail-early-in-acpi_pci_add_bus-if-there-is- b/patches.drivers/ACPI-PCI-Bail-early-in-acpi_pci_add_bus-if-there-is-
new file mode 100644
index 0000000000..02293ba8f9
--- /dev/null
+++ b/patches.drivers/ACPI-PCI-Bail-early-in-acpi_pci_add_bus-if-there-is-
@@ -0,0 +1,45 @@
+From a0040c0145945d3bd203df8fa97f6dfa819f3f7d Mon Sep 17 00:00:00 2001
+From: Vitaly Kuznetsov <vkuznets@redhat.com>
+Date: Thu, 14 Sep 2017 16:50:14 +0200
+Subject: [PATCH] ACPI / PCI: Bail early in acpi_pci_add_bus() if there is no ACPI handle
+Git-commit: a0040c0145945d3bd203df8fa97f6dfa819f3f7d
+Patch-mainline: v4.15-rc1
+References: bsc#1051510
+
+Hyper-V instances support PCI pass-through, which is implemented through the
+PV pci-hyperv driver. When a device is passed through, a new root PCI bus is
+created in the guest. The bus sits on top of VMBus and has no associated
+information in ACPI. acpi_pci_add_bus() in this case proceeds all the way
+to acpi_evaluate_dsm(), which reports
+
+ ACPI: \: failed to evaluate _DSM (0x1001)
+
+While acpi_pci_slot_enumerate() and acpiphp_enumerate_slots() are protected
+against ACPI_HANDLE() being NULL and do nothing, acpi_evaluate_dsm() is not
+and gives us the error. It seems the correct fix is to not do anything in
+acpi_pci_add_bus() in such cases.
+
+Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/pci/pci-acpi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
+index a8da543b3814..4708eb9df71b 100644
+--- a/drivers/pci/pci-acpi.c
++++ b/drivers/pci/pci-acpi.c
+@@ -624,7 +624,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
+ union acpi_object *obj;
+ struct pci_host_bridge *bridge;
+
+- if (acpi_pci_disabled || !bus->bridge)
++ if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
+ return;
+
+ acpi_pci_slot_enumerate(bus);
+--
+2.18.0
+
diff --git a/patches.drivers/EDAC-altera-Fix-ARM64-build-warning b/patches.drivers/EDAC-altera-Fix-ARM64-build-warning
new file mode 100644
index 0000000000..38d448a3c8
--- /dev/null
+++ b/patches.drivers/EDAC-altera-Fix-ARM64-build-warning
@@ -0,0 +1,49 @@
+From 9ef20753e044f7468c4113e5aecd785419b0b3cc Mon Sep 17 00:00:00 2001
+From: Thor Thayer <thor.thayer@linux.intel.com>
+Date: Mon, 14 May 2018 12:04:01 -0500
+Subject: [PATCH] EDAC, altera: Fix ARM64 build warning
+Git-commit: 9ef20753e044f7468c4113e5aecd785419b0b3cc
+Patch-mainline: v4.18-rc1
+References: bsc#1051510
+
+The kbuild test robot reported the following warning:
+
+ drivers/edac/altera_edac.c: In function 'ocram_free_mem':
+ drivers/edac/altera_edac.c:1410:42: warning: cast from pointer to integer
+ of different size [-Wpointer-to-int-cast]
+ gen_pool_free((struct gen_pool *)other, (u32)p, size);
+ ^
+
+After adding support for ARM64 architectures, the unsigned long
+parameter is 64 bits and causes a build warning on 64-bit configs. Fix
+by casting to the correct size (unsigned long) instead of u32.
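+
+For reference, a minimal sketch (not driver code) of why the original
+cast warns on LP64 targets; the first cast deliberately reproduces the
+warning:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        int x = 0;
+        void *p = &x;
+        /* Direct cast to a narrower integer: warns and truncates. */
+        unsigned int narrow = (unsigned int)p;
+        /* unsigned long has pointer width on LP64, so this is clean. */
+        unsigned long wide = (unsigned long)p;
+
+        printf("%x %lx\n", narrow, wide);
+        return 0;
+    }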
+
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Thor Thayer <thor.thayer@linux.intel.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Fixes: c3eea1942a16 ("EDAC, altera: Add Altera L2 cache and OCRAM support")
+Link: http://lkml.kernel.org/r/1526317441-4996-1-git-send-email-thor.thayer@linux.intel.com
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/edac/altera_edac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
+index 1f12fdf2ed00..d0d5c4dbe097 100644
+--- a/drivers/edac/altera_edac.c
++++ b/drivers/edac/altera_edac.c
+@@ -1415,7 +1415,7 @@ static void *ocram_alloc_mem(size_t size, void **other)
+
+ static void ocram_free_mem(void *p, size_t size, void *other)
+ {
+- gen_pool_free((struct gen_pool *)other, (u32)p, size);
++ gen_pool_free((struct gen_pool *)other, (unsigned long)p, size);
+ }
+
+ static const struct edac_device_prv_data ocramecc_data = {
+--
+2.18.0
+
diff --git a/patches.drivers/EDAC-mv64x60-Fix-an-error-handling-path b/patches.drivers/EDAC-mv64x60-Fix-an-error-handling-path
new file mode 100644
index 0000000000..95cc28fe15
--- /dev/null
+++ b/patches.drivers/EDAC-mv64x60-Fix-an-error-handling-path
@@ -0,0 +1,40 @@
+From 68fa24f9121c04ef146b5158f538c8b32f285be5 Mon Sep 17 00:00:00 2001
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Date: Sun, 7 Jan 2018 21:54:00 +0100
+Subject: [PATCH] EDAC, mv64x60: Fix an error handling path
+Git-commit: 68fa24f9121c04ef146b5158f538c8b32f285be5
+Patch-mainline: v4.16-rc1
+References: bsc#1051510
+
+We should not call edac_mc_del_mc() if a corresponding call to
+edac_mc_add_mc() has not been performed yet.
+
+So here, we should go to err instead of err2 to branch at the right
+place of the error handling path.
+
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20180107205400.14068-1-christophe.jaillet@wanadoo.fr
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/edac/mv64x60_edac.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
+index ec5d695bbb72..3c68bb525d5d 100644
+--- a/drivers/edac/mv64x60_edac.c
++++ b/drivers/edac/mv64x60_edac.c
+@@ -758,7 +758,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
+ /* Non-ECC RAM? */
+ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
+ res = -ENODEV;
+- goto err2;
++ goto err;
+ }
+
+ edac_dbg(3, "init mci\n");
+--
+2.18.0
+
diff --git a/patches.drivers/EDAC-octeon-Fix-an-uninitialized-variable-warning b/patches.drivers/EDAC-octeon-Fix-an-uninitialized-variable-warning
new file mode 100644
index 0000000000..454259bbd4
--- /dev/null
+++ b/patches.drivers/EDAC-octeon-Fix-an-uninitialized-variable-warning
@@ -0,0 +1,52 @@
+From 544e92581a2ac44607d7cc602c6b54d18656f56d Mon Sep 17 00:00:00 2001
+From: James Hogan <jhogan@kernel.org>
+Date: Mon, 13 Nov 2017 16:12:06 +0000
+Subject: [PATCH] EDAC, octeon: Fix an uninitialized variable warning
+Mime-version: 1.0
+Content-type: text/plain; charset=UTF-8
+Content-transfer-encoding: 8bit
+Git-commit: 544e92581a2ac44607d7cc602c6b54d18656f56d
+Patch-mainline: v4.16-rc1
+References: bsc#1051510
+
+Fix an uninitialized variable warning in the Octeon EDAC driver, as seen
+in MIPS cavium_octeon_defconfig builds since v4.14 with Codescape GNU
+Tools 2016.05-03:
+
+ drivers/edac/octeon_edac-lmc.c In function ‘octeon_lmc_edac_poll_o2’:
+ drivers/edac/octeon_edac-lmc.c:87:24: warning: ‘((long unsigned int*)&int_reg)[1]’ may \
+ be used uninitialized in this function [-Wmaybe-uninitialized]
+ if (int_reg.s.sec_err || int_reg.s.ded_err) {
+ ^
+Initialise the whole int_reg variable to zero before the conditional
+assignments in the error injection case.
+
+Signed-off-by: James Hogan <jhogan@kernel.org>
+Acked-by: David Daney <david.daney@cavium.com>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Cc: linux-mips@linux-mips.org
+Cc: <stable@vger.kernel.org> # 3.15+
+Fixes: 1bc021e81565 ("EDAC: Octeon: Add error injection support")
+Link: http://lkml.kernel.org/r/20171113161206.20990-1-james.hogan@mips.com
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/edac/octeon_edac-lmc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/edac/octeon_edac-lmc.c b/drivers/edac/octeon_edac-lmc.c
+index 9c1ffe3e912b..aeb222ca3ed1 100644
+--- a/drivers/edac/octeon_edac-lmc.c
++++ b/drivers/edac/octeon_edac-lmc.c
+@@ -78,6 +78,7 @@ static void octeon_lmc_edac_poll_o2(struct mem_ctl_info *mci)
+ if (!pvt->inject)
+ int_reg.u64 = cvmx_read_csr(CVMX_LMCX_INT(mci->mc_idx));
+ else {
++ int_reg.u64 = 0;
+ if (pvt->error_type == 1)
+ int_reg.s.sec_err = 1;
+ if (pvt->error_type == 2)
+--
+2.18.0
+
diff --git a/patches.drivers/EDAC-sb_edac-Fix-missing-break-in-switch b/patches.drivers/EDAC-sb_edac-Fix-missing-break-in-switch
new file mode 100644
index 0000000000..0ea680c804
--- /dev/null
+++ b/patches.drivers/EDAC-sb_edac-Fix-missing-break-in-switch
@@ -0,0 +1,37 @@
+From a8e9b186f153a44690ad0363a56716e7077ad28c Mon Sep 17 00:00:00 2001
+From: "Gustavo A. R. Silva" <garsilva@embeddedor.com>
+Date: Mon, 16 Oct 2017 12:40:29 -0500
+Subject: [PATCH] EDAC, sb_edac: Fix missing break in switch
+Git-commit: a8e9b186f153a44690ad0363a56716e7077ad28c
+Patch-mainline: v4.15-rc1
+References: bsc#1051510
+
+Add missing break statement in order to prevent the code from falling
+through.
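+
+A generic sketch (not the driver's code) of why the break matters:
+without it, the first case unintentionally also executes the second
+case's assignment.
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        int id = 1;
+        const char *ta = "unset", *ras = "unset";
+
+        switch (id) {
+        case 1:
+            ta = "set";
+            break;  /* without this, execution falls into case 2 */
+        case 2:
+            ras = "set";
+            break;
+        }
+        printf("ta=%s ras=%s\n", ta, ras);
+        return 0;
+    }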
+
+Signed-off-by: Gustavo A. R. Silva <garsilva@embeddedor.com>
+Cc: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
+Cc: linux-edac <linux-edac@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20171016174029.GA19757@embeddedor.com
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/edac/sb_edac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
+index 72b98a081d2b..f34430f99fd8 100644
+--- a/drivers/edac/sb_edac.c
++++ b/drivers/edac/sb_edac.c
+@@ -2485,6 +2485,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
+ pvt->pci_ta = pdev;
++ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
+ pvt->pci_ras = pdev;
+--
+2.18.0
+
diff --git a/patches.drivers/media-omap3isp-fix-unbalanced-dma_iommu_mapping b/patches.drivers/media-omap3isp-fix-unbalanced-dma_iommu_mapping
new file mode 100644
index 0000000000..025c821d16
--- /dev/null
+++ b/patches.drivers/media-omap3isp-fix-unbalanced-dma_iommu_mapping
@@ -0,0 +1,61 @@
+From b7e1e6859fbf60519fd82d7120cee106a6019512 Mon Sep 17 00:00:00 2001
+From: Suman Anna <s-anna@ti.com>
+Date: Wed, 14 Mar 2018 11:41:36 -0400
+Subject: [PATCH] media: omap3isp: fix unbalanced dma_iommu_mapping
+Git-commit: b7e1e6859fbf60519fd82d7120cee106a6019512
+Patch-mainline: v4.18-rc1
+References: bsc#1051510
+
+The OMAP3 ISP driver manages its MMU mappings through the IOMMU-aware
+ARM DMA backend. The current code creates a dma_iommu_mapping and
+attaches this to the ISP device, but never detaches the mapping in
+either the probe failure paths or the driver remove path resulting
+in an unbalanced mapping refcount and a memory leak. Fix this properly.
+
+Reported-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Suman Anna <s-anna@ti.com>
+Tested-by: Pavel Machek <pavel@ucw.cz>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/media/platform/omap3isp/isp.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
+index 16c50099cccd..9952f95cfe4a 100644
+--- a/drivers/media/platform/omap3isp/isp.c
++++ b/drivers/media/platform/omap3isp/isp.c
+@@ -1938,6 +1938,7 @@ static int isp_initialize_modules(struct isp_device *isp)
+
+ static void isp_detach_iommu(struct isp_device *isp)
+ {
++ arm_iommu_detach_device(isp->dev);
+ arm_iommu_release_mapping(isp->mapping);
+ isp->mapping = NULL;
+ }
+@@ -1954,8 +1955,7 @@ static int isp_attach_iommu(struct isp_device *isp)
+ mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
+ if (IS_ERR(mapping)) {
+ dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
+- ret = PTR_ERR(mapping);
+- goto error;
++ return PTR_ERR(mapping);
+ }
+
+ isp->mapping = mapping;
+@@ -1970,7 +1970,8 @@ static int isp_attach_iommu(struct isp_device *isp)
+ return 0;
+
+ error:
+- isp_detach_iommu(isp);
++ arm_iommu_release_mapping(isp->mapping);
++ isp->mapping = NULL;
+ return ret;
+ }
+
+--
+2.18.0
+
diff --git a/patches.drivers/media-videobuf2-core-don-t-call-memop-finish-when-qu b/patches.drivers/media-videobuf2-core-don-t-call-memop-finish-when-qu
new file mode 100644
index 0000000000..ee1bcea910
--- /dev/null
+++ b/patches.drivers/media-videobuf2-core-don-t-call-memop-finish-when-qu
@@ -0,0 +1,41 @@
+From 90b2da89a083e1395cb322521a42397c49ae4500 Mon Sep 17 00:00:00 2001
+From: Hans Verkuil <hans.verkuil@cisco.com>
+Date: Mon, 21 May 2018 08:43:02 -0400
+Subject: [PATCH] media: videobuf2-core: don't call memop 'finish' when queueing
+Git-commit: 90b2da89a083e1395cb322521a42397c49ae4500
+Patch-mainline: v4.18-rc1
+References: bsc#1051510
+
+When a buffer is queued or requeued in vb2_buffer_done, don't
+call the finish memop. In this case the buffer is only returned to vb2,
+not to userspace.
+
+Calling 'finish' here will cause an imbalance when the queue is
+canceled, since the core will call the same memop again.
+
+Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/media/v4l2-core/videobuf2-core.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/v4l2-core/videobuf2-core.c
++++ b/drivers/media/v4l2-core/videobuf2-core.c
+@@ -914,9 +914,12 @@ void vb2_buffer_done(struct vb2_buffer *
+ dprintk(4, "done processing on buffer %d, state: %d\n",
+ vb->index, state);
+
+- /* sync buffers */
+- for (plane = 0; plane < vb->num_planes; ++plane)
+- call_void_memop(vb, finish, vb->planes[plane].mem_priv);
++ if (state != VB2_BUF_STATE_QUEUED &&
++ state != VB2_BUF_STATE_REQUEUEING) {
++ /* sync buffers */
++ for (plane = 0; plane < vb->num_planes; ++plane)
++ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
++ }
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (state == VB2_BUF_STATE_QUEUED ||
diff --git a/patches.drivers/xhci-Fix-perceived-dead-host-due-to-runtime-s.patch b/patches.drivers/xhci-Fix-perceived-dead-host-due-to-runtime-s.patch
new file mode 100644
index 0000000000..cd5eb77742
--- /dev/null
+++ b/patches.drivers/xhci-Fix-perceived-dead-host-due-to-runtime-s.patch
@@ -0,0 +1,130 @@
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Thu, 21 Jun 2018 16:19:41 +0300
+Subject: [PATCH] xhci: Fix perceived dead host due to runtime suspend race with event handler
+Git-commit: 229bc19fd7aca4f37964af06e3583c1c8f36b5d6
+Patch-mainline: 4.18-rc3
+References: bsc#1051510
+
+[ backport carried from 4.4.y stable tree ]
+
+Don't rely on the event interrupt (EINT) bit alone to detect a pending
+port change in resume. If no change event is detected the host may be
+suspended again; otherwise the roothubs are resumed.
+
+There is a lag in the xHC setting EINT. If we don't notice the pending
+change in resume, and the controller is runtime suspended again, the
+event handler assumes the host is dead as it will fail to read the xHC
+registers once PCI puts the controller into the D3 state.
+
+[ 268.520969] xhci_hcd: xhci_resume: starting port polling.
+[ 268.520985] xhci_hcd: xhci_hub_status_data: stopping port polling.
+[ 268.521030] xhci_hcd: xhci_suspend: stopping port polling.
+[ 268.521040] xhci_hcd: // Setting command ring address to 0x349bd001
+[ 268.521139] xhci_hcd: Port Status Change Event for port 3
+[ 268.521149] xhci_hcd: resume root hub
+[ 268.521163] xhci_hcd: port resume event for port 3
+[ 268.521168] xhci_hcd: xHC is not running.
+[ 268.521174] xhci_hcd: handle_port_status: starting port polling.
+[ 268.596322] xhci_hcd: xhci_hc_died: xHCI host controller not responding, assume dead
+
+The EINT lag is described in an additional note in xHCI specs 4.19.2:
+
+"Due to internal xHC scheduling and system delays, there will be a lag
+between a change bit being set and the Port Status Change Event that it
+generated being written to the Event Ring. If SW reads the PORTSC and
+sees a change bit set, there is no guarantee that the corresponding Port
+Status Change Event has already been written into the Event Ring."
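+
+A minimal standalone sketch of the per-port check the new helper below
+performs (the bit positions are stated for illustration and mirror the
+PORT_* / XDEV_RESUME defines in xhci.h; treat them as assumptions):
+
+    #include <stdbool.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define DEMO_PORT_CHANGE_MASK 0x00fe0000u  /* change bits 17..23 */
+    #define DEMO_PORT_PLS_MASK    (0xfu << 5)  /* port link state field */
+    #define DEMO_XDEV_RESUME      (0xfu << 5)  /* PLS value 15 = Resume */
+
+    static bool port_event_pending(uint32_t portsc)
+    {
+        return (portsc & DEMO_PORT_CHANGE_MASK) ||
+               (portsc & DEMO_PORT_PLS_MASK) == DEMO_XDEV_RESUME;
+    }
+
+    int main(void)
+    {
+        /* A set change bit is reason enough to resume the roothubs,
+         * even if STS_EINT has not been latched yet. */
+        printf("%d\n", port_event_pending(1u << 17));
+        return 0;
+    }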
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+---
+ drivers/usb/host/xhci.c | 40 +++++++++++++++++++++++++++++++++++++---
+ drivers/usb/host/xhci.h | 4 ++++
+ 2 files changed, 41 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index f2e9f59c90d6..2d837b6bd495 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -887,6 +887,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ }
+
++static bool xhci_pending_portevent(struct xhci_hcd *xhci)
++{
++ __le32 __iomem **port_array;
++ int port_index;
++ u32 status;
++ u32 portsc;
++
++ status = readl(&xhci->op_regs->status);
++ if (status & STS_EINT)
++ return true;
++ /*
++ * Checking STS_EINT is not enough as there is a lag between a change
++ * bit being set and the Port Status Change Event that it generated
++ * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
++ */
++
++ port_index = xhci->num_usb2_ports;
++ port_array = xhci->usb2_ports;
++ while (port_index--) {
++ portsc = readl(port_array[port_index]);
++ if (portsc & PORT_CHANGE_MASK ||
++ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
++ return true;
++ }
++ port_index = xhci->num_usb3_ports;
++ port_array = xhci->usb3_ports;
++ while (port_index--) {
++ portsc = readl(port_array[port_index]);
++ if (portsc & PORT_CHANGE_MASK ||
++ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
++ return true;
++ }
++ return false;
++}
++
+ /*
+ * Stop HC (not bus-specific)
+ *
+@@ -983,7 +1018,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
+ */
+ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ {
+- u32 command, temp = 0, status;
++ u32 command, temp = 0;
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct usb_hcd *secondary_hcd;
+ int retval = 0;
+@@ -1105,8 +1140,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ done:
+ if (retval == 0) {
+ /* Resume root hubs only when have pending events. */
+- status = readl(&xhci->op_regs->status);
+- if (status & STS_EINT) {
++ if (xhci_pending_portevent(xhci)) {
+ usb_hcd_resume_root_hub(xhci->shared_hcd);
+ usb_hcd_resume_root_hub(hcd);
+ }
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 1715705acc59..84d8871755b7 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -382,6 +382,10 @@ struct xhci_op_regs {
+ #define PORT_PLC (1 << 22)
+ /* port configure error change - port failed to configure its link partner */
+ #define PORT_CEC (1 << 23)
++#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
++ PORT_RC | PORT_PLC | PORT_CEC)
++
++
+ /* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be perfomed to clear this bit and move port
+ * to connected state.
+--
+2.18.0
+
diff --git a/patches.fixes/0001-net-lan78xx-Fix-race-in-tx-pending-skb-size-calculat.patch b/patches.fixes/0001-net-lan78xx-Fix-race-in-tx-pending-skb-size-calculat.patch
new file mode 100644
index 0000000000..7b65cf97e2
--- /dev/null
+++ b/patches.fixes/0001-net-lan78xx-Fix-race-in-tx-pending-skb-size-calculat.patch
@@ -0,0 +1,58 @@
+From dea39aca1d7aef1e2b95b07edeacf04cc8863a2e Mon Sep 17 00:00:00 2001
+From: Stefan Wahren <stefan.wahren@i2se.com>
+Date: Sun, 15 Jul 2018 21:53:20 +0200
+Subject: [PATCH] net: lan78xx: Fix race in tx pending skb size calculation
+Git-commit: dea39aca1d7aef1e2b95b07edeacf04cc8863a2e
+Patch-mainline: v4.18
+References: bsc#1100132
+
+The skb size calculation in lan78xx_tx_bh races with start_xmit,
+which could lead to rare kernel oopses. So protect the whole skb walk with
+a spin lock. As a benefit we can unlink the skb directly.
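+
+A rough sketch of the locking pattern applied below (not the exact
+driver code); holding the queue lock across the walk keeps start_xmit
+from changing the list underneath, and __skb_unlink() then becomes
+legal in place of skb_dequeue():
+
+    unsigned long flags;
+    struct sk_buff *skb;
+
+    spin_lock_irqsave(&tqp->lock, flags);
+    skb_queue_walk(tqp, skb) {
+        /* sizing decisions here can no longer race with start_xmit */
+    }
+    spin_unlock_irqrestore(&tqp->lock, flags);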
+
+This patch was tested on Raspberry Pi 3B+
+
+Link: https://github.com/raspberrypi/linux/issues/2608
+Fixes: 55d7de9de6c3 ("Microchip's LAN7800 family USB 2/3 to 10/100/1000 Ethernet")
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Floris Bos <bos@je-eigen-domein.nl>
+Signed-off-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/net/usb/lan78xx.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index 2e4130746c40..ed10d49eb5e0 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3344,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
+ pkt_cnt = 0;
+ count = 0;
+ length = 0;
++ spin_lock_irqsave(&tqp->lock, flags);
+ for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
+ if (skb_is_gso(skb)) {
+ if (pkt_cnt) {
+@@ -3352,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
+ }
+ count = 1;
+ length = skb->len - TX_OVERHEAD;
+- skb2 = skb_dequeue(tqp);
++ __skb_unlink(skb, tqp);
++ spin_unlock_irqrestore(&tqp->lock, flags);
+ goto gso_skb;
+ }
+
+@@ -3361,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
+ skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
+ pkt_cnt++;
+ }
++ spin_unlock_irqrestore(&tqp->lock, flags);
+
+ /* copy to a single skb */
+ skb = alloc_skb(skb_totallen, GFP_ATOMIC);
+--
+2.16.4
+
diff --git a/patches.fixes/0001-net-lan78xx-fix-rx-handling-before-first-packet-is-s.patch b/patches.fixes/0001-net-lan78xx-fix-rx-handling-before-first-packet-is-s.patch
new file mode 100644
index 0000000000..d5e9e3a13f
--- /dev/null
+++ b/patches.fixes/0001-net-lan78xx-fix-rx-handling-before-first-packet-is-s.patch
@@ -0,0 +1,39 @@
+From 136f55f660192ce04af091642efc75d85e017364 Mon Sep 17 00:00:00 2001
+From: Stefan Wahren <stefan.wahren@i2se.com>
+Date: Sat, 28 Jul 2018 09:52:10 +0200
+Subject: [PATCH] net: lan78xx: fix rx handling before first packet is sent
+Git-commit: 136f55f660192ce04af091642efc75d85e017364
+Patch-mainline: v4.18
+References: bsc#1100132
+
+As long as the bh tasklet hasn't been scheduled once, no packet from the
+rx path will be handled. Since the tx path also schedules the same tasklet,
+this situation only persists until the first packet transmission.
+So fix this issue by scheduling the tasklet after link reset.
+
+Link: https://github.com/raspberrypi/linux/issues/2617
+Fixes: 55d7de9de6c3 ("Microchip's LAN7800 family USB 2/3 to 10/100/1000 Ethernet")
+Suggested-by: Floris Bos <bos@je-eigen-domein.nl>
+Signed-off-by: Stefan Wahren <stefan.wahren@i2se.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/net/usb/lan78xx.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
+index ed10d49eb5e0..aeca484a75b8 100644
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1242,6 +1242,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
+ mod_timer(&dev->stat_monitor,
+ jiffies + STAT_UPDATE_TIMER);
+ }
++
++ tasklet_schedule(&dev->bh);
+ }
+
+ return ret;
+--
+2.16.4
+
diff --git a/patches.fixes/0001-net-qmi_wwan-Add-Netgear-Aircard-779S.patch b/patches.fixes/0001-net-qmi_wwan-Add-Netgear-Aircard-779S.patch
new file mode 100644
index 0000000000..693cd2fd8e
--- /dev/null
+++ b/patches.fixes/0001-net-qmi_wwan-Add-Netgear-Aircard-779S.patch
@@ -0,0 +1,36 @@
+From 2415f3bd059fe050eb98aedf93664d000ceb4e92 Mon Sep 17 00:00:00 2001
+From: Josh Hill <josh@joshuajhill.com>
+Date: Sun, 27 May 2018 20:10:41 -0400
+Subject: [PATCH] net: qmi_wwan: Add Netgear Aircard 779S
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Git-commit: 2415f3bd059fe050eb98aedf93664d000ceb4e92
+Patch-mainline: v4.17
+References: bsc#1090888
+
+Add support for Netgear Aircard 779S
+
+Signed-off-by: Josh Hill <josh@joshuajhill.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 42565dd33aa6..094680871687 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+ {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
+ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
++ {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
+ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
+--
+2.16.4
+
diff --git a/patches.fixes/0001-net-qmi_wwan-add-BroadMobi-BM806U-2020-2033.patch b/patches.fixes/0001-net-qmi_wwan-add-BroadMobi-BM806U-2020-2033.patch
new file mode 100644
index 0000000000..488284b82c
--- /dev/null
+++ b/patches.fixes/0001-net-qmi_wwan-add-BroadMobi-BM806U-2020-2033.patch
@@ -0,0 +1,37 @@
+From 743989254ea9f132517806d8893ca9b6cf9dc86b Mon Sep 17 00:00:00 2001
+From: Pawel Dembicki <paweldembicki@gmail.com>
+Date: Sat, 24 Mar 2018 22:08:14 +0100
+Subject: [PATCH] net: qmi_wwan: add BroadMobi BM806U 2020:2033
+Git-commit: 743989254ea9f132517806d8893ca9b6cf9dc86b
+Patch-mainline: v4.16
+References: bsc#1087092
+
+The BroadMobi BM806U is a Qualcomm MDM9225-based 3G/4G modem.
+The tested BM806U hardware is mounted on a D-Link DWR-921-C3 router.
+The USB ID is added to qmi_wwan.c to allow QMI communication with
+the BM806U.
+
+Tested on 4.14 kernel and OpenWRT.
+
+Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 76ac48095c29..7ced28859261 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1180,6 +1180,7 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
++ {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
+ {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
+--
+2.16.4
+
diff --git a/patches.fixes/0001-net-usb-add-qmi_wwan-if-on-lte-modem-wistron-neweb-d.patch b/patches.fixes/0001-net-usb-add-qmi_wwan-if-on-lte-modem-wistron-neweb-d.patch
new file mode 100644
index 0000000000..02b8a2d462
--- /dev/null
+++ b/patches.fixes/0001-net-usb-add-qmi_wwan-if-on-lte-modem-wistron-neweb-d.patch
@@ -0,0 +1,70 @@
+From d4c4bc11353f3bea6754f7d21e3612c9f32d1d64 Mon Sep 17 00:00:00 2001
+From: Giuseppe Lippolis <giu.lippolis@gmail.com>
+Date: Mon, 26 Mar 2018 16:34:39 +0200
+Subject: [PATCH] net-usb: add qmi_wwan if on lte modem wistron neweb d18q1
+Git-commit: d4c4bc11353f3bea6754f7d21e3612c9f32d1d64
+Patch-mainline: v4.16
+References: bsc#1087092
+
+This modem is embedded in the D-Link DWR-921 router.
+ The OEM configuration states:
+
+ T: Bus=02 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 2 Spd=480 MxCh= 0
+ D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1
+ P: Vendor=1435 ProdID=0918 Rev= 2.32
+ S: Manufacturer=Android
+ S: Product=Android
+ S: SerialNumber=0123456789ABCDEF
+ C:* #Ifs= 7 Cfg#= 1 Atr=80 MxPwr=500mA
+ I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option
+ E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ I:* If#= 1 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none)
+ E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option
+ E: Ad=84(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
+ E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+ E: Ad=86(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
+ E: Ad=85(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+ E: Ad=88(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
+ E: Ad=87(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ I:* If#= 5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan
+ E: Ad=8a(I) Atr=03(Int.) MxPS= 64 Ivl=32ms
+ E: Ad=89(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=06(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ I:* If#= 6 Alt= 0 #EPs= 2 Cls=08(stor.) Sub=06 Prot=50 Driver=(none)
+ E: Ad=8b(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms
+ E: Ad=07(O) Atr=02(Bulk) MxPS= 512 Ivl=125us
+
+Tested on openwrt distribution
+
+Signed-off-by: Giuseppe Lippolis <giu.lippolis@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/net/usb/qmi_wwan.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 2508ab08fc5a..ca066b785e9f 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1104,6 +1104,9 @@ static const struct usb_device_id products[] = {
+ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
++ {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
++ {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */
++ {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */
+ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
+ {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
+ {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
+--
+2.16.4
+
diff --git a/patches.fixes/0001-net-usb-asix-replace-mii_nway_restart-in-resume-path.patch b/patches.fixes/0001-net-usb-asix-replace-mii_nway_restart-in-resume-path.patch
new file mode 100644
index 0000000000..0ec95d3815
--- /dev/null
+++ b/patches.fixes/0001-net-usb-asix-replace-mii_nway_restart-in-resume-path.patch
@@ -0,0 +1,46 @@
+From 5c968f48021a9b3faa61ac2543cfab32461c0e05 Mon Sep 17 00:00:00 2001
+From: Alexander Couzens <lynxis@fe80.eu>
+Date: Tue, 17 Jul 2018 13:17:09 +0200
+Subject: [PATCH] net: usb: asix: replace mii_nway_restart in resume path
+Git-commit: 5c968f48021a9b3faa61ac2543cfab32461c0e05
+Patch-mainline: v4.18
+References: bsc#1100132
+
+mii_nway_restart is not PM-aware, which results in an rtnl deadlock.
+Implement the mii_nway_restart behaviour manually by setting BMCR_ANRESTART
+if BMCR_ANENABLE is set.
+
+To reproduce:
+* plug an asix based usb network interface
+* wait until the device enters PM (~5 sec)
+* `ip link set eth1 up` will never return
+
+Fixes: d9fe64e51114 ("net: asix: Add in_pm parameter")
+Signed-off-by: Alexander Couzens <lynxis@fe80.eu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/net/usb/asix_devices.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 3d4f7959dabb..b1b3d8f7e67d 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
+ priv->presvd_phy_advertise);
+
+ /* Restore BMCR */
++ if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
++ priv->presvd_phy_bmcr |= BMCR_ANRESTART;
++
+ asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
+ priv->presvd_phy_bmcr);
+
+- mii_nway_restart(&dev->mii);
+ priv->presvd_phy_advertise = 0;
+ priv->presvd_phy_bmcr = 0;
+ }
+--
+2.16.4
+
diff --git a/patches.fixes/0001-typec-tcpm-fusb302-Resolve-out-of-order-messaging-ev.patch b/patches.fixes/0001-typec-tcpm-fusb302-Resolve-out-of-order-messaging-ev.patch
new file mode 100644
index 0000000000..e7c8740e2f
--- /dev/null
+++ b/patches.fixes/0001-typec-tcpm-fusb302-Resolve-out-of-order-messaging-ev.patch
@@ -0,0 +1,89 @@
+From ab69f61321140ff632d560775bc226259a78dfa2 Mon Sep 17 00:00:00 2001
+From: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+Date: Tue, 21 Nov 2017 14:12:12 +0000
+Subject: [PATCH] typec: tcpm: fusb302: Resolve out of order messaging events
+Git-commit: ab69f61321140ff632d560775bc226259a78dfa2
+Patch-mainline: v4.16
+References: bsc#1087092
+
+The expectation in the FUSB302 driver is that a TX_SUCCESS event
+should occur after a message has been sent, but before a GCRCSENT
+event is raised to indicate successful receipt of a message from
+the partner. However in some circumstances it is possible to see
+the hardware raise a GCRCSENT event before a TX_SUCCESS event
+is raised. The upshot of this is that the GCRCSENT handling portion
+of code ends up reporting the GoodCRC message to TCPM because the
+TX_SUCCESS event hasn't yet arrived to trigger a consumption of it.
+When TX_SUCCESS is then raised by the chip it ends up consuming the
+actual message that was meant for TCPM, and this incorrect sequence
+results in a hard reset from TCPM.
+
+To avoid this problem, this commit updates the message reading
+code to check whether a GoodCRC message was received or not. Based
+on this check it will either report that the previous transmission
+has completed or it will pass the msg data to TCPM for further
+processing. This way the incorrect ordering of the events no longer
+matters.
+
+Signed-off-by: Adam Thomson <Adam.Thomson.Opensource@diasemi.com>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/usb/typec/fusb302/fusb302.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
+index 4ce1df248c2f..1877edef6584 100644
+--- a/drivers/staging/typec/fusb302/fusb302.c
++++ b/drivers/staging/typec/fusb302/fusb302.c
+@@ -1543,6 +1543,21 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
+ fusb302_log(chip, "PD message header: %x", msg->header);
+ fusb302_log(chip, "PD message len: %d", len);
+
++ /*
++ * Check if we've read off a GoodCRC message. If so then indicate to
++ * TCPM that the previous transmission has completed. Otherwise we pass
++ * the received message over to TCPM for processing.
++ *
++ * We make this check here instead of basing the reporting decision on
++ * the IRQ event type, as it's possible for the chip to report the
++ * TX_SUCCESS and GCRCSENT events out of order on occasion, so we need
++ * to check the message type to ensure correct reporting to TCPM.
++ */
++ if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC))
++ tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
++ else
++ tcpm_pd_receive(chip->tcpm_port, msg);
++
+ return ret;
+ }
+
+@@ -1650,13 +1665,12 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
+
+ if (interrupta & FUSB_REG_INTERRUPTA_TX_SUCCESS) {
+ fusb302_log(chip, "IRQ: PD tx success");
+- /* read out the received good CRC */
+ ret = fusb302_pd_read_message(chip, &pd_msg);
+ if (ret < 0) {
+- fusb302_log(chip, "cannot read in GCRC, ret=%d", ret);
++ fusb302_log(chip,
++ "cannot read in PD message, ret=%d", ret);
+ goto done;
+ }
+- tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
+ }
+
+ if (interrupta & FUSB_REG_INTERRUPTA_HARDRESET) {
+@@ -1677,7 +1691,6 @@ static irqreturn_t fusb302_irq_intn(int irq, void *dev_id)
+ "cannot read in PD message, ret=%d", ret);
+ goto done;
+ }
+- tcpm_pd_receive(chip->tcpm_port, &pd_msg);
+ }
+ done:
+ mutex_unlock(&chip->lock);
+--
+2.16.4
+
diff --git a/patches.fixes/inet-frag-enforce-memory-limits-earlier.patch b/patches.fixes/inet-frag-enforce-memory-limits-earlier.patch
new file mode 100644
index 0000000000..6676452d0e
--- /dev/null
+++ b/patches.fixes/inet-frag-enforce-memory-limits-earlier.patch
@@ -0,0 +1,63 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 30 Jul 2018 20:09:11 -0700
+Subject: inet: frag: enforce memory limits earlier
+Patch-mainline: v4.18-rc8
+Git-commit: 56e2c94f055d328f5f6b0a5c1721cca2f2d4e0a1
+References: CVE-2018-5391 bsc#1103097
+
+We currently check the frags memory usage only when
+a new frag queue is created. This allows attackers to first
+consume the memory budget (default: 4 MB) by creating thousands
+of frag queues, then sending tiny skbs to exceed the high_thresh
+limit by 2 to 3 orders of magnitude.
+
+Note that before commit 648700f76b03 ("inet: frags: use rhashtables
+for reassembly units"), the work queue could be starved under DoS,
+getting no CPU cycles.
+After commit 648700f76b03, only the per-frag-queue timer can eventually
+remove an incomplete frag queue and its skbs.
+
+Fixes: b13d3cbfb8e8 ("inet: frag: move eviction of queues to work queue")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Jann Horn <jannh@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Peter Oskolkov <posk@google.com>
+Cc: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ net/ipv4/inet_fragment.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
+index 4a3572d9a9c3..4a95e32b3345 100644
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -356,11 +356,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
+ {
+ struct inet_frag_queue *q;
+
+- if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
+- inet_frag_schedule_worker(f);
+- return NULL;
+- }
+-
+ q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
+ if (!q)
+ return NULL;
+@@ -397,6 +392,10 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ struct inet_frag_queue *q;
+ int depth = 0;
+
++ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
++ inet_frag_schedule_worker(f);
++ return NULL;
++ }
+ if (frag_mem_limit(nf) > nf->low_thresh)
+ inet_frag_schedule_worker(f);
+
+--
+2.18.0
+
diff --git a/patches.fixes/init-rename-and-re-order-boot_cpu_state_init.patch b/patches.fixes/init-rename-and-re-order-boot_cpu_state_init.patch
new file mode 100644
index 0000000000..550850aa60
--- /dev/null
+++ b/patches.fixes/init-rename-and-re-order-boot_cpu_state_init.patch
@@ -0,0 +1,91 @@
+From b5b1404d0815894de0690de8a1ab58269e56eae6 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 12 Aug 2018 12:19:42 -0700
+Subject: [PATCH] init: rename and re-order boot_cpu_state_init()
+Git-commit: b5b1404d0815894de0690de8a1ab58269e56eae6
+Patch-mainline: v4.18
+References: bsc#1104365
+
+This is purely a preparatory patch for upcoming changes during the 4.19
+merge window.
+
+We have a function called "boot_cpu_state_init()" that isn't really
+about the bootup cpu state: that is done much earlier by the similarly
+named "boot_cpu_init()" (note lack of "state" in name).
+
+This function initializes some hotplug CPU state, and needs to run after
+the percpu data has been properly initialized. It even has a comment to
+that effect.
+
+Except it _doesn't_ actually run after the percpu data has been properly
+initialized. On x86 it happens to do that, but on at least arm and
+arm64, the percpu base pointers are initialized by the arch-specific
+'smp_prepare_boot_cpu()' hook, which ran _after_ boot_cpu_state_init().
+
+This had some unexpected results, and in particular we have a patch
+pending for the merge window that did the obvious cleanup of using
+'this_cpu_write()' in the cpu hotplug init code:
+
+ - per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+ + this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
+
+which is obviously the right thing to do. Except because of the
+ordering issue, it actually failed miserably and unexpectedly on arm64.
+
+So this just fixes the ordering, and changes the name of the function to
+be 'boot_cpu_hotplug_init()' to make it obvious that it's about cpu
+hotplug state, because the core CPU state was supposed to have already
+been done earlier.
+
+Marked for stable, since the (not yet merged) patch that will show this
+problem is marked for stable.
+
+Reported-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ include/linux/cpu.h | 2 +-
+ init/main.c | 2 +-
+ kernel/cpu.c | 2 +-
+ 3 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -29,7 +29,7 @@ struct cpu {
+ };
+
+ extern void boot_cpu_init(void);
+-extern void boot_cpu_state_init(void);
++extern void boot_cpu_hotplug_init(void);
+ extern void cpu_init(void);
+ extern void trap_init(void);
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -524,8 +524,8 @@ asmlinkage __visible void __init start_k
+ setup_command_line(command_line);
+ setup_nr_cpu_ids();
+ setup_per_cpu_areas();
+- boot_cpu_state_init();
+ smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
++ boot_cpu_hotplug_init();
+
+ build_all_zonelists(NULL, NULL);
+ page_alloc_init();
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1814,7 +1814,7 @@ void __init boot_cpu_init(void)
+ /*
+ * Must be called _AFTER_ setting up the per_cpu areas
+ */
+-void __init boot_cpu_state_init(void)
++void __init boot_cpu_hotplug_init(void)
+ {
+ per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+ }
diff --git a/patches.fixes/ip-discard-IPv4-datagrams-with-overlapping-segments.patch b/patches.fixes/ip-discard-IPv4-datagrams-with-overlapping-segments.patch
new file mode 100644
index 0000000000..1fdcbf3eb1
--- /dev/null
+++ b/patches.fixes/ip-discard-IPv4-datagrams-with-overlapping-segments.patch
@@ -0,0 +1,158 @@
+From: Peter Oskolkov <posk@google.com>
+Date: Thu, 2 Aug 2018 23:34:37 +0000
+Subject: ip: discard IPv4 datagrams with overlapping segments.
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+Git-commit: 7969e5c40dfd04799d4341f1b7cd266b6e47f227
+References: CVE-2018-5391 bsc#1103097
+
+This behavior is required in IPv6, and there is little need
+to tolerate overlapping fragments in IPv4. This change
+simplifies the code and eliminates potential DDoS attack vectors.
+
+Tested: ran ip_defrag selftest (not yet available upstream).
+
+Suggested-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Peter Oskolkov <posk@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Florian Westphal <fw@strlen.de>
+Acked-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ include/uapi/linux/snmp.h | 1 +
+ net/ipv4/ip_fragment.c | 75 ++++++++++-----------------------------
+ net/ipv4/proc.c | 1 +
+ 3 files changed, 21 insertions(+), 56 deletions(-)
+
+diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
+index a8f3a1da5e81..30056849cc7e 100644
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -55,6 +55,7 @@ enum
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
++ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
+ __IPSTATS_MIB_MAX
+ };
+
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 752711cd4834..348c496c118c 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -340,6 +340,7 @@ static int ip_frag_reinit(struct ipq *qp)
+ /* Add new segment to existing queue. */
+ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ {
++ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct sk_buff *prev, *next;
+ struct net_device *dev;
+ unsigned int fragsize;
+@@ -420,65 +421,23 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ }
+
+ found:
+- /* We found where to put this one. Check for overlap with
+- * preceding fragment, and, if needed, align things so that
+- * any overlaps are eliminated.
++ /* RFC5722, Section 4, amended by Errata ID : 3089
++ * When reassembling an IPv6 datagram, if
++ * one or more its constituent fragments is determined to be an
++ * overlapping fragment, the entire datagram (and any constituent
++ * fragments) MUST be silently discarded.
++ *
++ * We do the same here for IPv4.
+ */
+- if (prev) {
+- int i = (FRAG_CB(prev)->offset + prev->len) - offset;
+
+- if (i > 0) {
+- offset += i;
+- err = -EINVAL;
+- if (end <= offset)
+- goto err;
+- err = -ENOMEM;
+- if (!pskb_pull(skb, i))
+- goto err;
+- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+- skb->ip_summed = CHECKSUM_NONE;
+- }
+- }
++ /* Is there an overlap with the previous fragment? */
++ if (prev &&
++ (FRAG_CB(prev)->offset + prev->len) > offset)
++ goto discard_qp;
+
+- err = -ENOMEM;
+-
+- while (next && FRAG_CB(next)->offset < end) {
+- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
+-
+- if (i < next->len) {
+- int delta = -next->truesize;
+-
+- /* Eat head of the next overlapped fragment
+- * and leave the loop. The next ones cannot overlap.
+- */
+- if (!pskb_pull(next, i))
+- goto err;
+- delta += next->truesize;
+- if (delta)
+- add_frag_mem_limit(qp->q.net, delta);
+- FRAG_CB(next)->offset += i;
+- qp->q.meat -= i;
+- if (next->ip_summed != CHECKSUM_UNNECESSARY)
+- next->ip_summed = CHECKSUM_NONE;
+- break;
+- } else {
+- struct sk_buff *free_it = next;
+-
+- /* Old fragment is completely overridden with
+- * new one drop it.
+- */
+- next = next->next;
+-
+- if (prev)
+- prev->next = next;
+- else
+- qp->q.fragments = next;
+-
+- qp->q.meat -= free_it->len;
+- sub_frag_mem_limit(qp->q.net, free_it->truesize);
+- kfree_skb(free_it);
+- }
+- }
++ /* Is there an overlap with the next fragment? */
++ if (next && FRAG_CB(next)->offset < end)
++ goto discard_qp;
+
+ FRAG_CB(skb)->offset = offset;
+
+@@ -525,6 +484,10 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ skb_dst_drop(skb);
+ return -EINPROGRESS;
+
++discard_qp:
++ inet_frag_kill(&qp->q, &ip4_frags);
++ err = -EINVAL;
++ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
+ err:
+ kfree_skb(skb);
+ return err;
+diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
+index 8e687e6439e1..0d30b58a0ea5 100644
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
++ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
+ SNMP_MIB_SENTINEL
+ };
+
+--
+2.18.0
+
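The patch above replaces ip_fragment.c's overlap-trimming loops with an outright discard of the whole reassembly queue. As a rough, self-contained illustration of that policy (a userspace model with simplified struct and function names assumed for the example, not the kernel code), the new check reduces to:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of a queued fragment covering [offset, offset + len). */
    struct frag {
        unsigned int offset;
        unsigned int len;
    };

    /*
     * Return true if the new fragment [offset, end) overlaps either the
     * fragment that precedes it or the one that follows it; in that case
     * the whole queue would be discarded (the post-patch behaviour).
     */
    static bool frag_overlaps(const struct frag *prev, const struct frag *next,
                              unsigned int offset, unsigned int end)
    {
        if (prev && prev->offset + prev->len > offset)
            return true;        /* tail of prev reaches into the new frag */
        if (next && next->offset < end)
            return true;        /* new frag reaches into the head of next */
        return false;
    }

    int main(void)
    {
        struct frag prev = { .offset = 0,    .len = 1200 };
        struct frag next = { .offset = 2400, .len = 1200 };

        /* 1200..2400 fits exactly between the neighbours: accepted. */
        printf("%d\n", frag_overlaps(&prev, &next, 1200, 2400)); /* 0 */
        /* 1100..2300 overlaps the previous fragment: queue discarded. */
        printf("%d\n", frag_overlaps(&prev, &next, 1100, 2300)); /* 1 */
        return 0;
    }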
diff --git a/patches.fixes/ipv4-frags-handle-possible-skb-truesize-change.patch b/patches.fixes/ipv4-frags-handle-possible-skb-truesize-change.patch
new file mode 100644
index 0000000000..2190459073
--- /dev/null
+++ b/patches.fixes/ipv4-frags-handle-possible-skb-truesize-change.patch
@@ -0,0 +1,54 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 30 Jul 2018 21:50:29 -0700
+Subject: ipv4: frags: handle possible skb truesize change
+Patch-mainline: v4.18-rc8
+Git-commit: 4672694bd4f1aebdab0ad763ae4716e89cb15221
+References: CVE-2018-5391 bsc#1103097
+
+ip_frag_queue() might call pskb_pull() on one skb that
+is already in the fragment queue.
+
+We need to take care of possible truesize change, or we
+might have an imbalance of the netns frags memory usage.
+
+IPv6 is immune to this bug, because RFC5722, Section 4,
+amended by Errata ID 3089, states:
+
+ When reassembling an IPv6 datagram, if
+ one or more its constituent fragments is determined to be an
+ overlapping fragment, the entire datagram (and any constituent
+ fragments) MUST be silently discarded.
+
+Fixes: 158f323b9868 ("net: adjust skb->truesize in pskb_expand_head()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ net/ipv4/ip_fragment.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
+index 4bf3b8af0257..752711cd4834 100644
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -446,11 +446,16 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+ int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
+
+ if (i < next->len) {
++ int delta = -next->truesize;
++
+ /* Eat head of the next overlapped fragment
+ * and leave the loop. The next ones cannot overlap.
+ */
+ if (!pskb_pull(next, i))
+ goto err;
++ delta += next->truesize;
++ if (delta)
++ add_frag_mem_limit(qp->q.net, delta);
+ FRAG_CB(next)->offset += i;
+ qp->q.meat -= i;
+ if (next->ip_summed != CHECKSUM_UNNECESSARY)
+--
+2.18.0
+
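The fix above charges the fragment queue for any growth in skb->truesize caused by pskb_pull(). A hedged sketch of the accounting pattern, with plain integers standing in for the skb and the per-netns frag memory counter (all names here are illustrative assumptions):

    #include <stdio.h>

    /* Toy stand-in for the per-netns frag memory counter. */
    static long frag_mem_used;

    static void add_frag_mem_limit_model(long delta)
    {
        frag_mem_used += delta;
    }

    /*
     * Pattern from the patch: snapshot the old truesize, perform the
     * operation that may reallocate the buffer, then account only the
     * difference so the counter never drifts.
     */
    static void pull_and_account(long *truesize, long grow_by)
    {
        long delta = -*truesize;    /* snapshot before the pull */

        *truesize += grow_by;       /* pskb_pull() may enlarge truesize */

        delta += *truesize;         /* difference, zero if nothing changed */
        if (delta)
            add_frag_mem_limit_model(delta);
    }

    int main(void)
    {
        long truesize = 2048;

        pull_and_account(&truesize, 0);    /* no reallocation: no charge */
        pull_and_account(&truesize, 512);  /* buffer grew: charge 512 */
        printf("accounted: %ld\n", frag_mem_used); /* prints 512 */
        return 0;
    }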
diff --git a/patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch b/patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch
new file mode 100644
index 0000000000..d348f4faf9
--- /dev/null
+++ b/patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch
@@ -0,0 +1,61 @@
+From: Florian Westphal <fw@strlen.de>
+Date: Fri, 3 Aug 2018 02:22:20 +0200
+Subject: ipv6: defrag: drop non-last frags smaller than min mtu
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+Git-commit: 0ed4229b08c13c84a3c301a08defdc9e7f4467e6
+References: CVE-2018-5391 bsc#1103097
+
+Don't bother with pathological cases; they only waste cycles.
+IPv6 requires a minimum MTU of 1280, so we should never see fragments
+smaller than this (except the last frag).
+
+v3: don't use awkward "-offset + len"
+v2: drop IPv4 part, which added same check w. IPV4_MIN_MTU (68).
+ There were concerns that there could be even smaller frags
+ generated by intermediate nodes, e.g. on radio networks.
+
+Cc: Peter Oskolkov <posk@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 4 ++++
+ net/ipv6/reassembly.c | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index b263bf3a19f7..3c3fd3f19a95 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -589,6 +589,10 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
+ hdr = ipv6_hdr(skb);
+ fhdr = (struct frag_hdr *)skb_transport_header(skb);
+
++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
++ fhdr->frag_off & htons(IP6_MF))
++ return -EINVAL;
++
+ skb_orphan(skb);
+ fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 846012eae526..9fe5caa7b032 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -558,6 +558,10 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
+ return 1;
+ }
+
++ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
++ fhdr->frag_off & htons(IP6_MF))
++ goto fail_hdr;
++
+ fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
+ skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (fq) {
+--
+2.18.0
+
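The check added to both reassembly paths rejects a non-last fragment shorter than the IPv6 minimum MTU. A small standalone sketch of the same condition with plain parameters (field names are simplified assumptions; the kernel compares the flag in network byte order):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define IPV6_MIN_MTU 1280
    #define IP6_MF       0x0001   /* "more fragments" bit, host order here */

    /*
     * A non-last fragment (MF set) smaller than the IPv6 minimum MTU can
     * only come from a broken or malicious sender, so it is dropped.
     * The last fragment (MF clear) is allowed to be short.
     */
    static bool drop_short_nonlast_frag(uint32_t frag_payload_len,
                                        uint16_t frag_off)
    {
        return frag_payload_len < IPV6_MIN_MTU && (frag_off & IP6_MF);
    }

    int main(void)
    {
        printf("%d\n", drop_short_nonlast_frag(100, IP6_MF));  /* 1: dropped */
        printf("%d\n", drop_short_nonlast_frag(100, 0));       /* 0: last frag */
        printf("%d\n", drop_short_nonlast_frag(1400, IP6_MF)); /* 0: big enough */
        return 0;
    }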
diff --git a/patches.fixes/x86-xen-init-gs-very-early.patch b/patches.fixes/x86-xen-init-gs-very-early.patch
new file mode 100644
index 0000000000..be4221930a
--- /dev/null
+++ b/patches.fixes/x86-xen-init-gs-very-early.patch
@@ -0,0 +1,61 @@
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 1 Feb 2018 13:40:19 +0100
+Subject: x86/xen: init %gs very early to avoid page faults with stack
+ protector
+Patch-mainline: 4.16
+Git-commit: 4f277295e54c5b7340e48efea3fc5cc21a2872b7
+References: bnc#1104777
+
+When running as a Xen pv guest, %gs is initialized some time after
+C code is started. Depending on stack protector usage, this might be
+too late, resulting in page faults.
+
+So set up %gs and MSR_GS_BASE in assembly code already.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Tested-by: Chris Patterson <cjp256@gmail.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+---
+ arch/x86/xen/xen-head.S | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
+index 497cc55..96f26e0 100644
+--- a/arch/x86/xen/xen-head.S
++++ b/arch/x86/xen/xen-head.S
+@@ -9,7 +9,9 @@
+
+ #include <asm/boot.h>
+ #include <asm/asm.h>
++#include <asm/msr.h>
+ #include <asm/page_types.h>
++#include <asm/percpu.h>
+ #include <asm/unwind_hints.h>
+
+ #include <xen/interface/elfnote.h>
+@@ -35,6 +37,20 @@ ENTRY(startup_xen)
+ mov %_ASM_SI, xen_start_info
+ mov $init_thread_union+THREAD_SIZE, %_ASM_SP
+
++#ifdef CONFIG_X86_64
++ /* Set up %gs.
++ *
++ * The base of %gs always points to the bottom of the irqstack
++ * union. If the stack protector canary is enabled, it is
++ * located at %gs:40. Note that, on SMP, the boot cpu uses
++ * init data section till per cpu areas are set up.
++ */
++ movl $MSR_GS_BASE,%ecx
++ movq $INIT_PER_CPU_VAR(irq_stack_union),%rax
++ cdq
++ wrmsr
++#endif
++
+ jmp xen_start_kernel
+ END(startup_xen)
+ __FINIT
+--
+cgit v1.1
+
diff --git a/patches.fixes/xen-netfront-dont-cache-skb_shinfo.patch b/patches.fixes/xen-netfront-dont-cache-skb_shinfo.patch
new file mode 100644
index 0000000000..cac265e7cf
--- /dev/null
+++ b/patches.fixes/xen-netfront-dont-cache-skb_shinfo.patch
@@ -0,0 +1,53 @@
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 9 Aug 2018 16:42:16 +0200
+Subject: xen/netfront: don't cache skb_shinfo()
+Patch-mainline: 4.18
+Git-commit: d472b3a6cf63cd31cae1ed61930f07e6cd6671b5
+References: bnc#1065600
+
+skb_shinfo() can change when calling __pskb_pull_tail(): Don't cache
+its return value.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/xen-netfront.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
+index 2d8812dd1534..9dd2ca62d84a 100644
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -894,7 +894,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ struct sk_buff *skb,
+ struct sk_buff_head *list)
+ {
+- struct skb_shared_info *shinfo = skb_shinfo(skb);
+ RING_IDX cons = queue->rx.rsp_cons;
+ struct sk_buff *nskb;
+
+@@ -903,15 +902,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
+ RING_GET_RESPONSE(&queue->rx, ++cons);
+ skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
+
+- if (shinfo->nr_frags == MAX_SKB_FRAGS) {
++ if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
+ unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
+
+ BUG_ON(pull_to <= skb_headlen(skb));
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+ }
+- BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
++ BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+
+- skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
++ skb_frag_page(nfrag),
+ rx->offset, rx->status, PAGE_SIZE);
+
+ skb_shinfo(nskb)->nr_frags = 0;
+--
+cgit 1.2-0.3.lf.el7
+
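The change above re-reads skb_shinfo(skb) on every use because __pskb_pull_tail() can relocate the data area that the shared info lives behind. A hedged sketch of the general pitfall with a toy buffer that may be reallocated (the struct and helpers are illustrative only, not the netfront code):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        char  *data;
        size_t len;
    };

    /* Metadata kept at the end of the data area, like skb_shared_info. */
    static char *buf_meta(struct buf *b)
    {
        return b->data + b->len - 1;
    }

    /* May reallocate the data area, invalidating previously derived pointers. */
    static void buf_grow(struct buf *b, size_t extra)
    {
        char *p = realloc(b->data, b->len + extra);

        if (!p)
            abort();
        memset(p + b->len, 0, extra);
        b->data = p;
        b->len += extra;
    }

    int main(void)
    {
        struct buf b = { .data = calloc(1, 16), .len = 16 };

        if (!b.data)
            abort();

        /* Analogous to caching skb_shinfo(skb) before __pskb_pull_tail(). */
        uintptr_t cached = (uintptr_t)buf_meta(&b);

        buf_grow(&b, 4096);     /* may move the buffer */

        /* Always re-derive the pointer after a call that can reallocate. */
        printf("fresh meta %p, previously cached address %#lx\n",
               (void *)buf_meta(&b), (unsigned long)cached);

        free(b.data);
        return 0;
    }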
diff --git a/patches.fixes/xen-xsa270-fix.patch b/patches.fixes/xen-xsa270-fix.patch
new file mode 100644
index 0000000000..af35b18ef7
--- /dev/null
+++ b/patches.fixes/xen-xsa270-fix.patch
@@ -0,0 +1,56 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: xen-netback: fix input validation in xenvif_set_hash_mapping()
+Patch-mainline: Not yet, embargo
+References: bnc#1103277
+
+Both len and off are frontend-specified values, so we need to make
+sure there's no overflow when adding the two for the bounds check. We
+also want to avoid undefined behavior and hence use off to index into
+->hash.mapping[] only after bounds checking. At the same time this
+takes care of not applying off twice in the bounds check against
+vif->num_queues.
+
+It is also insufficient to bounds check copy_op.len, as this is len
+truncated to 16 bits.
+
+This is XSA-270.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
+Tested-by: Paul Durrant <paul.durrant@citrix.com>
+---
+The bounds checking against vif->num_queues also occurs too early afaict
+(it should be done after the grant copy). I have patches ready as public
+follow-ups for both this and the (at least latent) issue of the mapping
+array crossing a page boundary.
+
+--- a/drivers/net/xen-netback/hash.c
++++ b/drivers/net/xen-netback/hash.c
+@@ -332,20 +332,22 @@ u32 xenvif_set_hash_mapping_size(struct
+ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+ u32 off)
+ {
+- u32 *mapping = &vif->hash.mapping[off];
++ u32 *mapping = vif->hash.mapping;
+ struct gnttab_copy copy_op = {
+ .source.u.ref = gref,
+ .source.domid = vif->domid,
+- .dest.u.gmfn = virt_to_gfn(mapping),
+ .dest.domid = DOMID_SELF,
+- .dest.offset = xen_offset_in_page(mapping),
+- .len = len * sizeof(u32),
++ .len = len * sizeof(*mapping),
+ .flags = GNTCOPY_source_gref
+ };
+
+- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
++ if ((off + len < off) || (off + len > vif->hash.size) ||
++ len > XEN_PAGE_SIZE / sizeof(*mapping))
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
++ copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
++ copy_op.dest.offset = xen_offset_in_page(mapping + off);
++
+ while (len-- != 0)
+ if (mapping[off++] >= vif->num_queues)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
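The key point of the fix above is that off + len is computed from frontend-controlled values, so the sum itself has to be checked for wraparound before it is compared against the table size. A minimal standalone sketch of an overflow-aware bounds check (u32 modelled as uint32_t; the helper name is an assumption):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Validate that [off, off + len) fits inside a table of 'size' entries.
     * The first test catches 32-bit wraparound of off + len, which would
     * otherwise let a huge 'len' slip past the size comparison.
     */
    static bool range_ok(uint32_t off, uint32_t len, uint32_t size)
    {
        if (off + len < off)            /* unsigned wraparound */
            return false;
        return off + len <= size;
    }

    int main(void)
    {
        printf("%d\n", range_ok(10, 20, 64));             /* 1: fits */
        printf("%d\n", range_ok(60, 20, 64));             /* 0: past the end */
        printf("%d\n", range_ok(16, UINT32_MAX - 8, 64)); /* 0: off + len wraps */
        return 0;
    }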
diff --git a/patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch b/patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch
new file mode 100644
index 0000000000..2f061ce9bc
--- /dev/null
+++ b/patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch
@@ -0,0 +1,51 @@
+From: Jiri Bohac <jbohac@suse.cz>
+Subject: kabi: ip: drop IPSTATS_MIB_REASM_OVERLAPS
+Patch-mainline: Never, KABI fix
+References: bsc#1103097, CVE-2018-5391
+
+patches.fixes/ip-discard-ipv4-datagrams-with-overlapping-segments.patch adds
+the IPSTATS_MIB_REASM_OVERLAPS snmp attribute, which breaks KABI.
+
+Drop this attribute and account the dropped fragments in IPSTATS_MIB_REASMFAILS
+instead.
+
+
+Signed-off-by: Jiri Bohac <jbohac@suse.cz>
+
+---
+ include/uapi/linux/snmp.h | 1 -
+ net/ipv4/ip_fragment.c | 2 +-
+ net/ipv4/proc.c | 1 -
+ 3 files changed, 1 insertion(+), 3 deletions(-)
+
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -55,7 +55,6 @@ enum
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
+- IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
+ __IPSTATS_MIB_MAX
+ };
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -479,7 +479,7 @@ found:
+ discard_qp:
+ inet_frag_kill(&qp->q, &ip4_frags);
+ err = -EINVAL;
+- __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
++ __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ err:
+ kfree_skb(skb);
+ return err;
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -132,7 +132,6 @@ static const struct snmp_mib snmp4_ipext
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
+- SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
+ SNMP_MIB_SENTINEL
+ };
+
diff --git a/patches.kabi/kvm_x86_ops-l1tf-kabi-fix.patch b/patches.kabi/kvm_x86_ops-l1tf-kabi-fix.patch
new file mode 100644
index 0000000000..03bf21cd02
--- /dev/null
+++ b/patches.kabi/kvm_x86_ops-l1tf-kabi-fix.patch
@@ -0,0 +1,42 @@
+From: Takashi Iwai <tiwai@suse.de>
+Subject: Fix kABI breakage of kvm_x86_ops due to L1TF patches
+Patch-mainline: Never, kABI fix
+References: bsc#1089343 CVE-2018-3646
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ arch/x86/include/asm/kvm_host.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -707,8 +707,10 @@ struct kvm_vcpu_arch {
+ /* be preempted when it's in kernel-mode(cpl=0) */
+ bool preempted_in_kernel;
+
++#ifndef __GENKSYMS__
+ /* Flush the L1 Data cache for L1TF mitigation on VMENTER */
+ bool l1tf_flush_l1d;
++#endif
+ };
+
+ struct kvm_lpage_info {
+@@ -889,7 +891,6 @@ struct kvm_vcpu_stat {
+ u64 signal_exits;
+ u64 irq_window_exits;
+ u64 nmi_window_exits;
+- u64 l1d_flush;
+ u64 halt_exits;
+ u64 halt_successful_poll;
+ u64 halt_attempted_poll;
+@@ -906,6 +907,9 @@ struct kvm_vcpu_stat {
+ u64 irq_injections;
+ u64 nmi_injections;
+ u64 req_event;
++#ifndef __GENKSYMS__
++ u64 l1d_flush;
++#endif
+ };
+
+ struct x86_instruction_info;
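Both kABI fix-ups above rely on the same trick: new members are hidden from the symbol-version checksum by wrapping them in #ifndef __GENKSYMS__, while the real build still compiles them in. A hedged sketch of the pattern outside the kernel (the struct and field names are illustrative, not the kvm definitions):

    #include <stdio.h>

    /*
     * genksyms defines __GENKSYMS__ while computing symbol CRCs, so members
     * guarded like this are invisible to the checksum and the recorded kABI
     * stays unchanged.  They should only be appended at the end of the
     * structure (or placed in existing padding) so the layout that old
     * modules rely on is preserved.
     */
    struct example_stats {
        unsigned long long exits;
        unsigned long long irq_injections;
    #ifndef __GENKSYMS__
        unsigned long long l1d_flush;   /* new field, hidden from genksyms */
    #endif
    };

    int main(void)
    {
        printf("sizeof(struct example_stats) = %zu\n",
               sizeof(struct example_stats));
        return 0;
    }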
diff --git a/patches.suse/hv-netvsc-Fix-NULL-dereference-at-single-queue-mode-.patch b/patches.suse/hv-netvsc-Fix-NULL-dereference-at-single-queue-mode-.patch
new file mode 100644
index 0000000000..efcd49a4ae
--- /dev/null
+++ b/patches.suse/hv-netvsc-Fix-NULL-dereference-at-single-queue-mode-.patch
@@ -0,0 +1,38 @@
+From: Takashi Iwai <tiwai@suse.de>
+Subject: [PATCH] hv/netvsc: Fix NULL dereference at single queue mode fallback
+References: bsc#1104708
+Patch-mainline: No, will be submitted soon later
+
+The recent commit 916c5e1413be ("hv/netvsc: fix handling of fallback
+to single queue mode") tried to fix the fallback behavior to a single
+queue mode, but it incorrectly changed the function to return zero,
+while the function should return an object pointer. Eventually this
+leads to a NULL dereference at callers that expect a non-NULL
+value.
+
+Fix it by returning the proper net_device object.
+
+Fixes: 916c5e1413be ("hv/netvsc: fix handling of fallback to single queue mode")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/net/hyperv/rndis_filter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index 408ece27131c..2a5209f23f29 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -1338,7 +1338,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
+ /* setting up multiple channels failed */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
+- return 0;
++ return net_device;
+
+ err_dev_remv:
+ rndis_filter_device_remove(dev, net_device);
+--
+2.18.0
+
diff --git a/patches.suse/sched-debug-Reverse-the-order-of-printing-faults.patch b/patches.suse/sched-debug-Reverse-the-order-of-printing-faults.patch
new file mode 100644
index 0000000000..fc657dd099
--- /dev/null
+++ b/patches.suse/sched-debug-Reverse-the-order-of-printing-faults.patch
@@ -0,0 +1,48 @@
+From a635e99e1d438b63f323884e0c11242e647e136f Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:47 +0530
+Subject: [PATCH] sched/debug: Reverse the order of printing faults
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 67d9f6c256cd66e15f85c92670f52a7ad4689cff
+
+Fix the order in which the private and shared numa faults are getting
+printed.
+
+No functional changes.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25215.7 25375.3 0.63
+1 72107 72617 0.70
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-7-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/debug.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
+index ebe2d0c87c2f..46f1e44191c4 100644
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -833,8 +833,8 @@ void print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
+ unsigned long tpf, unsigned long gsf, unsigned long gpf)
+ {
+ SEQ_printf(m, "numa_faults node=%d ", node);
+- SEQ_printf(m, "task_private=%lu task_shared=%lu ", tsf, tpf);
+- SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gsf, gpf);
++ SEQ_printf(m, "task_private=%lu task_shared=%lu ", tpf, tsf);
++ SEQ_printf(m, "group_private=%lu group_shared=%lu\n", gpf, gsf);
+ }
+ #endif
+
diff --git a/patches.suse/sched-numa-Evaluate-move-once-per-node.patch b/patches.suse/sched-numa-Evaluate-move-once-per-node.patch
new file mode 100644
index 0000000000..31c8c82d93
--- /dev/null
+++ b/patches.suse/sched-numa-Evaluate-move-once-per-node.patch
@@ -0,0 +1,268 @@
+From 61868b82ff76717bedc0b9725a665ebec2f147e8 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:43 +0530
+Subject: [PATCH] sched/numa: Evaluate move once per node
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 305c1fac3225dfa7eeb89bfe91b7335a6edd5172
+
+task_numa_compare() helps choose the best CPU to move or swap the
+selected task. To achieve this, task_numa_compare() is called for every
+CPU in the node. Currently it evaluates whether the task can be moved
+or swapped for each of the CPUs. However, the move evaluation is mostly
+independent of the CPU. Evaluating the move logic once per node provides
+scope for simplifying task_numa_compare().
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25705.2 25058.2 -2.51
+1 74433 72950 -1.99
+
+Running SPECjbb2005 on a 16 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+8 96589.6 105930 9.670
+1 181830 178624 -1.76
+
+(numbers from v1 based on v4.17-rc5)
+Testcase Time: Min Max Avg StdDev
+numa01.sh Real: 440.65 941.32 758.98 189.17
+numa01.sh Sys: 183.48 320.07 258.42 50.09
+numa01.sh User: 37384.65 71818.14 60302.51 13798.96
+numa02.sh Real: 61.24 65.35 62.49 1.49
+numa02.sh Sys: 16.83 24.18 21.40 2.60
+numa02.sh User: 5219.59 5356.34 5264.03 49.07
+numa03.sh Real: 822.04 912.40 873.55 37.35
+numa03.sh Sys: 118.80 140.94 132.90 7.60
+numa03.sh User: 62485.19 70025.01 67208.33 2967.10
+numa04.sh Real: 690.66 872.12 778.49 65.44
+numa04.sh Sys: 459.26 563.03 494.03 42.39
+numa04.sh User: 51116.44 70527.20 58849.44 8461.28
+numa05.sh Real: 418.37 562.28 525.77 54.27
+numa05.sh Sys: 299.45 481.00 392.49 64.27
+numa05.sh User: 34115.09 41324.02 39105.30 2627.68
+
+Testcase Time: Min Max Avg StdDev %Change
+numa01.sh Real: 516.14 892.41 739.84 151.32 2.587%
+numa01.sh Sys: 153.16 192.99 177.70 14.58 45.42%
+numa01.sh User: 39821.04 69528.92 57193.87 10989.48 5.435%
+numa02.sh Real: 60.91 62.35 61.58 0.63 1.477%
+numa02.sh Sys: 16.47 26.16 21.20 3.85 0.943%
+numa02.sh User: 5227.58 5309.61 5265.17 31.04 -0.02%
+numa03.sh Real: 739.07 917.73 795.75 64.45 9.776%
+numa03.sh Sys: 94.46 136.08 109.48 14.58 21.39%
+numa03.sh User: 57478.56 72014.09 61764.48 5343.69 8.813%
+numa04.sh Real: 442.61 715.43 530.31 96.12 46.79%
+numa04.sh Sys: 224.90 348.63 285.61 48.83 72.97%
+numa04.sh User: 35836.84 47522.47 40235.41 3985.26 46.26%
+numa05.sh Real: 386.13 489.17 434.94 43.59 20.88%
+numa05.sh Sys: 144.29 438.56 278.80 105.78 40.77%
+numa05.sh User: 33255.86 36890.82 34879.31 1641.98 12.11%
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-3-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 128 +++++++++++++++++++++++-----------------------------
+ 1 file changed, 57 insertions(+), 71 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 783f0ce6a54c..055bb39aa01a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1608,9 +1608,8 @@ static bool load_too_imbalanced(long src_load, long dst_load,
+ * be exchanged with the source task
+ */
+ static void task_numa_compare(struct task_numa_env *env,
+- long taskimp, long groupimp)
++ long taskimp, long groupimp, bool maymove)
+ {
+- struct rq *src_rq = cpu_rq(env->src_cpu);
+ struct rq *dst_rq = cpu_rq(env->dst_cpu);
+ struct task_struct *cur;
+ long src_load, dst_load;
+@@ -1631,97 +1630,73 @@ static void task_numa_compare(struct task_numa_env *env,
+ if (cur == env->p)
+ goto unlock;
+
++ if (!cur) {
++ if (maymove || imp > env->best_imp)
++ goto assign;
++ else
++ goto unlock;
++ }
++
+ /*
+ * "imp" is the fault differential for the source task between the
+ * source and destination node. Calculate the total differential for
+ * the source task and potential destination task. The more negative
+- * the value is, the more rmeote accesses that would be expected to
++ * the value is, the more remote accesses that would be expected to
+ * be incurred if the tasks were swapped.
+ */
+- if (cur) {
+- /* Skip this swap candidate if cannot move to the source cpu */
+- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
+- goto unlock;
++ /* Skip this swap candidate if cannot move to the source cpu */
++ if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
++ goto unlock;
+
++ /*
++ * If dst and source tasks are in the same NUMA group, or not
++ * in any group then look only at task weights.
++ */
++ if (cur->numa_group == env->p->numa_group) {
++ imp = taskimp + task_weight(cur, env->src_nid, dist) -
++ task_weight(cur, env->dst_nid, dist);
+ /*
+- * If dst and source tasks are in the same NUMA group, or not
+- * in any group then look only at task weights.
++ * Add some hysteresis to prevent swapping the
++ * tasks within a group over tiny differences.
+ */
+- if (cur->numa_group == env->p->numa_group) {
+- imp = taskimp + task_weight(cur, env->src_nid, dist) -
+- task_weight(cur, env->dst_nid, dist);
+- /*
+- * Add some hysteresis to prevent swapping the
+- * tasks within a group over tiny differences.
+- */
+- if (cur->numa_group)
+- imp -= imp/16;
+- } else {
+- /*
+- * Compare the group weights. If a task is all by
+- * itself (not part of a group), use the task weight
+- * instead.
+- */
+- if (cur->numa_group)
+- imp += group_weight(cur, env->src_nid, dist) -
+- group_weight(cur, env->dst_nid, dist);
+- else
+- imp += task_weight(cur, env->src_nid, dist) -
+- task_weight(cur, env->dst_nid, dist);
+- }
++ if (cur->numa_group)
++ imp -= imp / 16;
++ } else {
++ /*
++ * Compare the group weights. If a task is all by itself
++ * (not part of a group), use the task weight instead.
++ */
++ if (cur->numa_group && env->p->numa_group)
++ imp += group_weight(cur, env->src_nid, dist) -
++ group_weight(cur, env->dst_nid, dist);
++ else
++ imp += task_weight(cur, env->src_nid, dist) -
++ task_weight(cur, env->dst_nid, dist);
+ }
+
+- if (imp <= env->best_imp && moveimp <= env->best_imp)
++ if (imp <= env->best_imp)
+ goto unlock;
+
+- if (!cur) {
+- /* Is there capacity at our destination? */
+- if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
+- !env->dst_stats.has_free_capacity)
+- goto unlock;
+-
+- goto balance;
+- }
+-
+- /* Balance doesn't matter much if we're running a task per cpu */
+- if (imp > env->best_imp && src_rq->nr_running == 1 &&
+- dst_rq->nr_running == 1)
++ if (maymove && moveimp > imp && moveimp > env->best_imp) {
++ imp = moveimp - 1;
++ cur = NULL;
+ goto assign;
++ }
+
+ /*
+ * In the overloaded case, try and keep the load balanced.
+ */
+-balance:
+- load = task_h_load(env->p);
++ load = task_h_load(env->p) - task_h_load(cur);
++ if (!load)
++ goto assign;
++
+ dst_load = env->dst_stats.load + load;
+ src_load = env->src_stats.load - load;
+
+- if (moveimp > imp && moveimp > env->best_imp) {
+- /*
+- * If the improvement from just moving env->p direction is
+- * better than swapping tasks around, check if a move is
+- * possible. Store a slightly smaller score than moveimp,
+- * so an actually idle CPU will win.
+- */
+- if (!load_too_imbalanced(src_load, dst_load, env)) {
+- imp = moveimp - 1;
+- cur = NULL;
+- goto assign;
+- }
+- }
+-
+- if (imp <= env->best_imp)
+- goto unlock;
+-
+- if (cur) {
+- load = task_h_load(cur);
+- dst_load -= load;
+- src_load += load;
+- }
+-
+ if (load_too_imbalanced(src_load, dst_load, env))
+ goto unlock;
+
++assign:
+ /*
+ * One idle CPU per node is evaluated for a task numa move.
+ * Call select_idle_sibling to maybe find a better one.
+@@ -1737,7 +1712,6 @@ static void task_numa_compare(struct task_numa_env *env,
+ local_irq_enable();
+ }
+
+-assign:
+ task_numa_assign(env, cur, imp);
+ unlock:
+ rcu_read_unlock();
+@@ -1746,15 +1720,27 @@ static void task_numa_compare(struct task_numa_env *env,
+ static void task_numa_find_cpu(struct task_numa_env *env,
+ long taskimp, long groupimp)
+ {
++ long src_load, dst_load, load;
++ bool maymove = false;
+ int cpu;
+
++ load = task_h_load(env->p);
++ dst_load = env->dst_stats.load + load;
++ src_load = env->src_stats.load - load;
++
++ /*
++ * If the improvement from just moving env->p direction is better
++ * than swapping tasks around, check if a move is possible.
++ */
++ maymove = !load_too_imbalanced(src_load, dst_load, env);
++
+ for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
+ /* Skip this CPU if the source task cannot migrate */
+ if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
+ continue;
+
+ env->dst_cpu = cpu;
+- task_numa_compare(env, taskimp, groupimp);
++ task_numa_compare(env, taskimp, groupimp, maymove);
+ }
+ }
+
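The restructuring above computes the node-level move feasibility (maymove) once in task_numa_find_cpu() and hands it to task_numa_compare() for every CPU, instead of re-deriving it inside the per-CPU comparison. A toy sketch of that hoisting (all helpers and numbers here are stand-ins, not the scheduler code):

    #include <stdbool.h>
    #include <stdio.h>

    #define CPUS_PER_NODE 8

    /* Stand-in for load_too_imbalanced(): depends only on node-level loads. */
    static bool move_would_imbalance(long src_load, long dst_load, long task_load)
    {
        return (dst_load + task_load) > (src_load - task_load) + 100;
    }

    static void compare_cpu(int cpu, bool maymove)
    {
        /* Per-CPU swap evaluation would go here; 'maymove' is already decided. */
        printf("cpu %d: maymove=%d\n", cpu, maymove);
    }

    int main(void)
    {
        long src_load = 1000, dst_load = 600, task_load = 150;

        /* Hoisted: the node-level decision does not change inside the loop. */
        bool maymove = !move_would_imbalance(src_load, dst_load, task_load);

        for (int cpu = 0; cpu < CPUS_PER_NODE; cpu++)
            compare_cpu(cpu, maymove);

        return 0;
    }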
diff --git a/patches.suse/sched-numa-Modify-migrate_swap-to-accept-additional-parameters.patch b/patches.suse/sched-numa-Modify-migrate_swap-to-accept-additional-parameters.patch
new file mode 100644
index 0000000000..88ff6b8f5b
--- /dev/null
+++ b/patches.suse/sched-numa-Modify-migrate_swap-to-accept-additional-parameters.patch
@@ -0,0 +1,114 @@
+From 8a6454703a21399bbe67cee248268421119aa388 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:50 +0530
+Subject: [PATCH] sched/numa: Modify migrate_swap() to accept additional
+ parameters
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 0ad4e3dfe6cf3f207e61cbd8e3e4a943f8c1ad20
+
+There are checks in migrate_swap_stop() that check if the task/CPU
+combination is as per migrate_swap_arg before migrating.
+
+However, at least one of the two tasks to be swapped by migrate_swap()
+could have migrated to a completely different CPU before the
+migrate_swap_arg was updated. The new CPU where the task is currently
+running could be on a different node too. If the task has migrated, the
+numa balancer might end up placing a task on the wrong node. Instead of
+achieving node consolidation, it may end up spreading the load across
+nodes.
+
+To avoid that, pass the CPUs as additional parameters.
+
+While here, place migrate_swap under CONFIG_NUMA_BALANCING.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25377.3 25226.6 -0.59
+1 72287 73326 1.437
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-10-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/core.c | 9 ++++++---
+ kernel/sched/fair.c | 3 ++-
+ kernel/sched/sched.h | 3 ++-
+ 3 files changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index c24a2e31af26..18e9dbbbb818 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1227,6 +1227,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ __set_task_cpu(p, new_cpu);
+ }
+
++#ifdef CONFIG_NUMA_BALANCING
+ static void __migrate_swap_task(struct task_struct *p, int cpu)
+ {
+ if (task_on_rq_queued(p)) {
+@@ -1308,16 +1309,17 @@ static int migrate_swap_stop(void *data)
+ /*
+ * Cross migrate two tasks
+ */
+-int migrate_swap(struct task_struct *cur, struct task_struct *p)
++int migrate_swap(struct task_struct *cur, struct task_struct *p,
++ int target_cpu, int curr_cpu)
+ {
+ struct migration_swap_arg arg;
+ int ret = -EINVAL;
+
+ arg = (struct migration_swap_arg){
+ .src_task = cur,
+- .src_cpu = task_cpu(cur),
++ .src_cpu = curr_cpu,
+ .dst_task = p,
+- .dst_cpu = task_cpu(p),
++ .dst_cpu = target_cpu,
+ };
+
+ if (arg.src_cpu == arg.dst_cpu)
+@@ -1342,6 +1344,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
+ out:
+ return ret;
+ }
++#endif /* CONFIG_NUMA_BALANCING */
+
+ /*
+ * wait_task_inactive - wait for a thread to unschedule.
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index c9a932f49f0a..0b5b2a011a9c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1876,7 +1876,8 @@ static int task_numa_migrate(struct task_struct *p)
+ return ret;
+ }
+
+- ret = migrate_swap(p, env.best_task);
++ ret = migrate_swap(p, env.best_task, env.best_cpu, env.src_cpu);
++
+ if (ret != 0)
+ trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
+ put_task_struct(env.best_task);
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 0fc1d6c2ab0f..6da121dc1f13 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -947,7 +947,8 @@ enum numa_faults_stats {
+ };
+ extern void sched_setnuma(struct task_struct *p, int node);
+ extern int migrate_task_to(struct task_struct *p, int cpu);
+-extern int migrate_swap(struct task_struct *, struct task_struct *);
++extern int migrate_swap(struct task_struct *p, struct task_struct *t,
++ int cpu, int scpu);
+ extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
+ #else
+ static inline void
diff --git a/patches.suse/sched-numa-Move-task_numa_placement-closer-to-numa_migrate_preferred.patch b/patches.suse/sched-numa-Move-task_numa_placement-closer-to-numa_migrate_preferred.patch
new file mode 100644
index 0000000000..720fe0a091
--- /dev/null
+++ b/patches.suse/sched-numa-Move-task_numa_placement-closer-to-numa_migrate_preferred.patch
@@ -0,0 +1,111 @@
+From 2c7c1f2e7263ad68a54bc599e278cf8b406c6b20 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:33:00 +0530
+Subject: [PATCH] sched/numa: Move task_numa_placement() closer to
+ numa_migrate_preferred()
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: b6a60cf36d497e7fbde9dd5b86fabd96850249f6
+
+numa_migrate_preferred() is called periodically or when the task's
+preferred node changes. Preferred node evaluations happen once per scan
+sequence.
+
+If the scan completion happens just after the periodic NUMA migration,
+then we try to migrate to the preferred node and the preferred node might
+change, needing another node migration.
+
+Avoid this by checking for scan sequence completion only when checking
+for periodic migration.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25862.6 26158.1 1.14258
+1 74357 72725 -2.19482
+
+Running SPECjbb2005 on a 16 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+8 117019 113992 -2.58
+1 179095 174947 -2.31
+
+(numbers from v1 based on v4.17-rc5)
+Testcase Time: Min Max Avg StdDev
+numa01.sh Real: 449.46 770.77 615.22 101.70
+numa01.sh Sys: 132.72 208.17 170.46 24.96
+numa01.sh User: 39185.26 60290.89 50066.76 6807.84
+numa02.sh Real: 60.85 61.79 61.28 0.37
+numa02.sh Sys: 15.34 24.71 21.08 3.61
+numa02.sh User: 5204.41 5249.85 5231.21 17.60
+numa03.sh Real: 785.50 916.97 840.77 44.98
+numa03.sh Sys: 108.08 133.60 119.43 8.82
+numa03.sh User: 61422.86 70919.75 64720.87 3310.61
+numa04.sh Real: 429.57 587.37 480.80 57.40
+numa04.sh Sys: 240.61 321.97 290.84 33.58
+numa04.sh User: 34597.65 40498.99 37079.48 2060.72
+numa05.sh Real: 392.09 431.25 414.65 13.82
+numa05.sh Sys: 229.41 372.48 297.54 53.14
+numa05.sh User: 33390.86 34697.49 34222.43 556.42
+
+Testcase Time: Min Max Avg StdDev %Change
+numa01.sh Real: 424.63 566.18 498.12 59.26 23.50%
+numa01.sh Sys: 160.19 256.53 208.98 37.02 -18.4%
+numa01.sh User: 37320.00 46225.58 42001.57 3482.45 19.20%
+numa02.sh Real: 60.17 62.47 60.91 0.85 0.607%
+numa02.sh Sys: 15.30 22.82 17.04 2.90 23.70%
+numa02.sh User: 5202.13 5255.51 5219.08 20.14 0.232%
+numa03.sh Real: 823.91 844.89 833.86 8.46 0.828%
+numa03.sh Sys: 130.69 148.29 140.47 6.21 -14.9%
+numa03.sh User: 62519.15 64262.20 63613.38 620.05 1.740%
+numa04.sh Real: 515.30 603.74 548.56 30.93 -12.3%
+numa04.sh Sys: 459.73 525.48 489.18 21.63 -40.5%
+numa04.sh User: 40561.96 44919.18 42047.87 1526.85 -11.8%
+numa05.sh Real: 396.58 454.37 421.13 19.71 -1.53%
+numa05.sh Sys: 208.72 422.02 348.90 73.60 -14.7%
+numa05.sh User: 33124.08 36109.35 34846.47 1089.74 -1.79%
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-20-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f93d6756d4a5..afa617aaafb4 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2238,9 +2238,6 @@ static void task_numa_placement(struct task_struct *p)
+ /* Set the new preferred node */
+ if (max_nid != p->numa_preferred_nid)
+ sched_setnuma(p, max_nid);
+-
+- if (task_node(p) != p->numa_preferred_nid)
+- numa_migrate_preferred(p);
+ }
+
+ update_task_scan_period(p, fault_types[0], fault_types[1]);
+@@ -2443,14 +2440,14 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
+ numa_is_active_node(mem_node, ng))
+ local = 1;
+
+- task_numa_placement(p);
+-
+ /*
+ * Retry task to preferred node migration periodically, in case it
+ * case it previously failed, or the scheduler moved us.
+ */
+- if (time_after(jiffies, p->numa_migrate_retry))
++ if (time_after(jiffies, p->numa_migrate_retry)) {
++ task_numa_placement(p);
+ numa_migrate_preferred(p);
++ }
+
+ if (migrated)
+ p->numa_pages_migrated += pages;
diff --git a/patches.suse/sched-numa-Remove-redundant-field-kabi.patch b/patches.suse/sched-numa-Remove-redundant-field-kabi.patch
new file mode 100644
index 0000000000..8aa8fba787
--- /dev/null
+++ b/patches.suse/sched-numa-Remove-redundant-field-kabi.patch
@@ -0,0 +1,30 @@
+From b780840d967413c7638b32e8071f65fef3825998 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 18 Jul 2018 09:35:22 +0100
+Subject: [PATCH] sched/numa: Remove redundant field -kabi
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: Never, kabi
+
+This is essentially a revert of "sched/numa: Remove redundant field".
+The backport of that patch could have been avoided entirely but this makes
+it obvious that the patch should be dropped later when KABI is not a concern.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ include/linux/sched.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 7f08767403db..ac6ace3ed744 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -922,6 +922,8 @@ struct task_struct {
+ u64 last_sum_exec_runtime;
+ struct callback_head numa_work;
+
++ /* UNUSED field preserved due to KABI */
++ struct list_head numa_entry;
+ struct numa_group *numa_group;
+
+ /*
diff --git a/patches.suse/sched-numa-Remove-redundant-field.patch b/patches.suse/sched-numa-Remove-redundant-field.patch
new file mode 100644
index 0000000000..090ead98a7
--- /dev/null
+++ b/patches.suse/sched-numa-Remove-redundant-field.patch
@@ -0,0 +1,39 @@
+From 4327dffc5f18a5c8b75c61aeaaeb16a7cb6cc024 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:42 +0530
+Subject: [PATCH] sched/numa: Remove redundant field
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 6e30396767508101eacec8b93b068e8905e660dc
+
+'numa_entry' is a struct list_head defined in task_struct, but never used.
+
+No functional change.
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-2-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ include/linux/sched.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 370f127ef39c..7f08767403db 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -922,7 +922,6 @@ struct task_struct {
+ u64 last_sum_exec_runtime;
+ struct callback_head numa_work;
+
+- struct list_head numa_entry;
+ struct numa_group *numa_group;
+
+ /*
diff --git a/patches.suse/sched-numa-Remove-unused-task_capacity-from-struct-numa_stats.patch b/patches.suse/sched-numa-Remove-unused-task_capacity-from-struct-numa_stats.patch
new file mode 100644
index 0000000000..616d5afa65
--- /dev/null
+++ b/patches.suse/sched-numa-Remove-unused-task_capacity-from-struct-numa_stats.patch
@@ -0,0 +1,66 @@
+From 9d6bf7c6c1adf0337971e955726042133039ed59 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:49 +0530
+Subject: [PATCH] sched/numa: Remove unused task_capacity from 'struct
+ numa_stats'
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 10864a9e222048a862da2c21efa28929a4dfed15
+
+The task_capacity field in 'struct numa_stats' is redundant.
+Also move nr_running for better packing within the struct.
+
+No functional changes.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25308.6 25377.3 0.271
+1 72964 72287 -0.92
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@surriel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-9-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index a81c475677e7..c9a932f49f0a 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1478,14 +1478,12 @@ static unsigned long capacity_of(int cpu);
+
+ /* Cached statistics for all CPUs within a node */
+ struct numa_stats {
+- unsigned long nr_running;
+ unsigned long load;
+
+ /* Total compute capacity of CPUs on a node */
+ unsigned long compute_capacity;
+
+- /* Approximate capacity in terms of runnable tasks on a node */
+- unsigned long task_capacity;
++ unsigned int nr_running;
+ int has_free_capacity;
+ };
+
+@@ -1523,9 +1521,9 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
+ smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
+ capacity = cpus / smt; /* cores */
+
+- ns->task_capacity = min_t(unsigned, capacity,
++ capacity = min_t(unsigned, capacity,
+ DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
+- ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
++ ns->has_free_capacity = (ns->nr_running < capacity);
+ }
+
+ struct task_numa_env {
diff --git a/patches.suse/sched-numa-Set-preferred_node-based-on-best_cpu.patch b/patches.suse/sched-numa-Set-preferred_node-based-on-best_cpu.patch
new file mode 100644
index 0000000000..134501840a
--- /dev/null
+++ b/patches.suse/sched-numa-Set-preferred_node-based-on-best_cpu.patch
@@ -0,0 +1,121 @@
+From 26c63a86b332aa1f2c2c0d6fde7ada6d483d73b3 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:45 +0530
+Subject: [PATCH] sched/numa: Set preferred_node based on best_cpu
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 8cd45eee43bd46b933158b25aa7c742e0f3e811f
+
+Currently the preferred node is set to dst_nid, which is the last node
+in the iteration whose group weight or task weight is greater than that
+of the current node. However, this doesn't guarantee that dst_nid has
+the numa capacity for the move. It also doesn't guarantee that dst_nid
+has the best_cpu, which is the CPU/node ideal for node migration.
+
+Let's consider faults on a 4 node system with group weight numbers
+in different nodes being in 0 < 1 < 2 < 3 proportion. Consider the task
+is running on 3 and 0 is its preferred node but its capacity is full.
+Consider nodes 1, 2 and 3 have capacity. Then the task should be
+migrated to node 1. Currently the task gets moved to node 2. env.dst_nid
+points to the last node whose faults were greater than current node.
+
+Modify to set the preferred node based on best_cpu. Earlier, setting
+preferred node was skipped if nr_active_nodes is 1. This could result in
+the task being moved out of the preferred node to a random node during
+regular load balancing.
+
+Also while modifying task_numa_migrate(), use sched_setnuma to set
+preferred node. This ensures our numa accounting is correct.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25122.9 25549.6 1.698
+1 73850 73190 -0.89
+
+Running SPECjbb2005 on a 16 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+8 105930 113437 7.08676
+1 178624 196130 9.80047
+
+(numbers from v1 based on v4.17-rc5)
+Testcase Time: Min Max Avg StdDev
+numa01.sh Real: 435.78 653.81 534.58 83.20
+numa01.sh Sys: 121.93 187.18 145.90 23.47
+numa01.sh User: 37082.81 51402.80 43647.60 5409.75
+numa02.sh Real: 60.64 61.63 61.19 0.40
+numa02.sh Sys: 14.72 25.68 19.06 4.03
+numa02.sh User: 5210.95 5266.69 5233.30 20.82
+numa03.sh Real: 746.51 808.24 780.36 23.88
+numa03.sh Sys: 97.26 108.48 105.07 4.28
+numa03.sh User: 58956.30 61397.05 60162.95 1050.82
+numa04.sh Real: 465.97 519.27 484.81 19.62
+numa04.sh Sys: 304.43 359.08 334.68 20.64
+numa04.sh User: 37544.16 41186.15 39262.44 1314.91
+numa05.sh Real: 411.57 457.20 433.29 16.58
+numa05.sh Sys: 230.05 435.48 339.95 67.58
+numa05.sh User: 33325.54 36896.31 35637.84 1222.64
+
+Testcase Time: Min Max Avg StdDev %Change
+numa01.sh Real: 506.35 794.46 599.06 104.26 -10.76%
+numa01.sh Sys: 150.37 223.56 195.99 24.94 -25.55%
+numa01.sh User: 43450.69 61752.04 49281.50 6635.33 -11.43%
+numa02.sh Real: 60.33 62.40 61.31 0.90 -0.195%
+numa02.sh Sys: 18.12 31.66 24.28 5.89 -21.49%
+numa02.sh User: 5203.91 5325.32 5260.29 49.98 -0.513%
+numa03.sh Real: 696.47 853.62 745.80 57.28 4.6339%
+numa03.sh Sys: 85.68 123.71 97.89 13.48 7.3347%
+numa03.sh User: 55978.45 66418.63 59254.94 3737.97 1.5323%
+numa04.sh Real: 444.05 514.83 497.06 26.85 -2.464%
+numa04.sh Sys: 230.39 375.79 316.23 48.58 5.8343%
+numa04.sh User: 35403.12 41004.10 39720.80 2163.08 -1.153%
+numa05.sh Real: 423.09 460.41 439.57 13.92 -1.428%
+numa05.sh Sys: 287.38 480.15 369.37 68.52 -7.964%
+numa05.sh User: 34732.12 38016.80 36255.85 1070.51 -1.704%
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-5-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f226616bb1c2..2da454425767 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1793,7 +1793,7 @@ static int task_numa_migrate(struct task_struct *p)
+ * elsewhere, so there is no point in (re)trying.
+ */
+ if (unlikely(!sd)) {
+- p->numa_preferred_nid = task_node(p);
++ sched_setnuma(p, task_node(p));
+ return -EINVAL;
+ }
+
+@@ -1852,15 +1852,13 @@ static int task_numa_migrate(struct task_struct *p)
+ * trying for a better one later. Do not set the preferred node here.
+ */
+ if (p->numa_group) {
+- struct numa_group *ng = p->numa_group;
+-
+ if (env.best_cpu == -1)
+ nid = env.src_nid;
+ else
+- nid = env.dst_nid;
++ nid = cpu_to_node(env.best_cpu);
+
+- if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
+- sched_setnuma(p, env.dst_nid);
++ if (nid != p->numa_preferred_nid)
++ sched_setnuma(p, nid);
+ }
+
+ /* No better CPU than the current one was found. */
diff --git a/patches.suse/sched-numa-Simplify-load_too_imbalanced.patch b/patches.suse/sched-numa-Simplify-load_too_imbalanced.patch
new file mode 100644
index 0000000000..b886b93276
--- /dev/null
+++ b/patches.suse/sched-numa-Simplify-load_too_imbalanced.patch
@@ -0,0 +1,110 @@
+From 302a1424fb79edea1d7e1bd09d31402a3c6da480 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:44 +0530
+Subject: [PATCH] sched/numa: Simplify load_too_imbalanced()
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 5f95ba7a43057f28a349ea1f03ee8d04e0f445ea
+
+Currently load_too_imbalanced() cares about the slope of the imbalance.
+It doesn't care about the direction of the imbalance.
+
+However, this may not work if the nodes being compared have dissimilar
+capacities. Some nodes might have more cores than other nodes in the
+system. Also, unlike traditional load balancing at a NUMA sched domain,
+multiple requests to migrate from the same source node to the same
+destination node may run in parallel. This can cause a huge load
+imbalance. This is especially true on larger machines with either many
+cores per node or a larger number of nodes in the system. Hence allow
+a move/swap only if the imbalance is going to be reduced.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25058.2 25122.9 0.25
+1 72950 73850 1.23
+
+(numbers from v1 based on v4.17-rc5)
+Testcase Time: Min Max Avg StdDev
+numa01.sh Real: 516.14 892.41 739.84 151.32
+numa01.sh Sys: 153.16 192.99 177.70 14.58
+numa01.sh User: 39821.04 69528.92 57193.87 10989.48
+numa02.sh Real: 60.91 62.35 61.58 0.63
+numa02.sh Sys: 16.47 26.16 21.20 3.85
+numa02.sh User: 5227.58 5309.61 5265.17 31.04
+numa03.sh Real: 739.07 917.73 795.75 64.45
+numa03.sh Sys: 94.46 136.08 109.48 14.58
+numa03.sh User: 57478.56 72014.09 61764.48 5343.69
+numa04.sh Real: 442.61 715.43 530.31 96.12
+numa04.sh Sys: 224.90 348.63 285.61 48.83
+numa04.sh User: 35836.84 47522.47 40235.41 3985.26
+numa05.sh Real: 386.13 489.17 434.94 43.59
+numa05.sh Sys: 144.29 438.56 278.80 105.78
+numa05.sh User: 33255.86 36890.82 34879.31 1641.98
+
+Testcase Time: Min Max Avg StdDev %Change
+numa01.sh Real: 435.78 653.81 534.58 83.20 38.39%
+numa01.sh Sys: 121.93 187.18 145.90 23.47 21.79%
+numa01.sh User: 37082.81 51402.80 43647.60 5409.75 31.03%
+numa02.sh Real: 60.64 61.63 61.19 0.40 0.637%
+numa02.sh Sys: 14.72 25.68 19.06 4.03 11.22%
+numa02.sh User: 5210.95 5266.69 5233.30 20.82 0.608%
+numa03.sh Real: 746.51 808.24 780.36 23.88 1.972%
+numa03.sh Sys: 97.26 108.48 105.07 4.28 4.197%
+numa03.sh User: 58956.30 61397.05 60162.95 1050.82 2.661%
+numa04.sh Real: 465.97 519.27 484.81 19.62 9.385%
+numa04.sh Sys: 304.43 359.08 334.68 20.64 -14.6%
+numa04.sh User: 37544.16 41186.15 39262.44 1314.91 2.478%
+numa05.sh Real: 411.57 457.20 433.29 16.58 0.380%
+numa05.sh Sys: 230.05 435.48 339.95 67.58 -17.9%
+numa05.sh User: 33325.54 36896.31 35637.84 1222.64 -2.12%
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-4-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 20 ++------------------
+ 1 file changed, 2 insertions(+), 18 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 055bb39aa01a..f226616bb1c2 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1574,28 +1574,12 @@ static bool load_too_imbalanced(long src_load, long dst_load,
+ src_capacity = env->src_stats.compute_capacity;
+ dst_capacity = env->dst_stats.compute_capacity;
+
+- /* We care about the slope of the imbalance, not the direction. */
+- if (dst_load < src_load)
+- swap(dst_load, src_load);
+-
+- /* Is the difference below the threshold? */
+- imb = dst_load * src_capacity * 100 -
+- src_load * dst_capacity * env->imbalance_pct;
+- if (imb <= 0)
+- return false;
++ imb = abs(dst_load * src_capacity - src_load * dst_capacity);
+
+- /*
+- * The imbalance is above the allowed threshold.
+- * Compare it with the old imbalance.
+- */
+ orig_src_load = env->src_stats.load;
+ orig_dst_load = env->dst_stats.load;
+
+- if (orig_dst_load < orig_src_load)
+- swap(orig_dst_load, orig_src_load);
+-
+- old_imb = orig_dst_load * src_capacity * 100 -
+- orig_src_load * dst_capacity * env->imbalance_pct;
++ old_imb = abs(orig_dst_load * src_capacity - orig_src_load * dst_capacity);
+
+ /* Would this change make things worse? */
+ return (imb > old_imb);
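The simplified load_too_imbalanced() above only allows a move or swap if the capacity-weighted absolute imbalance shrinks. A standalone sketch of that comparison (the numbers and helper name are illustrative, not the scheduler's types):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /*
     * Post-patch rule: compare |dst*src_cap - src*dst_cap| before and after
     * the proposed move; reject the move if the imbalance would grow.
     */
    static bool load_too_imbalanced_model(long src_load, long dst_load,
                                          long orig_src_load, long orig_dst_load,
                                          long src_cap, long dst_cap)
    {
        long imb     = labs(dst_load * src_cap - src_load * dst_cap);
        long old_imb = labs(orig_dst_load * src_cap - orig_src_load * dst_cap);

        return imb > old_imb;   /* true means the move makes things worse */
    }

    int main(void)
    {
        /* Moving 100 units of load from a busy node to a lightly loaded one. */
        long orig_src = 800, orig_dst = 200, src_cap = 1024, dst_cap = 1024;
        long new_src = 700, new_dst = 300;

        printf("%d\n", load_too_imbalanced_model(new_src, new_dst,
                                                 orig_src, orig_dst,
                                                 src_cap, dst_cap)); /* 0: improves */
        return 0;
    }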
diff --git a/patches.suse/sched-numa-Skip-nodes-that-are-at-hoplimit.patch b/patches.suse/sched-numa-Skip-nodes-that-are-at-hoplimit.patch
new file mode 100644
index 0000000000..800b9f2a52
--- /dev/null
+++ b/patches.suse/sched-numa-Skip-nodes-that-are-at-hoplimit.patch
@@ -0,0 +1,90 @@
+From 5540dd30c85c2b4fcace6ae1feb6845fd697fbda Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:48 +0530
+Subject: [PATCH] sched/numa: Skip nodes that are at 'hoplimit'
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 0ee7e74dc0dc64d9900751d03c5c22dfdd173fb8
+
+When comparing two nodes at a distance of 'hoplimit', we should consider
+nodes only up to 'hoplimit'. Currently we also consider nodes at
+'hoplimit' distance. Hence two nodes at a distance of 'hoplimit' will
+have the same groupweight. Fix this by skipping nodes at hoplimit.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25375.3 25308.6 -0.26
+1 72617 72964 0.477
+
+Running SPECjbb2005 on a 16 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+8 113372 108750 -4.07684
+1 177403 183115 3.21979
+
+(numbers from v1 based on v4.17-rc5)
+Testcase Time: Min Max Avg StdDev
+numa01.sh Real: 478.45 565.90 515.11 30.87
+numa01.sh Sys: 207.79 271.04 232.94 21.33
+numa01.sh User: 39763.93 47303.12 43210.73 2644.86
+numa02.sh Real: 60.00 61.46 60.78 0.49
+numa02.sh Sys: 15.71 25.31 20.69 3.42
+numa02.sh User: 5175.92 5265.86 5235.97 32.82
+numa03.sh Real: 776.42 834.85 806.01 23.22
+numa03.sh Sys: 114.43 128.75 121.65 5.49
+numa03.sh User: 60773.93 64855.25 62616.91 1576.39
+numa04.sh Real: 456.93 511.95 482.91 20.88
+numa04.sh Sys: 178.09 460.89 356.86 94.58
+numa04.sh User: 36312.09 42553.24 39623.21 2247.96
+numa05.sh Real: 393.98 493.48 436.61 35.59
+numa05.sh Sys: 164.49 329.15 265.87 61.78
+numa05.sh User: 33182.65 36654.53 35074.51 1187.71
+
+Testcase Time: Min Max Avg StdDev %Change
+numa01.sh Real: 414.64 819.20 556.08 147.70 -7.36%
+numa01.sh Sys: 77.52 205.04 139.40 52.05 67.10%
+numa01.sh User: 37043.24 61757.88 45517.48 9290.38 -5.06%
+numa02.sh Real: 60.80 63.32 61.63 0.88 -1.37%
+numa02.sh Sys: 17.35 39.37 25.71 7.33 -19.5%
+numa02.sh User: 5213.79 5374.73 5268.90 55.09 -0.62%
+numa03.sh Real: 780.09 948.64 831.43 63.02 -3.05%
+numa03.sh Sys: 104.96 136.92 116.31 11.34 4.591%
+numa03.sh User: 60465.42 73339.78 64368.03 4700.14 -2.72%
+numa04.sh Real: 412.60 681.92 521.29 96.64 -7.36%
+numa04.sh Sys: 210.32 314.10 251.77 37.71 41.74%
+numa04.sh User: 34026.38 45581.20 38534.49 4198.53 2.825%
+numa05.sh Real: 394.79 439.63 411.35 16.87 6.140%
+numa05.sh Sys: 238.32 330.09 292.31 38.32 -9.04%
+numa05.sh User: 33456.45 34876.07 34138.62 609.45 2.741%
+
+While this change introduces a regression, it is needed from a correctness
+perspective. It also helps consolidation, as seen from the perf bench
+output.
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-8-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index f504c18e5c92..a81c475677e7 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1340,7 +1340,7 @@ static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
+ * of each group. Skip other nodes.
+ */
+ if (sched_numa_topology_type == NUMA_BACKPLANE &&
+- dist > maxdist)
++ dist >= maxdist)
+ continue;
+
+ /* Add up the faults from nearby nodes. */
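
To see the off-by-one concretely, the following is an illustrative, standalone C sketch (not kernel code; distances and fault counts are invented): with the old '>' check, two candidates that are both 'hoplimit' away score identically, while the '>=' check keeps them apart.

#include <stdio.h>

/* Sum faults from nearby nodes, optionally skipping nodes at the hoplimit. */
static long score(const int *dist, const long *faults, int nr,
                  int maxdist, int skip_at_limit)
{
        long sum = 0;

        for (int n = 0; n < nr; n++) {
                /* Old check: dist > maxdist; fixed check: dist >= maxdist. */
                if (skip_at_limit ? dist[n] >= maxdist : dist[n] > maxdist)
                        continue;
                sum += faults[n];
        }
        return sum;
}

int main(void)
{
        /* Candidate A has a neighbour at distance 20, candidate B at 30. */
        int dist_a[] = { 10, 20 }, dist_b[] = { 10, 30 };
        long faults[] = { 100, 50 };
        int maxdist = 30;       /* the 'hoplimit' */

        printf("old: %ld vs %ld\n", score(dist_a, faults, 2, maxdist, 0),
               score(dist_b, faults, 2, maxdist, 0));
        printf("new: %ld vs %ld\n", score(dist_a, faults, 2, maxdist, 1),
               score(dist_b, faults, 2, maxdist, 1));
        return 0;
}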
diff --git a/patches.suse/sched-numa-Update-the-scan-period-without-holding-the-numa_group-lock.patch b/patches.suse/sched-numa-Update-the-scan-period-without-holding-the-numa_group-lock.patch
new file mode 100644
index 0000000000..c0597e8622
--- /dev/null
+++ b/patches.suse/sched-numa-Update-the-scan-period-without-holding-the-numa_group-lock.patch
@@ -0,0 +1,55 @@
+From e4b70fe94501d795895bdb9291b96afe43e4e75f Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:55 +0530
+Subject: [PATCH] sched/numa: Update the scan period without holding the
+ numa_group lock
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: 30619c89b17d46808b4cdf5b3f81b6a01ade1473
+
+The metrics for updating scan periods are local or task specific.
+Currently this update happens under the numa_group lock, which seems
+unnecessary. Hence move this update outside the lock.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25355.9 25645.4 1.141
+1 72812 72142 -0.92
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Rik van Riel <riel@surriel.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-15-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0b5b2a011a9c..bb545ec07de7 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2228,8 +2228,6 @@ static void task_numa_placement(struct task_struct *p)
+ }
+ }
+
+- update_task_scan_period(p, fault_types[0], fault_types[1]);
+-
+ if (p->numa_group) {
+ numa_group_count_active_nodes(p->numa_group);
+ spin_unlock_irq(group_lock);
+@@ -2244,6 +2242,8 @@ static void task_numa_placement(struct task_struct *p)
+ if (task_node(p) != p->numa_preferred_nid)
+ numa_migrate_preferred(p);
+ }
++
++ update_task_scan_period(p, fault_types[0], fault_types[1]);
+ }
+
+ static inline int get_numa_group(struct numa_group *grp)
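
The shape of the change is simply a task-local update moved out of the group-locked region. A minimal standalone sketch follows (not kernel code; the pthread mutex and helper names stand in for the kernel primitives).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t group_lock = PTHREAD_MUTEX_INITIALIZER;

static void update_task_scan_period(long priv, long shared)
{
        /* Task-local only: no shared group state is touched here. */
        printf("scan period input: priv=%ld shared=%ld\n", priv, shared);
}

static void task_numa_placement_sketch(long priv, long shared, int has_group)
{
        if (has_group) {
                pthread_mutex_lock(&group_lock);
                /* ... group fault accounting would happen here ... */
                pthread_mutex_unlock(&group_lock);
        }

        /* Moved after the unlock, shrinking the critical section. */
        update_task_scan_period(priv, shared);
}

int main(void)
{
        task_numa_placement_sketch(3, 7, 1);
        return 0;
}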
diff --git a/patches.suse/sched-numa-Use-group_weights-to-identify-if-migration-degrades-locality.patch b/patches.suse/sched-numa-Use-group_weights-to-identify-if-migration-degrades-locality.patch
new file mode 100644
index 0000000000..ea662343e2
--- /dev/null
+++ b/patches.suse/sched-numa-Use-group_weights-to-identify-if-migration-degrades-locality.patch
@@ -0,0 +1,126 @@
+From d6fc1a4843b5ce0e71484c6bc3a99002c5f36702 Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:56 +0530
+Subject: [PATCH] sched/numa: Use group_weights to identify if migration
+ degrades locality
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: f35678b6a17063f3b0d391af5ab8f8c83cf31b0c
+
+On NUMA_BACKPLANE and NUMA_GLUELESS_MESH systems, tasks/memory should be
+consolidated to the closest group of nodes. In such a case, relying on the
+group_faults metric may not always help consolidation: a node closer to
+the preferred node may have fewer faults than a node further away from the
+preferred node, and moving to the node with more faults might then work
+against NUMA consolidation.
+
+Using group_weight instead would help to consolidate the task/memory
+around the preferred_node.
+
+While here, to be on the conservative side, don't override the
+migrate_degrades_locality() logic for CPU_NEWLY_IDLE load balancing.
+
+Note: Similar problems exist with should_numa_migrate_memory() and will be
+dealt with separately.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25645.4 25960 1.22
+1 72142 73550 1.95
+
+Running SPECjbb2005 on a 16 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+8 110199 120071 8.958
+1 176303 176249 -0.03
+
+(numbers from v1 based on v4.17-rc5)
+Testcase Time: Min Max Avg StdDev
+numa01.sh Real: 490.04 774.86 596.26 96.46
+numa01.sh Sys: 151.52 242.88 184.82 31.71
+numa01.sh User: 41418.41 60844.59 48776.09 6564.27
+numa02.sh Real: 60.14 62.94 60.98 1.00
+numa02.sh Sys: 16.11 30.77 21.20 5.28
+numa02.sh User: 5184.33 5311.09 5228.50 44.24
+numa03.sh Real: 790.95 856.35 826.41 24.11
+numa03.sh Sys: 114.93 118.85 117.05 1.63
+numa03.sh User: 60990.99 64959.28 63470.43 1415.44
+numa04.sh Real: 434.37 597.92 504.87 59.70
+numa04.sh Sys: 237.63 397.40 289.74 55.98
+numa04.sh User: 34854.87 41121.83 38572.52 2615.84
+numa05.sh Real: 386.77 448.90 417.22 22.79
+numa05.sh Sys: 149.23 379.95 303.04 79.55
+numa05.sh User: 32951.76 35959.58 34562.18 1034.05
+
+Testcase Time: Min Max Avg StdDev %Change
+numa01.sh Real: 493.19 672.88 597.51 59.38 -0.20%
+numa01.sh Sys: 150.09 245.48 207.76 34.26 -11.0%
+numa01.sh User: 41928.51 53779.17 48747.06 3901.39 0.059%
+numa02.sh Real: 60.63 62.87 61.22 0.83 -0.39%
+numa02.sh Sys: 16.64 27.97 20.25 4.06 4.691%
+numa02.sh User: 5222.92 5309.60 5254.03 29.98 -0.48%
+numa03.sh Real: 821.52 902.15 863.60 32.41 -4.30%
+numa03.sh Sys: 112.04 130.66 118.35 7.08 -1.09%
+numa03.sh User: 62245.16 69165.14 66443.04 2450.32 -4.47%
+numa04.sh Real: 414.53 519.57 476.25 37.00 6.009%
+numa04.sh Sys: 181.84 335.67 280.41 54.07 3.327%
+numa04.sh User: 33924.50 39115.39 37343.78 1934.26 3.290%
+numa05.sh Real: 408.30 441.45 417.90 12.05 -0.16%
+numa05.sh Sys: 233.41 381.60 295.58 57.37 2.523%
+numa05.sh User: 33301.31 35972.50 34335.19 938.94 0.661%
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-16-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index bb545ec07de7..f93d6756d4a5 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -7067,8 +7067,8 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
+ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
+ {
+ struct numa_group *numa_group = rcu_dereference(p->numa_group);
+- unsigned long src_faults, dst_faults;
+- int src_nid, dst_nid;
++ unsigned long src_weight, dst_weight;
++ int src_nid, dst_nid, dist;
+
+ if (!static_branch_likely(&sched_numa_balancing))
+ return -1;
+@@ -7095,18 +7095,19 @@ static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
+ return 0;
+
+ /* Leaving a core idle is often worse than degrading locality. */
+- if (env->idle != CPU_NOT_IDLE)
++ if (env->idle == CPU_IDLE)
+ return -1;
+
++ dist = node_distance(src_nid, dst_nid);
+ if (numa_group) {
+- src_faults = group_faults(p, src_nid);
+- dst_faults = group_faults(p, dst_nid);
++ src_weight = group_weight(p, src_nid, dist);
++ dst_weight = group_weight(p, dst_nid, dist);
+ } else {
+- src_faults = task_faults(p, src_nid);
+- dst_faults = task_faults(p, dst_nid);
++ src_weight = task_weight(p, src_nid, dist);
++ dst_weight = task_weight(p, dst_nid, dist);
+ }
+
+- return dst_faults < src_faults;
++ return dst_weight < src_weight;
+ }
+
+ #else
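
The effect can be illustrated with a toy, standalone C sketch (not kernel code; the weighting scheme and all numbers are invented): with raw fault counts the destination looks worse, while a distance-aware weight that folds in nearby nodes' faults shows it as the better consolidation target.

#include <stdio.h>

/* Toy weight: own faults plus half of the faults one hop away. */
static long weight(long own_faults, long nearby_faults)
{
        return own_faults + nearby_faults / 2;
}

int main(void)
{
        /* dst is closer to the bulk of the group's memory than src. */
        long src_faults = 120, src_nearby = 40;
        long dst_faults = 100, dst_nearby = 200;

        printf("raw faults  : src=%ld dst=%ld -> migration looks like a loss\n",
               src_faults, dst_faults);
        printf("group weight: src=%ld dst=%ld -> migration consolidates\n",
               weight(src_faults, src_nearby), weight(dst_faults, dst_nearby));
        return 0;
}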
diff --git a/patches.suse/sched-numa-Use-task-faults-only-if-numa_group-is-not-yet-set-up.patch b/patches.suse/sched-numa-Use-task-faults-only-if-numa_group-is-not-yet-set-up.patch
new file mode 100644
index 0000000000..a1e201480d
--- /dev/null
+++ b/patches.suse/sched-numa-Use-task-faults-only-if-numa_group-is-not-yet-set-up.patch
@@ -0,0 +1,124 @@
+From 7bdb19f67f9d5ff45174cdd09756884a4949060c Mon Sep 17 00:00:00 2001
+From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Date: Wed, 20 Jun 2018 22:32:46 +0530
+Subject: [PATCH] sched/numa: Use task faults only if numa_group is not yet set
+ up
+
+References: bnc#1101669 optimise numa balancing for fast migrate
+Patch-mainline: v4.19
+Git-commit: f03bb6760b8e5e2bcecc88d2a2ef41c09adcab39
+
+When numa_group faults are available, task_numa_placement only uses
+numa_group faults to evaluate the preferred node. However it still
+accounts task faults and even evaluates a preferred node based on task
+faults, only to discard it in favour of the preferred node chosen on the
+basis of numa_group faults.
+
+Instead, use task faults only if numa_group is not yet set up.
+
+Running SPECjbb2005 on a 4 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+16 25549.6 25215.7 -1.30
+1 73190 72107 -1.47
+
+Running SPECjbb2005 on a 16 node machine and comparing bops/JVM
+JVMS LAST_PATCH WITH_PATCH %CHANGE
+8 113437 113372 -0.05
+1 196130 177403 -9.54
+
+(numbers from v1 based on v4.17-rc5)
+Testcase Time: Min Max Avg StdDev
+numa01.sh Real: 506.35 794.46 599.06 104.26
+numa01.sh Sys: 150.37 223.56 195.99 24.94
+numa01.sh User: 43450.69 61752.04 49281.50 6635.33
+numa02.sh Real: 60.33 62.40 61.31 0.90
+numa02.sh Sys: 18.12 31.66 24.28 5.89
+numa02.sh User: 5203.91 5325.32 5260.29 49.98
+numa03.sh Real: 696.47 853.62 745.80 57.28
+numa03.sh Sys: 85.68 123.71 97.89 13.48
+numa03.sh User: 55978.45 66418.63 59254.94 3737.97
+numa04.sh Real: 444.05 514.83 497.06 26.85
+numa04.sh Sys: 230.39 375.79 316.23 48.58
+numa04.sh User: 35403.12 41004.10 39720.80 2163.08
+numa05.sh Real: 423.09 460.41 439.57 13.92
+numa05.sh Sys: 287.38 480.15 369.37 68.52
+numa05.sh User: 34732.12 38016.80 36255.85 1070.51
+
+Testcase Time: Min Max Avg StdDev %Change
+numa01.sh Real: 478.45 565.90 515.11 30.87 16.29%
+numa01.sh Sys: 207.79 271.04 232.94 21.33 -15.8%
+numa01.sh User: 39763.93 47303.12 43210.73 2644.86 14.04%
+numa02.sh Real: 60.00 61.46 60.78 0.49 0.871%
+numa02.sh Sys: 15.71 25.31 20.69 3.42 17.35%
+numa02.sh User: 5175.92 5265.86 5235.97 32.82 0.464%
+numa03.sh Real: 776.42 834.85 806.01 23.22 -7.47%
+numa03.sh Sys: 114.43 128.75 121.65 5.49 -19.5%
+numa03.sh User: 60773.93 64855.25 62616.91 1576.39 -5.36%
+numa04.sh Real: 456.93 511.95 482.91 20.88 2.930%
+numa04.sh Sys: 178.09 460.89 356.86 94.58 -11.3%
+numa04.sh User: 36312.09 42553.24 39623.21 2247.96 0.246%
+numa05.sh Real: 393.98 493.48 436.61 35.59 0.677%
+numa05.sh Sys: 164.49 329.15 265.87 61.78 38.92%
+numa05.sh User: 33182.65 36654.53 35074.51 1187.71 3.368%
+
+Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1529514181-9842-6-git-send-email-srikar@linux.vnet.ibm.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+---
+ kernel/sched/fair.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 2da454425767..f504c18e5c92 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -2138,8 +2138,8 @@ static int preferred_group_nid(struct task_struct *p, int nid)
+
+ static void task_numa_placement(struct task_struct *p)
+ {
+- int seq, nid, max_nid = -1, max_group_nid = -1;
+- unsigned long max_faults = 0, max_group_faults = 0;
++ int seq, nid, max_nid = -1;
++ unsigned long max_faults = 0;
+ unsigned long fault_types[2] = { 0, 0 };
+ unsigned long total_faults;
+ u64 runtime, period;
+@@ -2218,15 +2218,15 @@ static void task_numa_placement(struct task_struct *p)
+ }
+ }
+
+- if (faults > max_faults) {
+- max_faults = faults;
++ if (!p->numa_group) {
++ if (faults > max_faults) {
++ max_faults = faults;
++ max_nid = nid;
++ }
++ } else if (group_faults > max_faults) {
++ max_faults = group_faults;
+ max_nid = nid;
+ }
+-
+- if (group_faults > max_group_faults) {
+- max_group_faults = group_faults;
+- max_group_nid = nid;
+- }
+ }
+
+ update_task_scan_period(p, fault_types[0], fault_types[1]);
+@@ -2234,7 +2234,7 @@ static void task_numa_placement(struct task_struct *p)
+ if (p->numa_group) {
+ numa_group_count_active_nodes(p->numa_group);
+ spin_unlock_irq(group_lock);
+- max_nid = preferred_group_nid(p, max_group_nid);
++ max_nid = preferred_group_nid(p, max_nid);
+ }
+
+ if (max_faults) {
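
The resulting selection logic is easy to see in an illustrative, standalone C sketch (not kernel code; arrays and sizes are example data): a single max tracker suffices because the fault source is chosen up front.

#include <stdio.h>

static int pick_preferred_nid(const long *task_faults,
                              const long *group_faults,
                              int nr_nodes, int has_group)
{
        long max_faults = 0;
        int nid, max_nid = -1;

        for (nid = 0; nid < nr_nodes; nid++) {
                /* Use task faults only when no group exists; else group faults. */
                long faults = has_group ? group_faults[nid] : task_faults[nid];

                if (faults > max_faults) {
                        max_faults = faults;
                        max_nid = nid;
                }
        }
        return max_nid;
}

int main(void)
{
        long task_faults[]  = { 10, 80, 30 };
        long group_faults[] = { 90, 40, 20 };

        printf("no group : prefer node %d\n",
               pick_preferred_nid(task_faults, group_faults, 3, 0));
        printf("in group : prefer node %d\n",
               pick_preferred_nid(task_faults, group_faults, 3, 1));
        return 0;
}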
diff --git a/series.conf b/series.conf
index b5fb21ccff..1154e64841 100644
--- a/series.conf
+++ b/series.conf
@@ -1232,6 +1232,7 @@
patches.arch/cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
patches.arch/cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
patches.arch/stop_machine-Provide-stop_machine_cpuslocked.patch
+ patches.arch/00-jump_label-reorder-hotplug-lock-and-jump_label_lock.patch
patches.fixes/cpu-hotplug-Remove-unused-check_for_tasks-function.patch
patches.arch/powerpc-Only-obtain-cpu_hotplug_lock-if-called-by-rt.patch
patches.fixes/xen-mce-dont-issue-error-message.patch
@@ -5109,6 +5110,8 @@
patches.suse/0001-locking-osq_lock-Fix-osq_lock-queue-corruption.patch
patches.suse/0001-locking-rwsem-spinlock-Add-killable-versions-of-__do.patch
patches.suse/0001-locking-rwsem-xadd-Add-killable-versions-of-rwsem_do.patch
+ patches.arch/01-jump_label-fix-concurrent-static_key_enable-disable.patch
+ patches.arch/02-jump_label-provide-hotplug-context-variants.patch
patches.fixes/futex-Remove-duplicated-code-and-fix-undefined-behav.patch
patches.suse/0001-smp-Avoid-using-two-cache-lines-for-struct-call_sing.patch
patches.suse/0001-locking-pvqspinlock-Relax-cmpxchg-s-to-improve-perfo.patch
@@ -7671,6 +7674,7 @@
patches.drivers/edac-skx_edac-fix-detection-of-single-rank-dimms.patch
patches.drivers/edac-skx_edac-handle-systems-with-segmented-pci-busses.patch
patches.drivers/edac-sb_edac-fix-missing-dimm-sysfs-entries-with-knl-snc2-snc4-mode.patch
+ patches.drivers/EDAC-sb_edac-Fix-missing-break-in-switch
patches.drivers/hwmon-pmbus-core-Prevent-unintentional-setting-of-pa.patch
patches.drivers/regulator-qcom_spmi-Include-offset-when-translating-
patches.drivers/regulator-tps65218-Fix-strobe-assignment
@@ -9035,6 +9039,7 @@
patches.drivers/iw_cxgb4-Fix-possible-circular-dependency-locking-wa.patch
patches.drivers/iw_cxgb4-only-call-the-cq-comp_handler-when-the-cq-i.patch
patches.drivers/iw_cxgb4-atomically-flush-the-qp.patch
+ patches.drivers/ACPI-PCI-Bail-early-in-acpi_pci_add_bus-if-there-is-
patches.drivers/PCI-AER-Report-non-fatal-errors-only-to-the-affected
patches.drivers/PCI-ASPM-Deal-with-missing-root-ports-in-link-state-
patches.drivers/PCI-ASPM-Account-for-downstream-device-s-Port-Common
@@ -11074,6 +11079,8 @@
patches.drivers/sdhci-Advertise-2.0v-supply-on-SDIO-host-controller
patches.drivers/spi-imx-do-not-access-registers-while-clocks-disable
patches.drivers/spi-sun6i-disable-unprepare-clocks-on-remove
+ patches.drivers/EDAC-octeon-Fix-an-uninitialized-variable-warning
+ patches.drivers/EDAC-mv64x60-Fix-an-error-handling-path
patches.suse/0001-pktcdvd-Fix-pkt_setup_dev-error-path.patch
patches.suse/0001-pktcdvd-Fix-a-recently-introduced-NULL-pointer-deref.patch
patches.suse/0095-dm-limit-the-max-bio-size-as-BIO_MAX_PAGES-PAGE_SIZE.patch
@@ -11970,6 +11977,7 @@
patches.suse/mm-pin-address_space-before-dereferencing-it-while-isolating-an-LRU-page.patch
patches.fixes/mm-fadvise-discard-partial-page-if-endbyte-is-also-E.patch
patches.suse/mm-numa-do-not-trap-faults-on-shared-data-section-pages.patch
+ patches.fixes/0001-typec-tcpm-fusb302-Resolve-out-of-order-messaging-ev.patch
patches.fixes/tools-usbip-fixes-build-with-musl-libc-toolchain.patch
patches.fixes/0001-usb-Don-t-print-a-warning-if-interface-driver-rebind.patch
patches.drivers/0001-usb-xhci-Make-some-static-functions-global.patch
@@ -12815,6 +12823,7 @@
patches.arch/s390-add-options-to-change-branch-prediction-behavio.patch
patches.arch/s390-sles15-05-05-bpoff-user-space.patch
patches.arch/s390-sles15-05-06-expoline.patch
+ patches.fixes/x86-xen-init-gs-very-early.patch
patches.fixes/cifs-silence-compiler-warnings-showing-up-with-gcc-8.patch
patches.drivers/cxgbit-call-neigh_event_send-to-update-MAC-address.patch
patches.fixes/iscsi-target-make-sure-to-wake-up-sleeping-login-wor.patch
@@ -13650,6 +13659,7 @@
patches.drivers/iwlwifi-mvm-make-sure-internal-station-has-a-valid-i
patches.drivers/iwlwifi-mvm-fix-array-out-of-bounds-reference
patches.drivers/brcmfmac-Fix-check-for-ISO3166-code
+ patches.fixes/0001-net-qmi_wwan-add-BroadMobi-BM806U-2020-2033.patch
patches.suse/bonding-fix-the-err-path-for-dev-hwaddr-sync-in-bond.patch
patches.suse/bonding-move-dev_mc_sync-after-master_upper_dev_link.patch
patches.suse/bonding-process-the-err-returned-by-dev_set_allmulti.patch
@@ -13658,6 +13668,7 @@
patches.suse/vhost_net-add-missing-lock-nesting-notation.patch
patches.fixes/batman-adv-fix-multicast-via-unicast-transmission-wi.patch
patches.fixes/batman-adv-fix-packet-loss-for-broadcasted-DHCP-pack.patch
+ patches.fixes/0001-net-usb-add-qmi_wwan-if-on-lte-modem-wistron-neweb-d.patch
patches.suse/net-fix-possible-out-of-bound-read-in-skb_network_pr.patch
patches.drivers/r8169-fix-setting-driver_data-after-register_netdev
patches.suse/strparser-Fix-sign-of-err-codes.patch
@@ -15142,6 +15153,7 @@
patches.drivers/drm-psr-Fix-missed-entry-in-PSR-setup-time-table
patches.suse/ipv6-sr-fix-memory-OOB-access-in-seg6_do_srh_encap-i.patch
patches.suse/mlxsw-spectrum-Forbid-creation-of-VLAN-1-over-port-L.patch
+ patches.fixes/0001-net-qmi_wwan-Add-Netgear-Aircard-779S.patch
patches.drivers/be2net-Fix-error-detection-logic-for-BE3.patch
patches.drivers/Revert-rt2800-use-TXOP_BACKOFF-for-probe-frames
patches.drivers/iwlwifi-pcie-compare-with-number-of-IRQs-requested-f
@@ -15184,6 +15196,7 @@
patches.fixes/0001-dlm-fix-a-clerical-error-when-set-SCTP_NODELAY.patch
patches.fixes/0002-dlm-make-sctp_connect_to_sock-return-in-specified-ti.patch
patches.fixes/0003-dlm-remove-O_NONBLOCK-flag-in-sctp_connect_to_sock.patch
+ patches.suse/sched-numa-Stagger-NUMA-balancing-scan-periods-for-new-threads.patch
patches.fixes/tools-power-turbostat-Correct-SNB_C1-C3_AUTO_UNDEMOT.patch
patches.drivers/PM-wakeup-Only-update-last-time-for-active-wakeup-so
patches.fixes/cpufreq-Fix-new-policy-initialization-during-limits-.patch
@@ -15311,6 +15324,7 @@
patches.drivers/ASoC-cs35l35-Add-use_single_rw-to-regmap-config
patches.fixes/kconfig-Avoid-format-overflow-warning-from-GCC-8.1
patches.fixes/ARM-8764-1-kgdb-fix-NUMREGBYTES-so-that-gdb_regs-is-.patch
+ patches.drivers/EDAC-altera-Fix-ARM64-build-warning
patches.fixes/printk-fix-possible-reuse-of-va_list-variable.patch
patches.drivers/clk-renesas-cpg-mssr-Stop-using-printk-format-pCr
patches.drivers/thermal-bcm2835-Stop-using-printk-format-pCr
@@ -15595,6 +15609,7 @@
patches.drivers/media-si470x-fix-__be16-annotations
patches.drivers/media-dvb_frontend-fix-locking-issues-at-dvb_fronten
patches.drivers/media-v4l2-compat-ioctl32-prevent-go-past-max-size
+ patches.drivers/media-omap3isp-fix-unbalanced-dma_iommu_mapping
patches.drivers/media-cx231xx-Add-support-for-AverMedia-DVD-EZMaker-
patches.drivers/media-siano-get-rid-of-__le32-__le16-cast-warnings
patches.drivers/media-cx88-Get-rid-of-spurious-call-to-cx8800_start_
@@ -15602,6 +15617,7 @@
patches.drivers/media-media-device-fix-ioctl-function-types
patches.drivers/media-rcar_jpu-Add-missing-clk_disable_unprepare-on-
patches.suse/0001-media-smiapp-fix-timeout-checking-in-smiapp_read_nvm.patch
+ patches.drivers/media-videobuf2-core-don-t-call-memop-finish-when-qu
patches.drivers/media-vivid-potential-integer-overflow-in-vidioc_g_e
patches.drivers/media-tw686x-Fix-incorrect-vb2_mem_ops-GFP-flags
patches.drivers/media-uvcvideo-Support-realtek-s-UVC-1.5-device
@@ -15968,6 +15984,7 @@
patches.fixes/ARM-davinci-board-da850-evm-fix-WP-pin-polarity-for-.patch
patches.drivers/usb-dwc3-of-simple-fix-use-after-free-on-remove.patch
patches.drivers/usb-dwc2-fix-the-incorrect-bitmaps-for-the-ports-of-.patch
+ patches.drivers/xhci-Fix-perceived-dead-host-due-to-runtime-s.patch
patches.fixes/0001-xhci-Fix-kernel-oops-in-trace_xhci_free_virt_device.patch
patches.fixes/0001-acpi-Add-helper-for-deactivating-memory-region.patch
patches.fixes/0001-usb-typec-ucsi-acpi-Workaround-for-cache-mode-issue.patch
@@ -16071,8 +16088,10 @@
patches.drivers/ibmvnic-Revise-RX-TX-queue-error-messages.patch
patches.suse/msft-hv-1666-hv-netvsc-fix-handling-of-fallback-to-single-queue-m.patch
patches.drivers/net-mlx4_en-Don-t-reuse-RX-page-when-XDP-is-set.patch
+ patches.fixes/0001-net-lan78xx-Fix-race-in-tx-pending-skb-size-calculat.patch
patches.drivers/ibmvnic-Fix-error-recovery-on-login-failure.patch
patches.drivers/net-cxgb3_main-fix-potential-Spectre-v1.patch
+ patches.fixes/0001-net-usb-asix-replace-mii_nway_restart-in-resume-path.patch
patches.fixes/batman-adv-Fix-bat_ogm_iv-best-gw-refcnt-after-netli.patch
patches.fixes/batman-adv-Fix-bat_v-best-gw-refcnt-after-netlink-du.patch
patches.fixes/batman-adv-Fix-debugfs-path-for-renamed-hardif.patch
@@ -16151,6 +16170,7 @@
patches.fixes/ext4-fix-inline-data-updates-with-checksums-enabled.patch
patches.fixes/ext4-check-for-allocation-block-validity-with-block-.patch
patches.fixes/ext4-fix-check-to-prevent-initializing-reserved-inod.patch
+ patches.fixes/0001-net-lan78xx-fix-rx-handling-before-first-packet-is-s.patch
patches.drivers/can-ems_usb-Fix-memory-leak-on-ems_usb_disconnect
patches.drivers/virtio_balloon-fix-another-race-between-migration-an
patches.fixes/scsi-qla2xxx-fix-unintialized-list-head-crash.patch
@@ -16160,14 +16180,32 @@
patches.fixes/audit-fix-potential-null-dereference-context-module.patch
patches.suse/cpufreq-intel_pstate-Limit-the-scope-of-HWP-dynamic-.patch
patches.drivers/iwlwifi-add-more-card-IDs-for-9000-series
+ patches.fixes/inet-frag-enforce-memory-limits-earlier.patch
+ patches.fixes/ipv4-frags-handle-possible-skb-truesize-change.patch
patches.drivers/crypto-padlock-aes-Fix-Nano-workaround-data-corrupti
patches.drivers/drm-re-enable-error-handling
patches.drivers/drm-atomic-Check-old_plane_state-crtc-in-drm_atomic_
patches.drivers/drm-atomic-Initialize-variables-in-drm_atomic_helper
patches.drivers/drm-vc4-Reset-x-y-_scaling-1-when-dealing-with-unipl
patches.fixes/nohz-Fix-local_timer_softirq_pending.patch
+ patches.fixes/xen-netfront-dont-cache-skb_shinfo.patch
+ patches.fixes/init-rename-and-re-order-boot_cpu_state_init.patch
+ patches.suse/sched-numa-Remove-redundant-field.patch
+ patches.suse/sched-numa-Evaluate-move-once-per-node.patch
+ patches.suse/sched-numa-Simplify-load_too_imbalanced.patch
+ patches.suse/sched-numa-Set-preferred_node-based-on-best_cpu.patch
+ patches.suse/sched-numa-Use-task-faults-only-if-numa_group-is-not-yet-set-up.patch
+ patches.suse/sched-debug-Reverse-the-order-of-printing-faults.patch
+ patches.suse/sched-numa-Skip-nodes-that-are-at-hoplimit.patch
+ patches.suse/sched-numa-Remove-unused-task_capacity-from-struct-numa_stats.patch
+ patches.suse/sched-numa-Modify-migrate_swap-to-accept-additional-parameters.patch
+ patches.suse/sched-numa-Update-the-scan-period-without-holding-the-numa_group-lock.patch
+ patches.suse/sched-numa-Use-group_weights-to-identify-if-migration-degrades-locality.patch
+ patches.suse/sched-numa-Move-task_numa_placement-closer-to-numa_migrate_preferred.patch
# davem/net-next
+ patches.fixes/ip-discard-IPv4-datagrams-with-overlapping-segments.patch
+ patches.fixes/ipv6-defrag-drop-non-last-frags-smaller-than-min-mtu.patch
patches.drivers/qed-remove-redundant-pointer-name.patch
patches.drivers/qed-Add-qed-APIs-for-PHY-module-query.patch
patches.drivers/qede-Add-driver-callbacks-for-eeprom-module-query.patch
@@ -16257,6 +16295,7 @@
# out-of-tree patches
patches.suse/0001-kvm-Introduce-nopvspin-kernel-parameter.patch
+ patches.fixes/xen-xsa270-fix.patch
########################################################
# end of sorted patches
@@ -16379,7 +16418,7 @@
patches.fixes/getcwd-close-race-with-d_move-called-by-lustre.patch
patches.fixes/vfs-use-synchronize_rcu_expedited-in-namespace_unlock.patch
patches.fixes/0001-autofs-revert-autofs-take-more-care-to-not-update-la.patch
- patches.suse/sched-numa-Stagger-NUMA-balancing-scan-periods-for-new-threads.patch
+ patches.suse/sched-numa-Remove-redundant-field-kabi.patch
########################################################
# misc small fixes
@@ -16463,6 +16502,8 @@
patches.suse/suse-hv-kvp_on_msg.dbg.patch
patches.suse/suse-hv-hv_compose_msi_msg.patch
+ patches.suse/hv-netvsc-Fix-NULL-dereference-at-single-queue-mode-.patch
+
# bsc#1056592
patches.suse/ras-cec-disable-cec.patch
@@ -16657,6 +16698,7 @@
patches.suse/bsc1084332-0003-lan78xx-Enable-LEDs-and-auto-negotiation.patch
patches.suse/bsc1084332-0004-lan78xx-Avoid-spurious-kevent-4-error.patch
patches.suse/0001-lan78xx-Connect-phy-early.patch
+ patches.kabi/ip-drop-IPSTATS_MIB_REASM_OVERLAPS.patch
########################################################
# Netfilter
@@ -16995,6 +17037,65 @@
patches.kabi/KABI-powerpc-mmu_context-provide-old-version-of-mm_i.patch
patches.kabi/KABI-x86-kvm-kABI-workaround-for-CVE-2018-10853-fix.patch
+ ######
+ # L1TF
+ ######
+ # bare metal
+ patches.arch/x86-l1tf-01-increase-32bitPAE-__PHYSICAL_PAGE_MASK.patch
+ patches.arch/x86-l1tf-02-change-order-of-offset-type.patch
+ patches.arch/x86-l1tf-03-protect-swap-entries.patch
+ patches.arch/x86-l1tf-04-protect-PROT_NONE-ptes.patch
+ patches.arch/x86-l1tf-05-make-sure-first-pages-is-reserved.patch
+ patches.arch/x86-l1tf-06-add-sysfs-report.patch
+ patches.arch/x86-l1tf-07-limit-swap-file-size.patch
+ patches.arch/x86-l1tf-08-disallow-non-privileged-high-MMIO-PROT_NONE.patch
+ # smt=off
+ patches.arch/02-sched-smt-update-sched_smt_present-at-runtime.patch
+ patches.arch/03-x86-smp-provide-topology_is_primary_thread.patch
+ patches.arch/04-x86-topology-provide-topology_smt_supported.patch
+ patches.arch/05-cpu-hotplug-make-bringup-teardown-of-smp-threads-symmetric.patch
+ patches.arch/06-cpu-hotplug-split-do_cpu_down.patch
+ patches.arch/07-cpu-hotplug-provide-knobs-to-control-smt.patch
+ patches.arch/08-x86-cpu-remove-the-pointless-cpu-printout.patch
+ patches.arch/09-x86-cpu-amd-remove-the-pointless-detect_ht-call.patch
+ patches.arch/10-x86-cpu-common-provide-detect_ht_early.patch
+ patches.arch/11-x86-cpu-topology-provide-detect_extended_topology_early.patch
+ patches.arch/12-x86-cpu-intel-evaluate-smp_num_siblings-early.patch
+ patches.arch/13-x86-cpu-amd-do-not-check-cpuid-max-ext-level-before-parsing-smp-info.patch
+ patches.arch/14-x86-cpu-amd-evaluate-smp_num_siblings-early.patch
+ patches.arch/16-x86-cpu-amd-move-topoext-reenablement-before-reading-smp_num_siblings.patch
+ patches.arch/17-cpu-hotplug-Boot-HT-siblings-at-least-once.patch
+ patches.arch/18-cpu-hotplug-Online-siblings-when-SMT-control-is-turn.patch
+ # KVM
+ patches.arch/00-x86-cpufeatures-Add-detection-of-L1D-cache-flush-sup.patch
+ patches.arch/01-x86-KVM-Warn-user-if-KVM-is-loaded-SMT-and-L1TF-CPU-.patch
+ patches.arch/02-x86-KVM-VMX-Add-module-argument-for-L1TF-mitigation.patch
+ patches.arch/03-x86-KVM-VMX-Add-L1D-flush-algorithm.patch
+ patches.arch/04-x86-KVM-VMX-Add-L1D-MSR-based-flush.patch
+ patches.arch/05-x86-KVM-VMX-Add-L1D-flush-logic.patch
+ patches.arch/06-x86-KVM-VMX-Split-the-VMX-MSR-LOAD-structures-to-hav.patch
+ patches.arch/07-x86-KVM-VMX-Add-find_msr-helper-function.patch
+ patches.arch/08-x86-KVM-VMX-Separate-the-VMX-AUTOLOAD-guest-host-num.patch
+ patches.arch/09-x86-KVM-VMX-Extend-add_atomic_switch_msr-to-allow-VM.patch
+ patches.arch/10-x86-KVM-VMX-Use-MSR-save-list-for-IA32_FLUSH_CMD-if-.patch
+ # L1TF runtime control
+ patches.arch/0001-x86-litf-Introduce-vmx-status-variable.patch
+ patches.arch/0002-x86-kvm-Drop-L1TF-MSR-list-approach.patch
+ patches.arch/0003-x86-l1tf-Handle-EPT-disabled-state-proper.patch
+ patches.arch/0004-x86-kvm-Move-l1tf-setup-function.patch
+ patches.arch/0005-x86-kvm-Add-static-key-for-flush-always.patch
+ patches.arch/0006-x86-kvm-Serialize-L1D-flush-parameter-setter.patch
+ patches.arch/0007-x86-kvm-Allow-runtime-control-of-L1D-flush.patch
+ patches.arch/0008-cpu-hotplug-Expose-SMT-control-init-function.patch
+ patches.arch/0009-cpu-hotplug-Set-CPU_SMT_NOT_SUPPORTED-early.patch
+ patches.arch/0010-x86-bugs-kvm-Introduce-boot-time-control-of-L1TF-mit.patch
+ patches.arch/0011-Documentation-Add-section-about-CPU-vulnerabilities.patch
+ patches.arch/0012-cpu-hotplug-detect-SMT-disabled-by-BIOS.patch
+ # fixes
+ patches.arch/0001-x86-KVM-VMX-Initialize-the-vmx_l1d_flush_pages-conte.patch
+
+ patches.kabi/kvm_x86_ops-l1tf-kabi-fix.patch
+
########################################################
# You'd better have a good reason for adding a patch
# below here.
@@ -17113,3 +17214,5 @@
+agraf patches.arch/arm64-bsc1045298-KVM-arm-arm64-Handle-hva-aging-while-destroying-the-.patch
###
+bsc1094575 patches.drivers/s390-qeth-use-Read-device-to-query-hypervisor-for-MA.patch
+
+