author     Denis Kirjanov <dkirjanov@suse.com>    2019-09-25 15:31:15 +0300
committer  Denis Kirjanov <dkirjanov@suse.com>    2019-09-25 15:32:39 +0300
commit     846b396cb42728e55cfe179cce20d49117fd4547 (patch)
tree       789f88bd8d561b1a43d7034554943148dc9f2d59
parent     92f27e76ee3450c3905909aa0c6d903c8b3c9405 (diff)
parent     ea9cc185749e1a977829840c88e3b4a0bf9f7fe1 (diff)
Merge 'users/jroedel/SLE12-SP5/for-next' into SLE12-SP5
Pull iommu fixes from Joerg Roedel
-rw-r--r--  patches.suse/iommu-amd-fix-race-in-increase_address_space            68
-rw-r--r--  patches.suse/iommu-amd-flush-old-domains-in-kdump-kernel              79
-rw-r--r--  patches.suse/iommu-dma-fix-for-dereferencing-before-null-checking     39
-rw-r--r--  patches.suse/iommu-don-t-use-sme_active-in-generic-code               33
-rw-r--r--  patches.suse/iommu-iova-avoid-false-sharing-on-fq_timer_on            44
-rw-r--r--  series.conf                                                            5
6 files changed, 268 insertions, 0 deletions
diff --git a/patches.suse/iommu-amd-fix-race-in-increase_address_space b/patches.suse/iommu-amd-fix-race-in-increase_address_space
new file mode 100644
index 0000000000..fa34829dea
--- /dev/null
+++ b/patches.suse/iommu-amd-fix-race-in-increase_address_space
@@ -0,0 +1,68 @@
+From: Joerg Roedel <jroedel@suse.de>
+Date: Fri, 6 Sep 2019 10:39:54 +0200
+Subject: iommu/amd: Fix race in increase_address_space()
+Git-commit: 754265bcab78a9014f0f99cd35e0d610fcd7dfa7
+Patch-mainline: v5.3-rc8
+References: bsc#1151697
+
+After the conversion to lock-less dma-api calls, the
+increase_address_space() function can be called without any
+locking. Multiple CPUs could potentially race for increasing
+the address space, leading to invalid domain->mode settings
+and invalid page-tables. This has been happening in the wild
+under high IO load and memory pressure.
+
+Fix the race by locking this operation. The function is
+called infrequently so that this does not introduce
+a performance regression in the dma-api path again.
+
+Reported-by: Qian Cai <cai@lca.pw>
+Fixes: 256e4621c21a ('iommu/amd: Make use of the generic IOVA allocator')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/amd_iommu.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index f853b96ee547..61de81965c44 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1435,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
+ * another level increases the size of the address space by 9 bits to a size up
+ * to 64 bits.
+ */
+-static bool increase_address_space(struct protection_domain *domain,
++static void increase_address_space(struct protection_domain *domain,
+ gfp_t gfp)
+ {
++ unsigned long flags;
+ u64 *pte;
+
+- if (domain->mode == PAGE_MODE_6_LEVEL)
++ spin_lock_irqsave(&domain->lock, flags);
++
++ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
+ /* address space already 64 bit large */
+- return false;
++ goto out;
+
+ pte = (void *)get_zeroed_page(gfp);
+ if (!pte)
+- return false;
++ goto out;
+
+ *pte = PM_LEVEL_PDE(domain->mode,
+ iommu_virt_to_phys(domain->pt_root));
+@@ -1454,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
+ domain->mode += 1;
+ domain->updated = true;
+
+- return true;
++out:
++ spin_unlock_irqrestore(&domain->lock, flags);
++
++ return;
+ }
+
+ static u64 *alloc_pte(struct protection_domain *domain,
+
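
The shape of the fix above (take the domain lock, re-check the condition that lock-less callers may have raced on, then mutate the shared state) is a general check-under-lock pattern. Below is a minimal user-space sketch of that idea; the page_table struct, increase_levels() and the pthread mutex are illustrative stand-ins, not the kernel's types or locking API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_LEVEL 6

/* Hypothetical stand-in for struct protection_domain. */
struct page_table {
	pthread_mutex_t lock;
	int mode;		/* current number of page-table levels */
	void *root;		/* top-level table */
};

/*
 * Grow the table by one level. Callers may race to get here, so the
 * current level is checked only after the lock is held, mirroring the
 * increase_address_space() fix.
 */
static void increase_levels(struct page_table *pt)
{
	void *new_root;

	pthread_mutex_lock(&pt->lock);

	if (pt->mode == MAX_LEVEL)	/* already at maximum depth */
		goto out;

	new_root = calloc(1, 4096);	/* new top-level table */
	if (!new_root)
		goto out;

	/* Link the old root below the new one (details elided). */
	pt->root = new_root;
	pt->mode += 1;

out:
	pthread_mutex_unlock(&pt->lock);
}

int main(void)
{
	struct page_table pt = { .lock = PTHREAD_MUTEX_INITIALIZER,
				 .mode = 3, .root = NULL };

	increase_levels(&pt);
	printf("mode is now %d\n", pt.mode);
	free(pt.root);
	return 0;
}
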
diff --git a/patches.suse/iommu-amd-flush-old-domains-in-kdump-kernel b/patches.suse/iommu-amd-flush-old-domains-in-kdump-kernel
new file mode 100644
index 0000000000..3e5841850f
--- /dev/null
+++ b/patches.suse/iommu-amd-flush-old-domains-in-kdump-kernel
@@ -0,0 +1,79 @@
+From: Stuart Hayes <stuart.w.hayes@gmail.com>
+Date: Thu, 5 Sep 2019 12:09:48 -0500
+Subject: iommu/amd: Flush old domains in kdump kernel
+Git-commit: 36b7200f67dfe75b416b5281ed4ace9927b513bc
+Patch-mainline: v5.3-rc8
+References: bsc#1151698
+
+When devices are attached to the amd_iommu in a kdump kernel, the old device
+table entries (DTEs), which were copied from the crashed kernel, will be
+overwritten with a new domain number. When the new DTE is written, the IOMMU
+is told to flush the DTE from its internal cache--but it is not told to flush
+the translation cache entries for the old domain number.
+
+Without this patch, AMD systems using the tg3 network driver fail when kdump
+tries to save the vmcore to a network system, showing network timeouts and
+(sometimes) IOMMU errors in the kernel log.
+
+This patch will flush IOMMU translation cache entries for the old domain when
+a DTE gets overwritten with a new domain number.
+
+Signed-off-by: Stuart Hayes <stuart.w.hayes@gmail.com>
+Fixes: 3ac3e5ee5ed5 ('iommu/amd: Copy old trans table from old kernel')
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/amd_iommu.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index b607a92791d3..f853b96ee547 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
+ iommu_completion_wait(iommu);
+ }
+
++static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
++{
++ struct iommu_cmd cmd;
++
++ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
++ dom_id, 1);
++ iommu_queue_command(iommu, &cmd);
++
++ iommu_completion_wait(iommu);
++}
++
+ static void amd_iommu_flush_all(struct amd_iommu *iommu)
+ {
+ struct iommu_cmd cmd;
+@@ -1873,6 +1884,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
+ {
+ u64 pte_root = 0;
+ u64 flags = 0;
++ u32 old_domid;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ pte_root = iommu_virt_to_phys(domain->pt_root);
+@@ -1922,8 +1934,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
+ flags &= ~DEV_DOMID_MASK;
+ flags |= domain->id;
+
++ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
+ amd_iommu_dev_table[devid].data[1] = flags;
+ amd_iommu_dev_table[devid].data[0] = pte_root;
++
++ /*
++ * A kdump kernel might be replacing a domain ID that was copied from
++ * the previous kernel--if so, it needs to flush the translation cache
++ * entries for the old domain ID that is being overwritten
++ */
++ if (old_domid) {
++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
++
++ amd_iommu_flush_tlb_domid(iommu, old_domid);
++ }
+ }
+
+ static void clear_dte_entry(u16 devid)
+
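
The essential ordering in the change above is: read the identifier currently stored in the table entry before overwriting it, then invalidate any cached state still tagged with that old identifier. A self-contained sketch of that ordering is below; the dev_table, tlb array and flush_domain() helper are hypothetical, where the kernel patch does the equivalent with amd_iommu_flush_tlb_domid().

#include <stdint.h>
#include <stdio.h>

#define NUM_DEVICES  8
#define CACHE_SLOTS  16

/* Hypothetical device table: one domain ID per device. */
static uint16_t dev_table[NUM_DEVICES];

/* Hypothetical translation cache: entries tagged with a domain ID. */
struct cache_entry { uint16_t domid; int valid; };
static struct cache_entry tlb[CACHE_SLOTS];

/* Drop every cached translation that belongs to a given domain. */
static void flush_domain(uint16_t domid)
{
	for (int i = 0; i < CACHE_SLOTS; i++)
		if (tlb[i].valid && tlb[i].domid == domid)
			tlb[i].valid = 0;
}

/*
 * Point a device at a new domain. The old domain ID is read before the
 * overwrite so its stale cache entries can be flushed, the same
 * ordering the set_dte_entry() change above enforces.
 */
static void set_device_domain(unsigned dev, uint16_t new_domid)
{
	uint16_t old_domid = dev_table[dev];

	dev_table[dev] = new_domid;

	if (old_domid)
		flush_domain(old_domid);
}

int main(void)
{
	dev_table[3] = 42;			/* inherited from the crashed kernel */
	tlb[0] = (struct cache_entry){ 42, 1 };	/* stale translation for domain 42 */

	set_device_domain(3, 7);
	printf("slot 0 valid after rebind: %d\n", tlb[0].valid);
	return 0;
}
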
diff --git a/patches.suse/iommu-dma-fix-for-dereferencing-before-null-checking b/patches.suse/iommu-dma-fix-for-dereferencing-before-null-checking
new file mode 100644
index 0000000000..253c5f9675
--- /dev/null
+++ b/patches.suse/iommu-dma-fix-for-dereferencing-before-null-checking
@@ -0,0 +1,39 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Sat, 24 Aug 2019 09:47:12 +0800
+Subject: iommu/dma: Fix for dereferencing before null checking
+Git-commit: 6b0c54e7f2715997c366e8374209bc74259b0a59
+Patch-mainline: v5.4-rc1
+References: bsc#1151699
+
+The cookie is dereferenced before null checking in the function
+iommu_dma_init_domain.
+
+This patch moves the dereferencing after the null checking.
+
+Fixes: fdbe574eb693 ("iommu/dma: Allow MSI-only cookies")
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/dma-iommu.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/dma-iommu.c
++++ b/drivers/iommu/dma-iommu.c
+@@ -292,13 +292,15 @@ int iommu_dma_init_domain(struct iommu_d
+ u64 size, struct device *dev)
+ {
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+- struct iova_domain *iovad = &cookie->iovad;
+ unsigned long order, base_pfn, end_pfn;
++ struct iova_domain *iovad;
+ int attr;
+
+ if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+ return -EINVAL;
+
++ iovad = &cookie->iovad;
++
+ /* Use the smallest supported page size for IOVA granularity */
+ order = __ffs(domain->pgsize_bitmap);
+ base_pfn = max_t(unsigned long, 1, base >> order);
+
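
Strictly speaking, taking the address of a member through the pointer does not load memory, which is why the original code did not crash; the problem is that the pointer is used before it is validated, which static analyzers flag and which entitles the compiler to treat the later NULL check as dead. A tiny sketch of the wrong and fixed orderings, with an illustrative cookie struct rather than the kernel's types:

#include <stdio.h>

struct cookie { int type; long iovad; };

/*
 * Wrong order: the member is reached through the pointer before the
 * pointer itself has been validated.
 */
static int init_bad(struct cookie *c)
{
	long *iovad = &c->iovad;	/* pointer used here ... */

	if (!c)				/* ... so this check comes too late */
		return -1;

	*iovad = 1;
	return 0;
}

/* Fixed order, as in the iommu_dma_init_domain() change: check first. */
static int init_good(struct cookie *c)
{
	long *iovad;

	if (!c)
		return -1;

	iovad = &c->iovad;
	*iovad = 1;
	return 0;
}

int main(void)
{
	struct cookie c = { 0, 0 };

	printf("bad:  %d\n", init_bad(&c));
	printf("good: %d\n", init_good(&c));
	return 0;
}
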
diff --git a/patches.suse/iommu-don-t-use-sme_active-in-generic-code b/patches.suse/iommu-don-t-use-sme_active-in-generic-code
new file mode 100644
index 0000000000..d721659852
--- /dev/null
+++ b/patches.suse/iommu-don-t-use-sme_active-in-generic-code
@@ -0,0 +1,33 @@
+From: Joerg Roedel <jroedel@suse.de>
+Date: Tue, 3 Sep 2019 15:15:44 +0200
+Subject: iommu: Don't use sme_active() in generic code
+Git-commit: 2896ba40d0becdb72b45f096cad70633abc014f6
+Patch-mainline: v5.4-rc1
+References: bsc#1151700
+
+Switch to the generic function mem_encrypt_active() because
+sme_active() is x86-specific and can't be called from
+generic code on platforms other than x86.
+
+Fixes: 2cc13bb4f59f ("iommu: Disable passthrough mode when SME is active")
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/iommu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 66cfacaa483d..d658c7c6a2ab 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -120,8 +120,8 @@ static int __init iommu_subsys_init(void)
+ else
+ iommu_set_default_translated(false);
+
+- if (iommu_default_passthrough() && sme_active()) {
+- pr_info("SME detected - Disabling default IOMMU Passthrough\n");
++ if (iommu_default_passthrough() && mem_encrypt_active()) {
++ pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
+ iommu_set_default_translated(false);
+ }
+ }
+
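
The two-line change works because generic code now depends only on a predicate that exists on every architecture, while the x86-only sme_active() stays behind arch code. One compact way to picture that split (not the kernel's actual mechanism, which lives in per-arch headers and Kconfig) is a generic helper with an arch-conditional body:

#include <stdbool.h>
#include <stdio.h>

/* Arch-specific detail, only compiled in for x86 builds. */
#if defined(__x86_64__) || defined(__i386__)
static bool sme_active(void)
{
	return false;	/* placeholder for the real SME state check */
}
#endif

/*
 * Generic predicate that every build can call. On x86 it defers to the
 * SME check; elsewhere it falls back to "no memory encryption".
 */
static bool mem_encrypt_active(void)
{
#if defined(__x86_64__) || defined(__i386__)
	return sme_active();
#else
	return false;
#endif
}

/* Generic code only ever calls the generic predicate. */
int main(void)
{
	if (mem_encrypt_active())
		printf("memory encryption active: disabling passthrough default\n");
	else
		printf("memory encryption not active\n");
	return 0;
}
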
diff --git a/patches.suse/iommu-iova-avoid-false-sharing-on-fq_timer_on b/patches.suse/iommu-iova-avoid-false-sharing-on-fq_timer_on
new file mode 100644
index 0000000000..2e1931f29d
--- /dev/null
+++ b/patches.suse/iommu-iova-avoid-false-sharing-on-fq_timer_on
@@ -0,0 +1,44 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 28 Aug 2019 06:13:38 -0700
+Subject: iommu/iova: Avoid false sharing on fq_timer_on
+Git-commit: 0d87308cca2c124f9bce02383f1d9632c9be89c4
+Patch-mainline: v5.4-rc1
+References: bsc#1151701
+
+In commit 14bd9a607f90 ("iommu/iova: Separate atomic variables
+to improve performance") Jinyu Qi identified that the atomic_cmpxchg()
+in queue_iova() was causing a performance loss and moved critical fields
+so that the false sharing would not impact them.
+
+However, avoiding the false sharing in the first place seems easy.
+We should attempt the atomic_cmpxchg() no more than 100 times
+per second. Adding an atomic_read() will keep the cache
+line mostly shared.
+
+This false sharing came with commit 9a005a800ae8
+("iommu/iova: Add flush timer").
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: 9a005a800ae8 ('iommu/iova: Add flush timer')
+Cc: Jinyu Qi <jinyuqi@huawei.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Acked-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+---
+ drivers/iommu/iova.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -589,7 +589,9 @@ void queue_iova(struct iova_domain *iova
+
+ spin_unlock_irqrestore(&fq->lock, flags);
+
+- if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
++ /* Avoid false sharing as much as possible. */
++ if (!atomic_read(&iovad->fq_timer_on) &&
++ !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
+ mod_timer(&iovad->fq_timer,
+ jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+
+
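
The reason the extra atomic_read() helps: a plain load leaves the fq_timer_on cache line in shared state on every CPU, while atomic_cmpxchg() always pulls the line in exclusively even when it is going to fail. Attempting the read-modify-write only when the flag already looks clear keeps the line shared in the common case. A self-contained C11 sketch of the same two-step test follows; timer_on, maybe_arm_timer() and arm_timer() are illustrative names, with arm_timer() standing in for mod_timer().

#include <stdatomic.h>
#include <stdio.h>

static atomic_int timer_on;	/* 0 = timer idle, 1 = timer armed */

static void arm_timer(void)
{
	printf("timer armed\n");	/* stands in for mod_timer() */
}

/*
 * Called on every queued entry. The cheap atomic load keeps the cache
 * line shared between CPUs; the compare-and-exchange (which forces the
 * line exclusive) only runs when the flag actually looks clear.
 */
static void maybe_arm_timer(void)
{
	int expected = 0;

	if (!atomic_load(&timer_on) &&
	    atomic_compare_exchange_strong(&timer_on, &expected, 1))
		arm_timer();
}

int main(void)
{
	maybe_arm_timer();	/* arms the timer */
	maybe_arm_timer();	/* no-op: flag already set, cmpxchg skipped */
	return 0;
}
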
diff --git a/series.conf b/series.conf
index 7ffc26e0eb..6e21ea86b8 100644
--- a/series.conf
+++ b/series.conf
@@ -49771,7 +49771,11 @@
patches.suse/usb-host-xhci-rcar-Fix-typo-in-compatible-string-mat.patch
patches.suse/USB-cdc-wdm-fix-race-between-write-and-disconnect-du.patch
patches.suse/VMCI-Release-resource-if-the-work-is-already-queued.patch
+ patches.suse/iommu-amd-flush-old-domains-in-kdump-kernel
+ patches.suse/iommu-amd-fix-race-in-increase_address_space
patches.suse/vhost-make-sure-log_num-in_num.patch
+ patches.suse/iommu-iova-avoid-false-sharing-on-fq_timer_on
+ patches.suse/iommu-dma-fix-for-dereferencing-before-null-checking
# jejb/scsi for-next
patches.suse/scsi-cxlflash-Mark-expected-switch-fall-throughs.patch
@@ -50571,6 +50575,7 @@
patches.suse/0009-iommu-Set-default-domain-type-at-runtime.patch
patches.suse/0010-iommu-Disable-passthrough-mode-when-SME-is-active.patch
patches.suse/0011-Documentation-Update-Documentation-for-iommu.passthr.patch
+ patches.suse/iommu-don-t-use-sme_active-in-generic-code
########################################################
# kABI consistency patches