author     Michal Suchanek <msuchanek@suse.de>   2018-10-31 13:27:38 +0100
committer  Michal Suchanek <msuchanek@suse.de>   2018-10-31 23:28:42 +0100
commit     58d737bd888e53dedd0e4844f9bc01e05fe64cad (patch)
tree       c5d466f63ca3959edb29fb929d05540a903b7820
parent     edfa6fc416520d0ef4843629bf7794f2c8adfba1 (diff)

KVM: PPC: Make iommu_table::it_userspace big endian (bsc#1061840).

-rw-r--r--  patches.arch/KVM-PPC-Make-iommu_table-it_userspace-big-endian.patch  193
-rw-r--r--  series.conf                                                            1
2 files changed, 194 insertions, 0 deletions
diff --git a/patches.arch/KVM-PPC-Make-iommu_table-it_userspace-big-endian.patch b/patches.arch/KVM-PPC-Make-iommu_table-it_userspace-big-endian.patch
new file mode 100644
index 0000000000..8b03359da0
--- /dev/null
+++ b/patches.arch/KVM-PPC-Make-iommu_table-it_userspace-big-endian.patch
@@ -0,0 +1,193 @@
+From 3474389de987c8fcf938cc3a507d5fe9cc0eb142 Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Wed, 4 Jul 2018 16:13:46 +1000
+Subject: [PATCH] KVM: PPC: Make iommu_table::it_userspace big endian
+
+References: bsc#1061840
+Patch-mainline: v4.19-rc1
+Git-commit: 00a5c58d9499bd0c290b57205f43a70f2e69d3f6
+
+We are going to reuse multilevel TCE code for the userspace copy of
+the TCE table and since it is big endian, let's make the copy big endian
+too.
+
+Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Acked-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/include/asm/iommu.h | 2 +-
+ arch/powerpc/kvm/book3s_64_vio.c | 11 ++++++-----
+ arch/powerpc/kvm/book3s_64_vio_hv.c | 10 +++++-----
+ drivers/vfio/vfio_iommu_spapr_tce.c | 19 +++++++++----------
+ 4 files changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
+index 8a8ce220d7d0..470124740864 100644
+--- a/arch/powerpc/include/asm/iommu.h
++++ b/arch/powerpc/include/asm/iommu.h
+@@ -117,7 +117,7 @@ struct iommu_table {
+ unsigned long *it_map; /* A simple allocation bitmap for now */
+ unsigned long it_page_shift;/* table iommu page size */
+ struct list_head it_group_list;/* List of iommu_table_group_link */
+- unsigned long *it_userspace; /* userspace view of the table */
++ __be64 *it_userspace; /* userspace view of the table */
+ struct iommu_table_ops *it_ops;
+ struct kref it_kref;
+ };
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 8c456fa691a5..963059af8f3d 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -378,19 +378,19 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
+ {
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+ const unsigned long pgsize = 1ULL << tbl->it_page_shift;
+- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
++ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+
+ if (!pua)
+ /* it_userspace allocation might be delayed */
+ return H_TOO_HARD;
+
+- mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
++ mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
+ if (!mem)
+ return H_TOO_HARD;
+
+ mm_iommu_mapped_dec(mem);
+
+- *pua = 0;
++ *pua = cpu_to_be64(0);
+
+ return H_SUCCESS;
+ }
+@@ -437,7 +437,8 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ enum dma_data_direction dir)
+ {
+ long ret;
+- unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
++ unsigned long hpa;
++ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ struct mm_iommu_table_group_mem_t *mem;
+
+ if (!pua)
+@@ -464,7 +465,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ if (dir != DMA_NONE)
+ kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
+
+- *pua = ua;
++ *pua = cpu_to_be64(ua);
+
+ return 0;
+ }
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index 5b298f5a1a14..841aef714929 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -200,7 +200,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
+ {
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+ const unsigned long pgsize = 1ULL << tbl->it_page_shift;
+- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
++ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+
+ if (!pua)
+ /* it_userspace allocation might be delayed */
+@@ -210,13 +210,13 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
+ if (WARN_ON_ONCE_RM(!pua))
+ return H_HARDWARE;
+
+- mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
++ mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
+ if (!mem)
+ return H_TOO_HARD;
+
+ mm_iommu_mapped_dec(mem);
+
+- *pua = 0;
++ *pua = cpu_to_be64(0);
+
+ return H_SUCCESS;
+ }
+@@ -268,7 +268,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ {
+ long ret;
+ unsigned long hpa = 0;
+- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
++ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ struct mm_iommu_table_group_mem_t *mem;
+
+ if (!pua)
+@@ -303,7 +303,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ if (dir != DMA_NONE)
+ kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
+
+- *pua = ua;
++ *pua = cpu_to_be64(ua);
+
+ return 0;
+ }
+diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
+index b4c68f3b82be..a78974e1fee6 100644
+--- a/drivers/vfio/vfio_iommu_spapr_tce.c
++++ b/drivers/vfio/vfio_iommu_spapr_tce.c
+@@ -230,7 +230,7 @@ static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl,
+ decrement_locked_vm(mm, cb >> PAGE_SHIFT);
+ return -ENOMEM;
+ }
+- tbl->it_userspace = uas;
++ tbl->it_userspace = (__be64 *) uas;
+
+ return 0;
+ }
+@@ -482,20 +482,20 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+ int ret;
+ unsigned long hpa = 0;
+- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
++ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+
+ if (!pua)
+ return;
+
+- ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
+- &hpa, &mem);
++ ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua),
++ tbl->it_page_shift, &hpa, &mem);
+ if (ret)
+- pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
+- __func__, *pua, entry, ret);
++ pr_debug("%s: tce %llx at #%lx was not cached, ret=%d\n",
++ __func__, be64_to_cpu(*pua), entry, ret);
+ if (mem)
+ mm_iommu_mapped_dec(mem);
+
+- *pua = 0;
++ *pua = cpu_to_be64(0);
+ }
+
+ static int tce_iommu_clear(struct tce_container *container,
+@@ -605,8 +605,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
+
+ for (i = 0; i < pages; ++i) {
+ struct mm_iommu_table_group_mem_t *mem = NULL;
+- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
+- entry + i);
++ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry + i);
+
+ ret = tce_iommu_prereg_ua_to_hpa(container,
+ tce, tbl->it_page_shift, &hpa, &mem);
+@@ -640,7 +639,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
+ if (dirtmp != DMA_NONE)
+ tce_iommu_unuse_page_v2(container, tbl, entry + i);
+
+- *pua = tce;
++ *pua = cpu_to_be64(tce);
+
+ tce += IOMMU_PAGE_SIZE(tbl);
+ }
+--
+2.13.7
+
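Note: the patch above applies one pattern consistently: it_userspace entries are written with cpu_to_be64() and read back with be64_to_cpu(), so the array always holds big-endian values regardless of host endianness. The following standalone userspace sketch is not part of the patch; it only illustrates that store/load discipline, using glibc's htobe64()/be64toh() as stand-ins for the kernel helpers, with a made-up table size and address.

/*
 * Minimal sketch of the big-endian it_userspace access pattern.
 * htobe64()/be64toh() play the role of cpu_to_be64()/be64_to_cpu();
 * TABLE_ENTRIES and the example address are arbitrary.
 */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TABLE_ENTRIES 8   /* arbitrary, illustration only */

int main(void)
{
	/* userspace view of the table: entries kept in big-endian form */
	uint64_t *it_userspace = calloc(TABLE_ENTRIES, sizeof(*it_userspace));
	if (!it_userspace)
		return EXIT_FAILURE;

	unsigned long entry = 3;
	unsigned long ua = 0x7f0000001000UL;   /* example userspace address */

	/* store: CPU order -> big endian (cf. *pua = cpu_to_be64(ua)) */
	it_userspace[entry] = htobe64(ua);

	/* load: big endian -> CPU order before use (cf. be64_to_cpu(*pua)) */
	unsigned long looked_up = be64toh(it_userspace[entry]);
	printf("stored 0x%lx, read back 0x%lx\n", ua, looked_up);

	/* clear: even zero goes through the conversion (cf. cpu_to_be64(0)) */
	it_userspace[entry] = htobe64(0);

	free(it_userspace);
	return EXIT_SUCCESS;
}

On a big-endian host both conversions are no-ops, so the change makes the storage format explicit rather than altering behaviour there; on little-endian hosts every store and load byteswaps.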
diff --git a/series.conf b/series.conf
index 313aee125c..3a7772cc03 100644
--- a/series.conf
+++ b/series.conf
@@ -17662,6 +17662,7 @@
patches.suse/0003-modsign-log-module-name-in-the-event-of-an-error.patch
patches.suse/0004-ARM-module-fix-modsign-build-error.patch
patches.arch/powerpc-powernv-Move-TCE-manupulation-code-to-its-ow.patch
+ patches.arch/KVM-PPC-Make-iommu_table-it_userspace-big-endian.patch
patches.arch/cxl-Fix-wrong-comparison-in-cxl_adapter_context_get.patch
patches.arch/powerpc-pkeys-Give-all-threads-control-of-their-key-.patch
patches.arch/powerpc-pkeys-Deny-read-write-execute-by-default.patch