author		Michal Suchanek <msuchanek@suse.de>	2018-10-31 12:36:54 +0100
committer	Michal Suchanek <msuchanek@suse.de>	2018-10-31 23:28:39 +0100
commit		976d3073c47cc32ec4aa1345866533a440311b7c (patch)
tree		c93ab0918aead7c40d5aa44ba49870de5dfb7c8
parent		233d742a11ac4ba99c86b6f8aeb8bfa235c5ad27 (diff)
KVM: PPC: Book3S: Allow backing bigger guest IOMMU pages with
smaller physical pages (bsc#1061840).
-rw-r--r--	patches.arch/KVM-PPC-Book3S-Allow-backing-bigger-guest-IOMMU-page.patch	248
-rw-r--r--	series.conf	1
2 files changed, 249 insertions, 0 deletions
diff --git a/patches.arch/KVM-PPC-Book3S-Allow-backing-bigger-guest-IOMMU-page.patch b/patches.arch/KVM-PPC-Book3S-Allow-backing-bigger-guest-IOMMU-page.patch
new file mode 100644
index 0000000000..30125ea287
--- /dev/null
+++ b/patches.arch/KVM-PPC-Book3S-Allow-backing-bigger-guest-IOMMU-page.patch
@@ -0,0 +1,248 @@
+From ca1fc489cfa06a554fd71eb46d8927614ec7e6f3 Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Mon, 14 May 2018 20:00:28 +1000
+Subject: [PATCH] KVM: PPC: Book3S: Allow backing bigger guest IOMMU pages with
+ smaller physical pages
+
+References: bsc#1061840
+Patch-mainline: v4.18-rc1
+Git-commit: ca1fc489cfa06a554fd71eb46d8927614ec7e6f3
+
+At the moment the host only supports the IOMMU page sizes which the
+guest is aware of: 4KB/64KB/16MB. However, P9 does not support 16MB
+IOMMU pages; it supports 2MB and 1GB pages instead. We can still
+emulate bigger guest pages (for example 16MB) with smaller host pages
+(4KB/64KB/2MB).
+
+This allows the physical IOMMU pages to use a page size smaller than
+or equal to the guest-visible IOMMU page size.
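+
+For illustration (numbers chosen here as an example): a 16MB guest
+IOMMU page (page shift 24) backed by 64KB host pages (page shift 16)
+expands into 1ULL << (24 - 16) = 256 host TCE entries per guest TCE
+entry.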
+
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/kvm/book3s_64_vio.c | 64 +++++++++++++++++++++++++++++--------
+ arch/powerpc/kvm/book3s_64_vio_hv.c | 50 +++++++++++++++++++++++++----
+ 2 files changed, 94 insertions(+), 20 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 041e54d26750..984f1978a19c 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -176,14 +176,12 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
+
+ if (!tbltmp)
+ continue;
+- /*
+- * Make sure hardware table parameters are exactly the same;
+- * this is used in the TCE handlers where boundary checks
+- * use only the first attached table.
+- */
+- if ((tbltmp->it_page_shift == stt->page_shift) &&
+- (tbltmp->it_offset == stt->offset) &&
+- (tbltmp->it_size == stt->size)) {
++ /* Make sure hardware table parameters are compatible */
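++	/*
++	 * The host page size may not exceed the guest-visible page size,
++	 * and both tables must cover the same byte range.
++	 */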
++ if ((tbltmp->it_page_shift <= stt->page_shift) &&
++ (tbltmp->it_offset << tbltmp->it_page_shift ==
++ stt->offset << stt->page_shift) &&
++ (tbltmp->it_size << tbltmp->it_page_shift ==
++ stt->size << stt->page_shift)) {
+ /*
+ * Reference the table to avoid races with
+ * add/remove DMA windows.
+@@ -396,7 +394,7 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
+ return H_SUCCESS;
+ }
+
+-static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
++static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
+ struct iommu_table *tbl, unsigned long entry)
+ {
+ enum dma_data_direction dir = DMA_NONE;
+@@ -416,7 +414,24 @@ static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
+ return ret;
+ }
+
+-long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
++static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
++ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
++ unsigned long entry)
++{
++ unsigned long i, ret = H_SUCCESS;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry * subpages;
++
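++	/* Unmap every smaller host page backing this guest entry */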
++ for (i = 0; i < subpages; ++i) {
++ ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
++ if (ret != H_SUCCESS)
++ break;
++ }
++
++ return ret;
++}
++
++long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long ua,
+ enum dma_data_direction dir)
+ {
+@@ -453,6 +468,27 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+ return 0;
+ }
+
++static long kvmppc_tce_iommu_map(struct kvm *kvm,
++ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
++ unsigned long entry, unsigned long ua,
++ enum dma_data_direction dir)
++{
++ unsigned long i, pgoff, ret = H_SUCCESS;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry * subpages;
++
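++	/* Map each host page at its offset within the bigger guest page */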
++ for (i = 0, pgoff = 0; i < subpages;
++ ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
++
++ ret = kvmppc_tce_iommu_do_map(kvm, tbl,
++ io_entry + i, ua + pgoff, dir);
++ if (ret != H_SUCCESS)
++ break;
++ }
++
++ return ret;
++}
++
+ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce)
+ {
+@@ -491,10 +527,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ if (dir == DMA_NONE)
+- ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
++ ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
+ stit->tbl, entry);
+ else
+- ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
++ ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
+ entry, ua, dir);
+
+ if (ret == H_SUCCESS)
+@@ -570,7 +606,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ return H_PARAMETER;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+- ret = kvmppc_tce_iommu_map(vcpu->kvm,
++ ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
+ stit->tbl, entry + i, ua,
+ iommu_tce_direction(tce));
+
+@@ -618,7 +654,7 @@ long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long entry = ioba >> stt->page_shift;
+
+ for (i = 0; i < npages; ++i) {
+- ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
++ ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
+ stit->tbl, entry + i);
+
+ if (ret == H_SUCCESS)
+diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
+index e220fabb2f5d..635f3ca8129a 100644
+--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
++++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
+@@ -221,7 +221,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
+ return H_SUCCESS;
+ }
+
+-static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
++static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
+ struct iommu_table *tbl, unsigned long entry)
+ {
+ enum dma_data_direction dir = DMA_NONE;
+@@ -245,7 +245,24 @@ static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
+ return ret;
+ }
+
+-static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
++static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
++ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
++ unsigned long entry)
++{
++ unsigned long i, ret = H_SUCCESS;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry * subpages;
++
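++	/* Same subpage expansion as the virtual-mode kvmppc_tce_iommu_unmap() */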
++ for (i = 0; i < subpages; ++i) {
++ ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
++ if (ret != H_SUCCESS)
++ break;
++ }
++
++ return ret;
++}
++
++static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
+ unsigned long entry, unsigned long ua,
+ enum dma_data_direction dir)
+ {
+@@ -290,6 +307,27 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
+ return 0;
+ }
+
++static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
++ struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
++ unsigned long entry, unsigned long ua,
++ enum dma_data_direction dir)
++{
++ unsigned long i, pgoff, ret = H_SUCCESS;
++ unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
++ unsigned long io_entry = entry * subpages;
++
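++	/* Same subpage walk as the virtual-mode kvmppc_tce_iommu_map() */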
++ for (i = 0, pgoff = 0; i < subpages;
++ ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {
++
++ ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
++ io_entry + i, ua + pgoff, dir);
++ if (ret != H_SUCCESS)
++ break;
++ }
++
++ return ret;
++}
++
+ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce)
+ {
+@@ -327,10 +365,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ if (dir == DMA_NONE)
+- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
++ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
+ stit->tbl, entry);
+ else
+- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
++ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
+ stit->tbl, entry, ua, dir);
+
+ if (ret == H_SUCCESS)
+@@ -477,7 +515,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ return H_PARAMETER;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+- ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
++ ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
+ stit->tbl, entry + i, ua,
+ iommu_tce_direction(tce));
+
+@@ -529,7 +567,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long entry = ioba >> stt->page_shift;
+
+ for (i = 0; i < npages; ++i) {
+- ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
++ ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
+ stit->tbl, entry + i);
+
+ if (ret == H_SUCCESS)
+--
+2.13.7
+
diff --git a/series.conf b/series.conf
index 7829af2e62..c9fb9286a8 100644
--- a/series.conf
+++ b/series.conf
@@ -16722,6 +16722,7 @@
patches.arch/KVM-PPC-Book3S-HV-Set-RWMR-on-POWER8-so-PURR-SPURR-c.patch
patches.arch/KVM-PPC-Book3S-HV-Fix-inaccurate-comment.patch
patches.arch/KVM-PPC-Book3S-Use-correct-page-shift-in-H_STUFF_TCE.patch
+ patches.arch/KVM-PPC-Book3S-Allow-backing-bigger-guest-IOMMU-page.patch
patches.suse/ipv6-allow-PMTU-exceptions-to-local-routes.patch
patches.suse/net-dsa-add-error-handling-for-pskb_trim_rcsum.patch
patches.drivers/ixgbe-Fix-setting-of-TC-configuration-for-macvlan-ca.patch