author    Michal Marek <mmarek@suse.com>  2017-03-03 14:39:33 +0100
committer Michal Marek <mmarek@suse.com>  2017-03-03 14:39:33 +0100
commit    56e022453a9793ec089a82d5640ef2d926161f7f (patch)
tree      005cf68e5488268e3d6d9e705d71cce1ef9604cf
parent    271c9dd46fc36d5defb7ead7691ac832249a5fff (diff)
parent    d08622ac64c365600c1a51e4819e7e7da8dd3da8 (diff)
Merge branch 'users/mbrugger/SLE12-SP3/for-next' into SLE12-SP3 (tag: rpm-4.4.52-1)
Pull Qualcomm Centriq enablement from Matthias Brugger (fate#320512).
-rw-r--r--  config/arm64/default  6
-rw-r--r--  config/arm64/vanilla  1
-rw-r--r--  config/ppc64le/debug  4
-rw-r--r--  config/ppc64le/default  4
-rw-r--r--  config/ppc64le/vanilla  1
-rw-r--r--  config/s390x/default  2
-rw-r--r--  config/s390x/vanilla  1
-rw-r--r--  config/x86_64/debug  4
-rw-r--r--  config/x86_64/default  4
-rw-r--r--  config/x86_64/vanilla  1
-rw-r--r--  patches.arch/0001-dma-mapping-make-the-generic-coherent-dma-mmap-imple.patch  85
-rw-r--r--  patches.arch/0002-arc-convert-to-dma_map_ops.patch  442
-rw-r--r--  patches.arch/0003-avr32-convert-to-dma_map_ops.patch  551
-rw-r--r--  patches.arch/0004-blackfin-convert-to-dma_map_ops.patch  277
-rw-r--r--  patches.arch/0005-c6x-convert-to-dma_map_ops.patch  356
-rw-r--r--  patches.arch/0006-cris-convert-to-dma_map_ops.patch  303
-rw-r--r--  patches.arch/0007-nios2-convert-to-dma_map_ops.patch  408
-rw-r--r--  patches.arch/0008-frv-convert-to-dma_map_ops.patch  407
-rw-r--r--  patches.arch/0009-parisc-convert-to-dma_map_ops.patch  732
-rw-r--r--  patches.arch/0010-mn10300-convert-to-dma_map_ops.patch  310
-rw-r--r--  patches.arch/0011-m68k-convert-to-dma_map_ops.patch  294
-rw-r--r--  patches.arch/0012-metag-convert-to-dma_map_ops.patch  443
-rw-r--r--  patches.arch/0013-sparc-use-generic-dma_set_mask.patch  53
-rw-r--r--  patches.arch/0014-tile-uninline-dma_set_mask.patch  109
-rw-r--r--  patches.arch/0015-dma-mapping-always-provide-the-dma_map_ops-based-imp.patch  1739
-rw-r--r--  patches.arch/0016-dma-mapping-remove-asm-generic-dma-coherent.h.patch  201
-rw-r--r--  patches.arch/arm64-Relocate-screen_info.lfb_base-on-PCI-BAR-alloc.patch  192
-rw-r--r--  patches.arch/base-Export-platform_msi_domain_-alloc-free-_irqs.patch  43
-rw-r--r--  patches.arch/qcom-0001-arm64-efi-split-off-EFI-init-and-runtime-code-for-re.patch  803
-rw-r--r--  patches.arch/qcom-0002-arm64-efi-refactor-EFI-init-and-runtime-code-for-reu.patch  229
-rw-r--r--  patches.arch/qcom-0003-x86-efi-bgrt-Fix-kernel-panic-when-mapping-BGRT-data.patch  189
-rw-r--r--  patches.arch/qcom-0004-ARM-wire-up-UEFI-init-and-runtime-support.patch  189
-rw-r--r--  patches.arch/qcom-0005-ARM-add-UEFI-stub-support.patch  464
-rw-r--r--  patches.arch/qcom-0006-efi-include-asm-early_ioremap.h-not-asm-efi.h-to-get.patch  44
-rw-r--r--  patches.arch/qcom-0007-efi-arm-Drop-writable-mapping-of-the-UEFI-System-tab.patch  121
-rw-r--r--  patches.arch/qcom-0008-efi-arm64-Drop-__init-annotation-from-handle_kernel_.patch  52
-rw-r--r--  patches.arch/qcom-0009-arm64-vmlinux.lds.S-Handle-.init.rodata.xxx-and-.ini.patch  47
-rw-r--r--  patches.arch/qcom-0010-efi-efistub-Prevent-__init-annotations-from-being-us.patch  56
-rw-r--r--  patches.arch/qcom-0011-efi-arm-init-Use-read-only-early-mappings.patch  70
-rw-r--r--  patches.arch/qcom-0012-efi-arm-Check-for-LPAE-support-before-booting-a-LPAE.patch  61
-rw-r--r--  patches.arch/qcom-0013-efi-arm64-Check-for-h-w-support-before-booting-a-4-K.patch  67
-rw-r--r--  patches.arch/qcom-0014-efi-arm-Perform-hardware-compatibility-check.patch  51
-rw-r--r--  patches.arch/qcom-0015-x86-efi-Map-RAM-into-the-identity-page-table-for-mix.patch  72
-rw-r--r--  patches.arch/qcom-0016-x86-efi-Hoist-page-table-switching-code-into-efi_cal.patch  216
-rw-r--r--  patches.arch/qcom-0017-x86-efi-Build-our-own-page-table-structures.patch  313
-rw-r--r--  patches.arch/qcom-0018-x86-efi-Setup-separate-EFI-page-tables-in-kexec-path.patch  86
-rw-r--r--  patches.arch/qcom-0019-x86-rtc-Replace-paravirt-rtc-check-with-platform-leg.patch  336
-rw-r--r--  patches.arch/qcom-0020-x86-init-Use-a-platform-legacy-quirk-for-EBDA.patch  115
-rw-r--r--  patches.arch/qcom-0021-x86-init-Rename-EBDA-code-file.patch  228
-rw-r--r--  patches.arch/qcom-0022-efi-Iterate-over-efi.memmap-in-for_each_efi_memory_d.patch  332
-rw-r--r--  patches.arch/qcom-0023-efi-Remove-global-memmap-EFI-memory-map.patch  474
-rw-r--r--  patches.arch/qcom-0024-efi-arm-Use-memremap-to-create-the-persistent-memmap.patch  44
-rw-r--r--  patches.arch/qcom-0025-efi-Add-support-for-the-EFI_MEMORY_ATTRIBUTES_TABLE-.patch  85
-rw-r--r--  patches.arch/qcom-0026-efi-Implement-generic-support-for-the-Memory-Attribu.patch  260
-rw-r--r--  patches.arch/qcom-0027-efi-arm-Take-the-Memory-Attributes-table-into-accoun.patch  74
-rw-r--r--  patches.arch/qcom-0028-x86-efi-Fix-boot-crash-by-always-mapping-boot-servic.patch  199
-rw-r--r--  patches.arch/qcom-0029-x86-boot-Reorganize-and-clean-up-the-BIOS-area-reser.patch  293
-rw-r--r--  patches.arch/qcom-0030-efi-capsule-Allocate-whole-capsule-into-virtual-memo.patch  87
-rw-r--r--  patches.arch/qcom-0031-efi-libstub-Allocate-headspace-in-efi_get_memory_map.patch  351
-rw-r--r--  patches.arch/qcom-0032-efi-libstub-Introduce-ExitBootServices-helper.patch  135
-rw-r--r--  patches.arch/qcom-0033-efi-libstub-Use-efi_exit_boot_services-in-FDT.patch  88
-rw-r--r--  patches.arch/qcom-0034-x86-efi-Use-efi_exit_boot_services.patch  192
-rw-r--r--  patches.arch/qcom-0035-x86-efi-Test-for-EFI_MEMMAP-functionality-when-itera.patch  70
-rw-r--r--  patches.arch/qcom-0036-efi-Refactor-efi_memmap_init_early-into-arch-neutral.patch  383
-rw-r--r--  patches.arch/qcom-0037-x86-efi-Consolidate-region-mapping-logic.patch  130
-rw-r--r--  patches.arch/qcom-0038-efi-Add-efi_memmap_init_late-for-permanent-EFI-memma.patch  400
-rw-r--r--  patches.arch/qcom-0039-efi-fake_mem-Refactor-main-two-code-chunks-into-func.patch  292
-rw-r--r--  patches.arch/qcom-0040-efi-Split-out-EFI-memory-map-functions-into-new-file.patch  670
-rw-r--r--  patches.arch/qcom-0041-efi-Add-efi_memmap_install-for-installing-new-EFI-me.patch  110
-rw-r--r--  patches.arch/qcom-0042-efi-Allow-drivers-to-reserve-boot-services-forever.patch  252
-rw-r--r--  patches.arch/qcom-0043-efi-runtime-map-Use-efi.memmap-directly-instead-of-a.patch  218
-rw-r--r--  patches.arch/qcom-0044-efi-esrt-Use-efi_mem_reserve-and-avoid-a-kmalloc.patch  93
-rw-r--r--  patches.arch/qcom-0045-x86-efi-bgrt-Use-efi_mem_reserve-to-avoid-copying-im.patch  61
-rw-r--r--  patches.arch/qcom-0046-efi-esrt-Use-memremap-not-ioremap-to-access-ESRT-tab.patch  55
-rw-r--r--  patches.arch/qcom-0047-efi-arm-esrt-Add-missing-call-to-efi_esrt_init.patch  32
-rw-r--r--  patches.arch/qcom-0048-efi-x86-Prune-invalid-memory-map-entries-and-fix-boo.patch  148
-rw-r--r--  patches.arch/qcom-0049-x86-efi-Defer-efi_esrt_init-until-after-memblock_x86.patch  98
-rw-r--r--  patches.arch/qcom-0050-x86-efi-Don-t-allocate-memmap-through-memblock-after.patch  172
-rw-r--r--  patches.arch/qcom-0051-x86-efi-Round-EFI-memmap-reservations-to-EFI_PAGE_SI.patch  84
-rw-r--r--  patches.arch/qcom-0052-ARM-efi-Apply-strict-permissions-for-UEFI-Runtime-Se.patch  107
-rw-r--r--  patches.arch/qcom-0053-arm64-efi-Apply-strict-permissions-to-UEFI-Runtime-S.patch  105
-rw-r--r--  patches.drivers/0001-crypto-acomp-add-asynchronous-compression-api.patch  604
-rw-r--r--  patches.drivers/0002-crypto-acomp-add-driver-side-scomp-interface.patch  778
-rw-r--r--  patches.drivers/FDT-Add-a-helper-to-get-the-subnode-by-given-name.patch  62
-rw-r--r--  patches.drivers/PCI-AER-Remove-duplicate-AER-severity-translation.patch  64
-rw-r--r--  patches.drivers/crypto-acomp-update-testmgr-with-support-for-acomp.patch  205
-rw-r--r--  patches.drivers/crypto-testmgr-Use-heap-buffer-for-acomp-test-input.patch  162
-rw-r--r--  patches.drivers/crypto-testmgr-don-t-use-stack-buffer-in-test_acomp.patch  69
-rw-r--r--  patches.drivers/crypto-testmgr-use-kmemdup-instead-of-kmalloc-memcpy.patch  54
-rw-r--r--  patches.drivers/firmware-dmi_scan-Always-show-system-identification-.patch  44
-rw-r--r--  patches.drivers/qcom-0001-dmaengine-qcom_bam_dma-move-to-qcom-directory.patch  2608
-rw-r--r--  patches.drivers/qcom-0002-dmaengine-hidma-Add-Device-Tree-binding.patch  117
-rw-r--r--  patches.drivers/qcom-0003-dmaengine-add-Qualcomm-Technologies-HIDMA-management.patch  828
-rw-r--r--  patches.drivers/qcom-0004-dmaengine-add-Qualcomm-Technologies-HIDMA-channel-dr.patch  924
-rw-r--r--  patches.drivers/qcom-0005-dmaengine-qcom_hidma-implement-lower-level-hardware-.patch  1060
-rw-r--r--  patches.drivers/qcom-0006-dmaengine-qcom_hidma-add-debugfs-hooks.patch  311
-rw-r--r--  patches.drivers/qcom-0007-dmaengine-qcom_hidma-add-support-for-object-hierarch.patch  256
-rw-r--r--  patches.drivers/qcom-0008-dmaengine-qcom_hidma-use-for_each_matching_node-macr.patch  35
-rw-r--r--  patches.drivers/qcom-0009-dmaengine-qcom_hidma_lli-kill-the-tasklets-upon-exit.patch  33
-rw-r--r--  patches.drivers/qcom-0010-dmaengine-qcom_hidma-kill-the-tasklets-upon-exit.patch  33
-rw-r--r--  patches.drivers/qcom-0011-dmaengine-qcom_hidma-fix-return-value-check-in-hidma.patch  38
-rw-r--r--  patches.drivers/qcom-0012-dmaengine-Add-helper-function-to-prep-for-error-repo.patch  107
-rw-r--r--  patches.drivers/qcom-0013-dmaengine-add-support-to-provide-error-result-from-a.patch  129
-rw-r--r--  patches.drivers/qcom-0014-dmaengine-qcom_hidma-convert-callback-to-helper-func.patch  55
-rw-r--r--  patches.drivers/qcom-0015-dmaengine-qcom_hidma-release-the-descriptor-before-t.patch  75
-rw-r--r--  patches.drivers/qcom-0016-dmaengine-qcom_hidma-report-transfer-errors-with-new.patch  52
-rw-r--r--  patches.drivers/qcom-0017-dmaengine-qcom_hidma-add-error-reporting-for-tx_stat.patch  203
-rw-r--r--  patches.drivers/qcom-0018-dmaengine-dmatest-exclude-compare-and-fill-time-duri.patch  86
-rw-r--r--  patches.drivers/qcom-0019-dma-mapping-add-map-unmap-_resource-to-dma_map_ops.patch  40
-rw-r--r--  patches.drivers/qcom-0020-dma-debug-add-support-for-resource-mappings.patch  150
-rw-r--r--  patches.drivers/qcom-0021-arm-dma-mapping-add-map-unmap-_resource-for-iommu-op.patch  111
-rw-r--r--  patches.drivers/qcom-0022-dma-mapping-add-dma_-map-unmap-_resource.patch  108
-rw-r--r--  patches.drivers/qcom-0023-dmaengine-rcar-dmac-group-slave-configuration.patch  121
-rw-r--r--  patches.drivers/qcom-0024-dmaengine-rcar-dmac-add-iommu-support-for-slave-tran.patch  167
-rw-r--r--  patches.drivers/qcom-0025-dmaengine-qcom_hidma-prevent-disable-in-error.patch  50
-rw-r--r--  patches.drivers/qcom-0026-dmaengine-qcom_hidma-remove-useless-debugfs-file-rem.patch  60
-rw-r--r--  patches.drivers/qcom-0027-of-irq-make-of_msi_configure-accessible-from-modules.patch  34
-rw-r--r--  patches.drivers/qcom-0028-dmaengine-qcom_hidma-configure-DMA-and-MSI-for-OF.patch  41
-rw-r--r--  patches.drivers/qcom-0029-dmaengine-qcom_hidma-add-a-common-API-to-setup-the-i.patch  87
-rw-r--r--  patches.drivers/qcom-0030-dmaengine-qcom_hidma-break-completion-processing-on-.patch  42
-rw-r--r--  patches.drivers/qcom-0031-dmaengine-qcom_hidma-make-pending_tre_count-atomic.patch  111
-rw-r--r--  patches.drivers/qcom-0032-dmaengine-qcom_hidma-bring-out-interrupt-cause.patch  115
-rw-r--r--  patches.drivers/qcom-0033-dmaengine-qcom_hidma-protect-common-data-structures.patch  139
-rw-r--r--  patches.drivers/qcom-0034-dmaengine-qcom_hidma-add-MSI-support-for-interrupts.patch  291
-rw-r--r--  patches.drivers/qcom-0035-dmaengine-qcom_hidma-remove-unneeded-of_node_put.patch  35
-rw-r--r--  patches.drivers/qcom-0036-dmaengine-qcom_hidma-hide-MSI-handler-when-unused.patch  47
-rw-r--r--  patches.drivers/qcom-0037-dmaengine-qcom_hidma-cleanup-sysfs-entries-during-re.patch  115
-rw-r--r--  patches.drivers/qcom-0038-dmaengine-qcom_hidma-autoload-while-probing-ACPI.patch  48
-rw-r--r--  patches.suse/arm64-efi-Don-t-truncate-frame-buffer-address-to-32-.patch  65
-rw-r--r--  patches.suse/arm64-efi-mark-UEFI-reserved-regions-as-MEMBLOCK_NOM.patch  17
-rw-r--r--  patches.suse/efi-arm64-Don-t-apply-MEMBLOCK_NOMAP-to-UEFI-memory-map-mapping.patch  13
-rw-r--r--  series.conf  129
-rw-r--r--  supported.conf  4
133 files changed, 27718 insertions(+), 290 deletions(-)
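
Most of the patches.arch/00NN-*.patch files above backport Christoph Hellwig's v4.5 dma_map_ops conversion series: each remaining architecture stops open-coding the whole DMA API and instead exports a single ops table plus a get_dma_ops() hook, with the common entry points supplied by asm-generic/dma-mapping-common.h. A minimal sketch of that pattern, condensed from the arc patch below ("foo" is a placeholder architecture, not code from this commit):

	#include <linux/dma-mapping.h>	/* struct dma_map_ops, struct dma_attrs */
	#include <linux/gfp.h>		/* alloc_pages_exact(), free_pages_exact() */
	#include <asm/io.h>		/* virt_to_phys() */

	static void *foo_dma_alloc(struct device *dev, size_t size,
				   dma_addr_t *handle, gfp_t gfp,
				   struct dma_attrs *attrs)
	{
		void *cpu = alloc_pages_exact(size, gfp);

		if (cpu)
			*handle = virt_to_phys(cpu);	/* bus address, platform dependent */
		return cpu;
	}

	static void foo_dma_free(struct device *dev, size_t size, void *cpu,
				 dma_addr_t handle, struct dma_attrs *attrs)
	{
		free_pages_exact(cpu, size);
	}

	struct dma_map_ops foo_dma_ops = {
		.alloc	= foo_dma_alloc,
		.free	= foo_dma_free,
		/* .map_page, .map_sg and the .sync_* hooks are filled in
		 * per architecture, as in the arc and avr32 patches below */
	};
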
diff --git a/config/arm64/default b/config/arm64/default
index 7e733034ed..c3a94a447b 100644
--- a/config/arm64/default
+++ b/config/arm64/default
@@ -224,7 +224,6 @@ CONFIG_JUMP_LABEL=y
# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_DMA_CONTIGUOUS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_GENERIC_IDLE_POLL_SETUP=y
@@ -5548,9 +5547,11 @@ CONFIG_DMA_BCM2835=m
# CONFIG_FSL_EDMA is not set
# CONFIG_INTEL_IDMA64 is not set
# CONFIG_PL330_DMA is not set
-CONFIG_QCOM_BAM_DMA=m
# CONFIG_TEGRA20_APB_DMA is not set
CONFIG_XGENE_DMA=m
+CONFIG_QCOM_BAM_DMA=m
+CONFIG_QCOM_HIDMA_MGMT=m
+CONFIG_QCOM_HIDMA=m
CONFIG_DW_DMAC_CORE=m
# CONFIG_DW_DMAC is not set
CONFIG_DW_DMAC_PCI=m
@@ -6494,6 +6495,7 @@ CONFIG_CRYPTO_AKCIPHER2=y
CONFIG_CRYPTO_AKCIPHER=y
CONFIG_CRYPTO_KPP2=y
CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_MANAGER=y
diff --git a/config/arm64/vanilla b/config/arm64/vanilla
index a796bb5ac9..f4b365fa20 100644
--- a/config/arm64/vanilla
+++ b/config/arm64/vanilla
@@ -7,6 +7,7 @@ CONFIG_FB_CFB_COPYAREA=m
CONFIG_FB_CFB_FILLRECT=m
CONFIG_FB_CFB_IMAGEBLIT=m
CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_HAVE_DMA_ATTRS=y
CONFIG_LOCALVERSION="-vanilla"
CONFIG_MODULES=y
# CONFIG_MODULE_SIG is not set
diff --git a/config/ppc64le/debug b/config/ppc64le/debug
index bf2c787d53..d9a4dad1cd 100644
--- a/config/ppc64le/debug
+++ b/config/ppc64le/debug
@@ -263,7 +263,6 @@ CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_ATTRS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_DMA_API_DEBUG=y
@@ -3889,6 +3888,8 @@ CONFIG_DMA_VIRTUAL_CHANNELS=m
CONFIG_DMA_OF=y
# CONFIG_FSL_EDMA is not set
CONFIG_INTEL_IDMA64=m
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
CONFIG_DW_DMAC_CORE=m
# CONFIG_DW_DMAC is not set
CONFIG_DW_DMAC_PCI=m
@@ -4743,6 +4744,7 @@ CONFIG_CRYPTO_AKCIPHER2=y
CONFIG_CRYPTO_AKCIPHER=y
CONFIG_CRYPTO_KPP2=y
CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_MANAGER=y
diff --git a/config/ppc64le/default b/config/ppc64le/default
index 7b94877acf..778a0da17a 100644
--- a/config/ppc64le/default
+++ b/config/ppc64le/default
@@ -265,7 +265,6 @@ CONFIG_HAVE_IOREMAP_PROT=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_ATTRS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_DMA_API_DEBUG=y
@@ -3880,6 +3879,8 @@ CONFIG_DMA_VIRTUAL_CHANNELS=m
CONFIG_DMA_OF=y
# CONFIG_FSL_EDMA is not set
CONFIG_INTEL_IDMA64=m
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
CONFIG_DW_DMAC_CORE=m
# CONFIG_DW_DMAC is not set
CONFIG_DW_DMAC_PCI=m
@@ -4729,6 +4730,7 @@ CONFIG_CRYPTO_AKCIPHER2=y
CONFIG_CRYPTO_AKCIPHER=y
CONFIG_CRYPTO_KPP2=y
CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_MANAGER=y
diff --git a/config/ppc64le/vanilla b/config/ppc64le/vanilla
index 0cabcb9837..fcc696d39b 100644
--- a/config/ppc64le/vanilla
+++ b/config/ppc64le/vanilla
@@ -2,6 +2,7 @@ CONFIG_CRYPTO_PCOMP=m
CONFIG_CRYPTO_PCOMP2=y
CONFIG_CRYPTO_ZLIB=m
CONFIG_DM_CACHE_MQ=m
+CONFIG_HAVE_DMA_ATTRS=y
CONFIG_LOCALVERSION="-vanilla"
# CONFIG_MODULE_SIG is not set
# CONFIG_SYSTEM_DATA_VERIFICATION is not set
diff --git a/config/s390x/default b/config/s390x/default
index 586dde2453..bdc3038cf2 100644
--- a/config/s390x/default
+++ b/config/s390x/default
@@ -212,7 +212,6 @@ CONFIG_KRETPROBES=y
CONFIG_HAVE_KPROBES=y
CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_ATTRS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_DMA_API_DEBUG=y
@@ -2566,6 +2565,7 @@ CONFIG_CRYPTO_AKCIPHER2=y
CONFIG_CRYPTO_AKCIPHER=y
CONFIG_CRYPTO_KPP2=y
CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_MANAGER=y
diff --git a/config/s390x/vanilla b/config/s390x/vanilla
index abe90f7146..9cf9505f1a 100644
--- a/config/s390x/vanilla
+++ b/config/s390x/vanilla
@@ -3,6 +3,7 @@ CONFIG_CRYPTO_PCOMP2=y
CONFIG_CRYPTO_ZLIB=m
CONFIG_DM_CACHE_MQ=m
# CONFIG_DP83867_PHY is not set
+CONFIG_HAVE_DMA_ATTRS=y
CONFIG_LOCALVERSION="-vanilla"
# CONFIG_MODULE_SIG is not set
CONFIG_PCI_NR_MSI=256
diff --git a/config/x86_64/debug b/config/x86_64/debug
index d8110af3b4..6a2044f446 100644
--- a/config/x86_64/debug
+++ b/config/x86_64/debug
@@ -269,7 +269,6 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_OPTPROBES=y
CONFIG_HAVE_KPROBES_ON_FTRACE=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_DMA_CONTIGUOUS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
@@ -5734,6 +5733,8 @@ CONFIG_DMA_VIRTUAL_CHANNELS=m
CONFIG_DMA_ACPI=y
CONFIG_INTEL_IDMA64=m
CONFIG_INTEL_IOATDMA=m
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
CONFIG_DW_DMAC_CORE=m
# CONFIG_DW_DMAC is not set
CONFIG_DW_DMAC_PCI=m
@@ -6746,6 +6747,7 @@ CONFIG_CRYPTO_AKCIPHER2=y
CONFIG_CRYPTO_AKCIPHER=y
CONFIG_CRYPTO_KPP2=y
CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_MANAGER=y
diff --git a/config/x86_64/default b/config/x86_64/default
index 37f79cc80f..4df5f985d2 100644
--- a/config/x86_64/default
+++ b/config/x86_64/default
@@ -269,7 +269,6 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_OPTPROBES=y
CONFIG_HAVE_KPROBES_ON_FTRACE=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
-CONFIG_HAVE_DMA_ATTRS=y
CONFIG_HAVE_DMA_CONTIGUOUS=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
@@ -5723,6 +5722,8 @@ CONFIG_DMA_VIRTUAL_CHANNELS=m
CONFIG_DMA_ACPI=y
CONFIG_INTEL_IDMA64=m
CONFIG_INTEL_IOATDMA=m
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
CONFIG_DW_DMAC_CORE=m
# CONFIG_DW_DMAC is not set
CONFIG_DW_DMAC_PCI=m
@@ -6733,6 +6734,7 @@ CONFIG_CRYPTO_AKCIPHER2=y
CONFIG_CRYPTO_AKCIPHER=y
CONFIG_CRYPTO_KPP2=y
CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
CONFIG_CRYPTO_RSA=m
CONFIG_CRYPTO_DH=m
CONFIG_CRYPTO_MANAGER=y
diff --git a/config/x86_64/vanilla b/config/x86_64/vanilla
index d08d7b5a16..c512bffa35 100644
--- a/config/x86_64/vanilla
+++ b/config/x86_64/vanilla
@@ -5,6 +5,7 @@ CONFIG_CRYPTO_ZLIB=m
CONFIG_DM_CACHE_MQ=m
# CONFIG_DP83867_PHY is not set
CONFIG_FRAME_POINTER=y
+CONFIG_HAVE_DMA_ATTRS=y
CONFIG_LOCALVERSION="-vanilla"
# CONFIG_MODULE_SIG is not set
# CONFIG_PRESCAN_RXQ is not set
diff --git a/patches.arch/0001-dma-mapping-make-the-generic-coherent-dma-mmap-imple.patch b/patches.arch/0001-dma-mapping-make-the-generic-coherent-dma-mmap-imple.patch
new file mode 100644
index 0000000000..3e07241fb3
--- /dev/null
+++ b/patches.arch/0001-dma-mapping-make-the-generic-coherent-dma-mmap-imple.patch
@@ -0,0 +1,85 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:22 -0800
+Subject: dma-mapping: make the generic coherent dma mmap implementation
+ optional
+Git-commit: 0d4a619b64bad7117947a84a10c17a2b8f14d252
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+This series converts all remaining architectures to use dma_map_ops and
+the generic implementation of the DMA API. This not only simplifies the
+code a lot, but also prepares for possible future changes like more
+generic non-iommu dma_ops implementations or generic per-device
+dma_map_ops.
+
+This patch (of 16):
+
+We have a couple architectures that do not want to support this code, so
+add another Kconfig symbol that disables the code similar to what we do
+for the nommu case.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
+Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
+Cc: Steven Miao <realmz6@gmail.com>
+Cc: Ley Foon Tan <lftan@altera.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Helge Deller <deller@gmx.de>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Jesper Nilsson <jesper.nilsson@axis.com>
+Cc: Mark Salter <msalter@redhat.com>
+Cc: Mikael Starvik <starvik@axis.com>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/Kconfig | 3 +++
+ drivers/base/dma-mapping.c | 4 ++--
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/arch/Kconfig b/arch/Kconfig
+index ba1b626..51c03ef 100644
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -632,4 +632,7 @@ config OLD_SIGACTION
+ config COMPAT_OLD_SIGACTION
+ bool
+
++config ARCH_NO_COHERENT_DMA_MMAP
++ bool
++
+ source "kernel/gcov/Kconfig"
+diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
+index d95c597..381e39d 100644
+--- a/drivers/base/dma-mapping.c
++++ b/drivers/base/dma-mapping.c
+@@ -247,7 +247,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+ {
+ int ret = -ENXIO;
+-#ifdef CONFIG_MMU
++#if defined(CONFIG_MMU) && !defined(CONFIG_ARCH_NO_COHERENT_DMA_MMAP)
+ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
+@@ -264,7 +264,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ user_count << PAGE_SHIFT,
+ vma->vm_page_prot);
+ }
+-#endif /* CONFIG_MMU */
++#endif /* CONFIG_MMU && !CONFIG_ARCH_NO_COHERENT_DMA_MMAP */
+
+ return ret;
+ }
+--
+2.10.0
+
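
The new ARCH_NO_COHERENT_DMA_MMAP symbol only changes behavior for drivers that mmap coherent buffers to user space. A hedged sketch of that call path (foo_* names are hypothetical; dma_mmap_coherent() resolving to dma_common_mmap() matches the #define visible in the arc patch that follows):

	#include <linux/dma-mapping.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	struct foo_dev {			/* hypothetical driver state */
		struct device	*dev;
		void		*cpu_addr;	/* from dma_alloc_coherent() */
		dma_addr_t	dma_handle;
		size_t		size;
	};

	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct foo_dev *fd = file->private_data;

		/* With ARCH_NO_COHERENT_DMA_MMAP selected, the hunk above
		 * makes this fail with -ENXIO instead of building a bogus
		 * user-space mapping of the coherent buffer. */
		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
					 fd->dma_handle, fd->size);
	}
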
diff --git a/patches.arch/0002-arc-convert-to-dma_map_ops.patch b/patches.arch/0002-arc-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..ee72c76c80
--- /dev/null
+++ b/patches.arch/0002-arc-convert-to-dma_map_ops.patch
@@ -0,0 +1,442 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:26 -0800
+Subject: arc: convert to dma_map_ops
+Git-commit: 052c96dbe33b032b949510ca724ed54d02e1255c
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+[vgupta@synopsys.com: ARC: dma mapping fixes #2]
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Cc: Carlos Palminha <CARLOS.PALMINHA@synopsys.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arc/Kconfig | 1 +
+ arch/arc/include/asm/dma-mapping.h | 187 +------------------------------------
+ arch/arc/mm/dma.c | 152 ++++++++++++++++++++----------
+ 3 files changed, 110 insertions(+), 230 deletions(-)
+
+diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
+index 76dde9d..8150c27 100644
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -38,6 +38,7 @@ config ARC
+ select OF_EARLY_FLATTREE
+ select PERF_USE_VMALLOC
+ select HAVE_DEBUG_STACKOVERFLOW
++ select HAVE_DMA_ATTRS
+
+ config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
+index 2d28ba9..2a617f9 100644
+--- a/arch/arc/include/asm/dma-mapping.h
++++ b/arch/arc/include/asm/dma-mapping.h
+@@ -11,192 +11,13 @@
+ #ifndef ASM_ARC_DMA_MAPPING_H
+ #define ASM_ARC_DMA_MAPPING_H
+
+-#include <asm-generic/dma-coherent.h>
+-#include <asm/cacheflush.h>
++extern struct dma_map_ops arc_dma_ops;
+
+-void *dma_alloc_noncoherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp);
+-
+-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle);
+-
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp);
+-
+-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+- dma_addr_t dma_handle);
+-
+-/* drivers/base/dma-mapping.c */
+-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size);
+-
+-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+-
+-/*
+- * streaming DMA Mapping API...
+- * CPU accesses page via normal paddr, thus needs to explicitly made
+- * consistent before each use
+- */
+-
+-static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- switch (dir) {
+- case DMA_FROM_DEVICE:
+- dma_cache_inv(paddr, size);
+- break;
+- case DMA_TO_DEVICE:
+- dma_cache_wback(paddr, size);
+- break;
+- case DMA_BIDIRECTIONAL:
+- dma_cache_wback_inv(paddr, size);
+- break;
+- default:
+- pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+- }
+-}
+-
+-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+- enum dma_data_direction dir);
+-
+-#define _dma_cache_sync(addr, sz, dir) \
+-do { \
+- if (__builtin_constant_p(dir)) \
+- __inline_dma_cache_sync(addr, sz, dir); \
+- else \
+- __arc_dma_cache_sync(addr, sz, dir); \
+-} \
+-while (0);
+-
+-static inline dma_addr_t
+-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+- enum dma_data_direction dir)
+-{
+- _dma_cache_sync((unsigned long)cpu_addr, size, dir);
+- return (dma_addr_t)cpu_addr;
+-}
+-
+-static inline void
+-dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+- size_t size, enum dma_data_direction dir)
+-{
+-}
+-
+-static inline dma_addr_t
+-dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- unsigned long paddr = page_to_phys(page) + offset;
+- return dma_map_single(dev, (void *)paddr, size, dir);
+-}
+-
+-static inline void
+-dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction dir)
+-{
+-}
+-
+-static inline int
+-dma_map_sg(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir)
+-{
+- struct scatterlist *s;
+- int i;
+-
+- for_each_sg(sg, s, nents, i)
+- s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+- s->length, dir);
+-
+- return nents;
+-}
+-
+-static inline void
+-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- struct scatterlist *s;
+- int i;
+-
+- for_each_sg(sg, s, nents, i)
+- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+-}
+-
+-static inline void
+-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction dir)
+-{
+- _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
+-}
+-
+-static inline void
+-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction dir)
+-{
+- _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
+-}
+-
+-static inline void
+-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- _dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
+-}
+-
+-static inline void
+-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- _dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
++ return &arc_dma_ops;
+ }
+
+-static inline void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
+- enum dma_data_direction dir)
+-{
+- int i;
+- struct scatterlist *sg;
+-
+- for_each_sg(sglist, sg, nelems, i)
+- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+-}
+-
+-static inline void
+-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+- int nelems, enum dma_data_direction dir)
+-{
+- int i;
+- struct scatterlist *sg;
+-
+- for_each_sg(sglist, sg, nelems, i)
+- _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+-}
+-
+-static inline int dma_supported(struct device *dev, u64 dma_mask)
+-{
+- /* Support 32 bit DMA mask exclusively */
+- return dma_mask == DMA_BIT_MASK(32);
+-}
+-
+-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+- return -EIO;
+-
+- *dev->dma_mask = dma_mask;
+-
+- return 0;
+-}
++#include <asm-generic/dma-mapping-common.h>
+
+ #endif
+diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
+index 29a46bb..01eaf88 100644
+--- a/arch/arc/mm/dma.c
++++ b/arch/arc/mm/dma.c
+@@ -17,18 +17,14 @@
+ */
+
+ #include <linux/dma-mapping.h>
+-#include <linux/dma-debug.h>
+-#include <linux/export.h>
+ #include <asm/cache.h>
+ #include <asm/cacheflush.h>
+
+-/*
+- * Helpers for Coherent DMA API.
+- */
+-void *dma_alloc_noncoherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
++
++static void *arc_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+- void *paddr;
++ void *paddr, *kvaddr;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = alloc_pages_exact(size, gfp);
+@@ -38,22 +34,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ /* This is bus address, platform dependent */
+ *dma_handle = (dma_addr_t)paddr;
+
+- return paddr;
+-}
+-EXPORT_SYMBOL(dma_alloc_noncoherent);
+-
+-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle)
+-{
+- free_pages_exact((void *)dma_handle, size);
+-}
+-EXPORT_SYMBOL(dma_free_noncoherent);
+-
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
+-{
+- void *paddr, *kvaddr;
+-
+ /*
+ * IOC relies on all data (even coherent DMA data) being in cache
+ * Thus allocate normal cached memory
+@@ -65,22 +45,15 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+ * -For coherent data, Read/Write to buffers terminate early in cache
+ * (vs. always going to memory - thus are faster)
+ */
+- if (is_isa_arcv2() && ioc_exists)
+- return dma_alloc_noncoherent(dev, size, dma_handle, gfp);
+-
+- /* This is linear addr (0x8000_0000 based) */
+- paddr = alloc_pages_exact(size, gfp);
+- if (!paddr)
+- return NULL;
++ if ((is_isa_arcv2() && ioc_exists) ||
++ dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
++ return paddr;
+
+ /* This is kernel Virtual address (0x7000_0000 based) */
+ kvaddr = ioremap_nocache((unsigned long)paddr, size);
+ if (kvaddr == NULL)
+ return NULL;
+
+- /* This is bus address, platform dependent */
+- *dma_handle = (dma_addr_t)paddr;
+-
+ /*
+ * Evict any existing L1 and/or L2 lines for the backing page
+ * in case it was used earlier as a normal "cached" page.
+@@ -95,26 +68,111 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+
+ return kvaddr;
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+-void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+- dma_addr_t dma_handle)
++static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+- if (is_isa_arcv2() && ioc_exists)
+- return dma_free_noncoherent(dev, size, kvaddr, dma_handle);
+-
+- iounmap((void __force __iomem *)kvaddr);
++ if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) &&
++ !(is_isa_arcv2() && ioc_exists))
++ iounmap((void __force __iomem *)vaddr);
+
+ free_pages_exact((void *)dma_handle, size);
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
+
+ /*
+- * Helper for streaming DMA...
++ * streaming DMA Mapping API...
++ * CPU accesses page via normal paddr, thus needs to explicitly made
++ * consistent before each use
+ */
+-void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+- enum dma_data_direction dir)
++static void _dma_cache_sync(unsigned long paddr, size_t size,
++ enum dma_data_direction dir)
++{
++ switch (dir) {
++ case DMA_FROM_DEVICE:
++ dma_cache_inv(paddr, size);
++ break;
++ case DMA_TO_DEVICE:
++ dma_cache_wback(paddr, size);
++ break;
++ case DMA_BIDIRECTIONAL:
++ dma_cache_wback_inv(paddr, size);
++ break;
++ default:
++ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
++ }
++}
++
++static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ unsigned long paddr = page_to_phys(page) + offset;
++ _dma_cache_sync(paddr, size, dir);
++ return (dma_addr_t)paddr;
++}
++
++static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
++ s->length, dir);
++
++ return nents;
++}
++
++static void arc_dma_sync_single_for_cpu(struct device *dev,
++ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
++{
++ _dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
++}
++
++static void arc_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
+ {
+- __inline_dma_cache_sync(paddr, size, dir);
++ _dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
+ }
+-EXPORT_SYMBOL(__arc_dma_cache_sync);
++
++static void arc_dma_sync_sg_for_cpu(struct device *dev,
++ struct scatterlist *sglist, int nelems,
++ enum dma_data_direction dir)
++{
++ int i;
++ struct scatterlist *sg;
++
++ for_each_sg(sglist, sg, nelems, i)
++ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
++}
++
++static void arc_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sglist, int nelems,
++ enum dma_data_direction dir)
++{
++ int i;
++ struct scatterlist *sg;
++
++ for_each_sg(sglist, sg, nelems, i)
++ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
++}
++
++static int arc_dma_supported(struct device *dev, u64 dma_mask)
++{
++ /* Support 32 bit DMA mask exclusively */
++ return dma_mask == DMA_BIT_MASK(32);
++}
++
++struct dma_map_ops arc_dma_ops = {
++ .alloc = arc_dma_alloc,
++ .free = arc_dma_free,
++ .map_page = arc_dma_map_page,
++ .map_sg = arc_dma_map_sg,
++ .sync_single_for_device = arc_dma_sync_single_for_device,
++ .sync_single_for_cpu = arc_dma_sync_single_for_cpu,
++ .sync_sg_for_cpu = arc_dma_sync_sg_for_cpu,
++ .sync_sg_for_device = arc_dma_sync_sg_for_device,
++ .dma_supported = arc_dma_supported,
++};
++EXPORT_SYMBOL(arc_dma_ops);
+--
+2.10.0
+
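
What the conversion buys: once ARC selects HAVE_DMA_ATTRS, the #include <asm-generic/dma-mapping-common.h> at the end of the new header provides the public DMA API as inlines that dispatch through get_dma_ops(). Roughly (a simplified sketch of that generic header, debug and kmemcheck hooks omitted):

	static inline dma_addr_t dma_map_page(struct device *dev,
					      struct page *page, size_t offset,
					      size_t size,
					      enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);	/* &arc_dma_ops here */

		BUG_ON(!valid_dma_direction(dir));
		return ops->map_page(dev, page, offset, size, dir, NULL);
	}
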
diff --git a/patches.arch/0003-avr32-convert-to-dma_map_ops.patch b/patches.arch/0003-avr32-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..23f080a6f5
--- /dev/null
+++ b/patches.arch/0003-avr32-convert-to-dma_map_ops.patch
@@ -0,0 +1,551 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:29 -0800
+Subject: avr32: convert to dma_map_ops
+Git-commit: a34a517ac96c6910a3a0aab9513035bfbed0020c
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
+Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/avr32/Kconfig | 1 +
+ arch/avr32/include/asm/dma-mapping.h | 342 +----------------------------------
+ arch/avr32/mm/dma-coherent.c | 115 ++++++++----
+ 3 files changed, 85 insertions(+), 373 deletions(-)
+
+diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
+index b6878eb..aac3d69 100644
+--- a/arch/avr32/Kconfig
++++ b/arch/avr32/Kconfig
+@@ -7,6 +7,7 @@ config AVR32
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
+ select VIRT_TO_BUS
++ select HAVE_DMA_ATTRS
+ select GENERIC_IRQ_PROBE
+ select GENERIC_ATOMIC64
+ select HARDIRQS_SW_RESEND
+diff --git a/arch/avr32/include/asm/dma-mapping.h b/arch/avr32/include/asm/dma-mapping.h
+index ae7ac92..0239ca8 100644
+--- a/arch/avr32/include/asm/dma-mapping.h
++++ b/arch/avr32/include/asm/dma-mapping.h
+@@ -1,350 +1,16 @@
+ #ifndef __ASM_AVR32_DMA_MAPPING_H
+ #define __ASM_AVR32_DMA_MAPPING_H
+
+-#include <linux/mm.h>
+-#include <linux/device.h>
+-#include <linux/scatterlist.h>
+-#include <asm/processor.h>
+-#include <asm/cacheflush.h>
+-#include <asm/io.h>
+-
+ extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ int direction);
+
+-/*
+- * Return whether the given device DMA address mask can be supported
+- * properly. For example, if your device can only drive the low 24-bits
+- * during bus mastering, then you would pass 0x00ffffff as the mask
+- * to this function.
+- */
+-static inline int dma_supported(struct device *dev, u64 mask)
+-{
+- /* Fix when needed. I really don't know of any limitations */
+- return 1;
+-}
+-
+-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+- return -EIO;
+-
+- *dev->dma_mask = dma_mask;
+- return 0;
+-}
+-
+-/*
+- * dma_map_single can't fail as it is implemented now.
+- */
+-static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
+-{
+- return 0;
+-}
+-
+-/**
+- * dma_alloc_coherent - allocate consistent memory for DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @size: required memory size
+- * @handle: bus-specific DMA address
+- *
+- * Allocate some uncached, unbuffered memory for a device for
+- * performing DMA. This function allocates pages, and will
+- * return the CPU-viewed address, and sets @handle to be the
+- * device-viewed address.
+- */
+-extern void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t gfp);
+-
+-/**
+- * dma_free_coherent - free memory allocated by dma_alloc_coherent
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @size: size of memory originally requested in dma_alloc_coherent
+- * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+- * @handle: device-view address returned from dma_alloc_coherent
+- *
+- * Free (and unmap) a DMA buffer previously allocated by
+- * dma_alloc_coherent().
+- *
+- * References to memory and mappings associated with cpu_addr/handle
+- * during and after this call executing are illegal.
+- */
+-extern void dma_free_coherent(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t handle);
+-
+-/**
+- * dma_alloc_writecombine - allocate write-combining memory for DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @size: required memory size
+- * @handle: bus-specific DMA address
+- *
+- * Allocate some uncached, buffered memory for a device for
+- * performing DMA. This function allocates pages, and will
+- * return the CPU-viewed address, and sets @handle to be the
+- * device-viewed address.
+- */
+-extern void *dma_alloc_writecombine(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t gfp);
+-
+-/**
+- * dma_free_coherent - free memory allocated by dma_alloc_writecombine
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @size: size of memory originally requested in dma_alloc_writecombine
+- * @cpu_addr: CPU-view address returned from dma_alloc_writecombine
+- * @handle: device-view address returned from dma_alloc_writecombine
+- *
+- * Free (and unmap) a DMA buffer previously allocated by
+- * dma_alloc_writecombine().
+- *
+- * References to memory and mappings associated with cpu_addr/handle
+- * during and after this call executing are illegal.
+- */
+-extern void dma_free_writecombine(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t handle);
+-
+-/**
+- * dma_map_single - map a single buffer for streaming DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @cpu_addr: CPU direct mapped address of buffer
+- * @size: size of buffer to map
+- * @dir: DMA transfer direction
+- *
+- * Ensure that any data held in the cache is appropriately discarded
+- * or written back.
+- *
+- * The device owns this memory once this call has completed. The CPU
+- * can regain ownership by calling dma_unmap_single() or dma_sync_single().
+- */
+-static inline dma_addr_t
+-dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- dma_cache_sync(dev, cpu_addr, size, direction);
+- return virt_to_bus(cpu_addr);
+-}
+-
+-/**
+- * dma_unmap_single - unmap a single buffer previously mapped
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @handle: DMA address of buffer
+- * @size: size of buffer to map
+- * @dir: DMA transfer direction
+- *
+- * Unmap a single streaming mode DMA translation. The handle and size
+- * must match what was provided in the previous dma_map_single() call.
+- * All other usages are undefined.
+- *
+- * After this call, reads by the CPU to the buffer are guaranteed to see
+- * whatever the device wrote there.
+- */
+-static inline void
+-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction direction)
+-{
+-
+-}
+-
+-/**
+- * dma_map_page - map a portion of a page for streaming DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @page: page that buffer resides in
+- * @offset: offset into page for start of buffer
+- * @size: size of buffer to map
+- * @dir: DMA transfer direction
+- *
+- * Ensure that any data held in the cache is appropriately discarded
+- * or written back.
+- *
+- * The device owns this memory once this call has completed. The CPU
+- * can regain ownership by calling dma_unmap_page() or dma_sync_single().
+- */
+-static inline dma_addr_t
+-dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- return dma_map_single(dev, page_address(page) + offset,
+- size, direction);
+-}
+-
+-/**
+- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @handle: DMA address of buffer
+- * @size: size of buffer to map
+- * @dir: DMA transfer direction
+- *
+- * Unmap a single streaming mode DMA translation. The handle and size
+- * must match what was provided in the previous dma_map_single() call.
+- * All other usages are undefined.
+- *
+- * After this call, reads by the CPU to the buffer are guaranteed to see
+- * whatever the device wrote there.
+- */
+-static inline void
+-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction)
+-{
+- dma_unmap_single(dev, dma_address, size, direction);
+-}
+-
+-/**
+- * dma_map_sg - map a set of SG buffers for streaming mode DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @sg: list of buffers
+- * @nents: number of buffers to map
+- * @dir: DMA transfer direction
+- *
+- * Map a set of buffers described by scatterlist in streaming
+- * mode for DMA. This is the scatter-gather version of the
+- * above pci_map_single interface. Here the scatter gather list
+- * elements are each tagged with the appropriate dma address
+- * and length. They are obtained via sg_dma_{address,length}(SG).
+- *
+- * NOTE: An implementation may be able to use a smaller number of
+- * DMA address/length pairs than there are SG table elements.
+- * (for example via virtual mapping capabilities)
+- * The routine returns the number of addr/length pairs actually
+- * used, at most nents.
+- *
+- * Device ownership issues as mentioned above for pci_map_single are
+- * the same here.
+- */
+-static inline int
+-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
+-{
+- int i;
+- struct scatterlist *sg;
+-
+- for_each_sg(sglist, sg, nents, i) {
+- char *virt;
+-
+- sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
+- virt = sg_virt(sg);
+- dma_cache_sync(dev, virt, sg->length, direction);
+- }
+-
+- return nents;
+-}
+-
+-/**
+- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @sg: list of buffers
+- * @nents: number of buffers to map
+- * @dir: DMA transfer direction
+- *
+- * Unmap a set of streaming mode DMA translations.
+- * Again, CPU read rules concerning calls here are the same as for
+- * pci_unmap_single() above.
+- */
+-static inline void
+-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+- enum dma_data_direction direction)
+-{
+-
+-}
+-
+-/**
+- * dma_sync_single_for_cpu
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @handle: DMA address of buffer
+- * @size: size of buffer to map
+- * @dir: DMA transfer direction
+- *
+- * Make physical memory consistent for a single streaming mode DMA
+- * translation after a transfer.
+- *
+- * If you perform a dma_map_single() but wish to interrogate the
+- * buffer using the cpu, yet do not wish to teardown the DMA mapping,
+- * you must call this function before doing so. At the next point you
+- * give the DMA address back to the card, you must first perform a
+- * dma_sync_single_for_device, and then the device again owns the
+- * buffer.
+- */
+-static inline void
+-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction)
+-{
+- /*
+- * No need to do anything since the CPU isn't supposed to
+- * touch this memory after we flushed it at mapping- or
+- * sync-for-device time.
+- */
+-}
+-
+-static inline void
+-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction)
+-{
+- dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
+-}
+-
+-static inline void
+-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- /* just sync everything, that's all the pci API can do */
+- dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+-}
+-
+-static inline void
+-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- /* just sync everything, that's all the pci API can do */
+- dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+-}
++extern struct dma_map_ops avr32_dma_ops;
+
+-/**
+- * dma_sync_sg_for_cpu
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @sg: list of buffers
+- * @nents: number of buffers to map
+- * @dir: DMA transfer direction
+- *
+- * Make physical memory consistent for a set of streaming
+- * mode DMA translations after a transfer.
+- *
+- * The same as dma_sync_single_for_* but for a scatter-gather list,
+- * same rules and usage.
+- */
+-static inline void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction direction)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- /*
+- * No need to do anything since the CPU isn't supposed to
+- * touch this memory after we flushed it at mapping- or
+- * sync-for-device time.
+- */
++ return &avr32_dma_ops;
+ }
+
+-static inline void
+-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction direction)
+-{
+- int i;
+- struct scatterlist *sg;
+-
+- for_each_sg(sglist, sg, nents, i)
+- dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
+-}
+-
+-/* Now for the API extensions over the pci_ one */
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+-
+-/* drivers/base/dma-mapping.c */
+-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size);
+-
+-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
++#include <asm-generic/dma-mapping-common.h>
+
+ #endif /* __ASM_AVR32_DMA_MAPPING_H */
+diff --git a/arch/avr32/mm/dma-coherent.c b/arch/avr32/mm/dma-coherent.c
+index 50cdb5b..92cf1fb 100644
+--- a/arch/avr32/mm/dma-coherent.c
++++ b/arch/avr32/mm/dma-coherent.c
+@@ -9,9 +9,14 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/gfp.h>
+ #include <linux/export.h>
++#include <linux/mm.h>
++#include <linux/device.h>
++#include <linux/scatterlist.h>
+
+-#include <asm/addrspace.h>
++#include <asm/processor.h>
+ #include <asm/cacheflush.h>
++#include <asm/io.h>
++#include <asm/addrspace.h>
+
+ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
+ {
+@@ -93,60 +98,100 @@ static void __dma_free(struct device *dev, size_t size,
+ __free_page(page++);
+ }
+
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t gfp)
++static void *avr32_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+ struct page *page;
+- void *ret = NULL;
++ dma_addr_t phys;
+
+ page = __dma_alloc(dev, size, handle, gfp);
+- if (page)
+- ret = phys_to_uncached(page_to_phys(page));
++ if (!page)
++ return NULL;
++ phys = page_to_phys(page);
+
+- return ret;
++ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
++ /* Now, map the page into P3 with write-combining turned on */
++ *handle = phys;
++ return __ioremap(phys, size, _PAGE_BUFFER);
++ } else {
++ return phys_to_uncached(phys);
++ }
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t handle)
++static void avr32_dma_free(struct device *dev, size_t size,
++ void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+ {
+- void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
+ struct page *page;
+
+- pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
+- cpu_addr, (unsigned long)handle, (unsigned)size);
+- BUG_ON(!virt_addr_valid(addr));
+- page = virt_to_page(addr);
++ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
++ iounmap(cpu_addr);
++
++ page = phys_to_page(handle);
++ } else {
++ void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
++
++ pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
++ cpu_addr, (unsigned long)handle, (unsigned)size);
++
++ BUG_ON(!virt_addr_valid(addr));
++ page = virt_to_page(addr);
++ }
++
+ __dma_free(dev, size, page, handle);
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
+
+-void *dma_alloc_writecombine(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t gfp)
++static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+- struct page *page;
+- dma_addr_t phys;
++ void *cpu_addr = page_address(page) + offset;
+
+- page = __dma_alloc(dev, size, handle, gfp);
+- if (!page)
+- return NULL;
++ dma_cache_sync(dev, cpu_addr, size, direction);
++ return virt_to_bus(cpu_addr);
++}
+
+- phys = page_to_phys(page);
+- *handle = phys;
++static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ int i;
++ struct scatterlist *sg;
++
++ for_each_sg(sglist, sg, nents, i) {
++ char *virt;
+
+- /* Now, map the page into P3 with write-combining turned on */
+- return __ioremap(phys, size, _PAGE_BUFFER);
++ sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
++ virt = sg_virt(sg);
++ dma_cache_sync(dev, virt, sg->length, direction);
++ }
++
++ return nents;
+ }
+-EXPORT_SYMBOL(dma_alloc_writecombine);
+
+-void dma_free_writecombine(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t handle)
++static void avr32_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
+ {
+- struct page *page;
++ dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
++}
+
+- iounmap(cpu_addr);
++static void avr32_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sglist, int nents,
++ enum dma_data_direction direction)
++{
++ int i;
++ struct scatterlist *sg;
+
+- page = phys_to_page(handle);
+- __dma_free(dev, size, page, handle);
++ for_each_sg(sglist, sg, nents, i)
++ dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
+ }
+-EXPORT_SYMBOL(dma_free_writecombine);
++
++struct dma_map_ops avr32_dma_ops = {
++ .alloc = avr32_dma_alloc,
++ .free = avr32_dma_free,
++ .map_page = avr32_dma_map_page,
++ .map_sg = avr32_dma_map_sg,
++ .sync_single_for_device = avr32_dma_sync_single_for_device,
++ .sync_sg_for_device = avr32_dma_sync_sg_for_device,
++};
++EXPORT_SYMBOL(avr32_dma_ops);
+--
+2.10.0
+
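
Note how dma_alloc_writecombine()/dma_free_writecombine() disappear as exported symbols and become a DMA_ATTR_WRITE_COMBINE branch inside avr32_dma_alloc()/avr32_dma_free() above. On the caller side, the attrs-based equivalent of the old helper looks roughly like this (a sketch against the 4.4-era struct dma_attrs API; foo_alloc_wc is a hypothetical name, not code from this commit):

	#include <linux/dma-attrs.h>
	#include <linux/dma-mapping.h>

	static void *foo_alloc_wc(struct device *dev, size_t size,
				  dma_addr_t *handle, gfp_t gfp)
	{
		DEFINE_DMA_ATTRS(attrs);

		/* Routes the allocation into the write-combining branch
		 * of the arch's .alloc hook. */
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		return dma_alloc_attrs(dev, size, handle, gfp, &attrs);
	}
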
diff --git a/patches.arch/0004-blackfin-convert-to-dma_map_ops.patch b/patches.arch/0004-blackfin-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..6b2c6eac60
--- /dev/null
+++ b/patches.arch/0004-blackfin-convert-to-dma_map_ops.patch
@@ -0,0 +1,277 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:32 -0800
+Subject: blackfin: convert to dma_map_ops
+Git-commit: 6f62097583e799040d6d18909b670b1e4dbb614d
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Steven Miao <realmz6@gmail.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/blackfin/Kconfig | 1 +
+ arch/blackfin/include/asm/dma-mapping.h | 127 +-------------------------------
+ arch/blackfin/kernel/dma-mapping.c | 52 +++++++++----
+ 3 files changed, 43 insertions(+), 137 deletions(-)
+
+diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
+index af76634..4be2f90 100644
+--- a/arch/blackfin/Kconfig
++++ b/arch/blackfin/Kconfig
+@@ -14,6 +14,7 @@ config BLACKFIN
+ def_bool y
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
++ select HAVE_DMA_ATTRS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+diff --git a/arch/blackfin/include/asm/dma-mapping.h b/arch/blackfin/include/asm/dma-mapping.h
+index 054d9ec..ea5a2e8 100644
+--- a/arch/blackfin/include/asm/dma-mapping.h
++++ b/arch/blackfin/include/asm/dma-mapping.h
+@@ -8,36 +8,6 @@
+ #define _BLACKFIN_DMA_MAPPING_H
+
+ #include <asm/cacheflush.h>
+-struct scatterlist;
+-
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp);
+-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle);
+-
+-/*
+- * Now for the API extensions over the pci_ one
+- */
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+-#define dma_supported(d, m) (1)
+-
+-static inline int
+-dma_set_mask(struct device *dev, u64 dma_mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+- return -EIO;
+-
+- *dev->dma_mask = dma_mask;
+-
+- return 0;
+-}
+-
+-static inline int
+-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+
+ extern void
+ __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);
+@@ -66,102 +36,13 @@ _dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
+ __dma_sync(addr, size, dir);
+ }
+
+-static inline dma_addr_t
+-dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction dir)
+-{
+- _dma_sync((dma_addr_t)ptr, size, dir);
+- return (dma_addr_t) ptr;
+-}
+-
+-static inline dma_addr_t
+-dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- return dma_map_single(dev, page_address(page) + offset, size, dir);
+-}
+-
+-static inline void
+-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction dir)
+-{
+- BUG_ON(!valid_dma_direction(dir));
+-}
+-
+-static inline void
+-dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_unmap_single(dev, dma_addr, size, dir);
+-}
+-
+-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction dir);
+-
+-static inline void
+-dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+- int nhwentries, enum dma_data_direction dir)
+-{
+- BUG_ON(!valid_dma_direction(dir));
+-}
+-
+-static inline void
+-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- BUG_ON(!valid_dma_direction(dir));
+-}
+-
+-static inline void
+-dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- _dma_sync(handle + offset, size, dir);
+-}
++extern struct dma_map_ops bfin_dma_ops;
+
+-static inline void
+-dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
+- enum dma_data_direction dir)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
++ return &bfin_dma_ops;
+ }
+
+-static inline void
+-dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
+-}
+-
+-static inline void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction dir)
+-{
+- BUG_ON(!valid_dma_direction(dir));
+-}
+-
+-extern void
+-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir);
+-
+-static inline void
+-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- _dma_sync((dma_addr_t)vaddr, size, dir);
+-}
+-
+-/* drivers/base/dma-mapping.c */
+-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size);
+-
+-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
++#include <asm-generic/dma-mapping-common.h>
+
+ #endif /* _BLACKFIN_DMA_MAPPING_H */
+diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
+index df437e5..771afe6 100644
+--- a/arch/blackfin/kernel/dma-mapping.c
++++ b/arch/blackfin/kernel/dma-mapping.c
+@@ -78,8 +78,8 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
+ spin_unlock_irqrestore(&dma_page_lock, flags);
+ }
+
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
++static void *bfin_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+ void *ret;
+
+@@ -92,15 +92,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+
+ return ret;
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+-void
+-dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle)
++static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ __free_dma_pages((unsigned long)vaddr, get_pages(size));
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
+
+ /*
+ * Streaming DMA mappings
+@@ -112,9 +109,9 @@ void __dma_sync(dma_addr_t addr, size_t size,
+ }
+ EXPORT_SYMBOL(__dma_sync);
+
+-int
+-dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
+- enum dma_data_direction direction)
++static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ struct scatterlist *sg;
+ int i;
+@@ -126,10 +123,10 @@ dma_map_sg(struct device *dev, struct scatterlist *sg_list, int nents,
+
+ return nents;
+ }
+-EXPORT_SYMBOL(dma_map_sg);
+
+-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
+- int nelems, enum dma_data_direction direction)
++static void bfin_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sg_list, int nelems,
++ enum dma_data_direction direction)
+ {
+ struct scatterlist *sg;
+ int i;
+@@ -139,4 +136,31 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg_list,
+ __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
+ }
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_device);
++
++static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
++
++ _dma_sync(handle, size, dir);
++ return handle;
++}
++
++static inline void bfin_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ _dma_sync(handle, size, dir);
++}
++
++struct dma_map_ops bfin_dma_ops = {
++ .alloc = bfin_dma_alloc,
++ .free = bfin_dma_free,
++
++ .map_page = bfin_dma_map_page,
++ .map_sg = bfin_dma_map_sg,
++
++ .sync_single_for_device = bfin_dma_sync_single_for_device,
++ .sync_sg_for_device = bfin_dma_sync_sg_for_device,
++};
++EXPORT_SYMBOL(bfin_dma_ops);
+--
+2.10.0
+
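The header-side churn above, deleting dma_map_single() and its relatives wholesale, works because those entry points do not disappear; <asm-generic/dma-mapping-common.h> re-creates them on top of get_dma_ops(). A simplified sketch of that dispatch (condensed, not the verbatim kernel header, which also wires in the DMA debugging hooks):

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* single mappings are page mappings underneath, which is why the
	 * converted ports only implement .map_page */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, attrs);
}

#define dma_map_single(d, p, s, dir)	dma_map_single_attrs(d, p, s, dir, NULL)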
diff --git a/patches.arch/0005-c6x-convert-to-dma_map_ops.patch b/patches.arch/0005-c6x-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..d69d0c0f92
--- /dev/null
+++ b/patches.arch/0005-c6x-convert-to-dma_map_ops.patch
@@ -0,0 +1,356 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:35 -0800
+Subject: c6x: convert to dma_map_ops
+Git-commit: 4605f04b2893fb5498b31c54e8f21da2fc4cc736
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+[dan.carpenter@oracle.com: C6X: fix build breakage]
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Mark Salter <msalter@redhat.com>
+Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/c6x/Kconfig | 2 +
+ arch/c6x/include/asm/dma-mapping.h | 98 ++++----------------------------------
+ arch/c6x/kernel/dma.c | 95 +++++++++++++++++-------------------
+ arch/c6x/mm/dma-coherent.c | 10 ++--
+ 4 files changed, 58 insertions(+), 147 deletions(-)
+
+diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
+index 77ea09b..8602f72 100644
+--- a/arch/c6x/Kconfig
++++ b/arch/c6x/Kconfig
+@@ -17,6 +17,8 @@ config C6X
+ select OF_EARLY_FLATTREE
+ select GENERIC_CLOCKEVENTS
+ select MODULES_USE_ELF_RELA
++ select ARCH_NO_COHERENT_DMA_MMAP
++ select HAVE_DMA_ATTRS
+
+ config MMU
+ def_bool n
+diff --git a/arch/c6x/include/asm/dma-mapping.h b/arch/c6x/include/asm/dma-mapping.h
+index bbd7774..f881e42 100644
+--- a/arch/c6x/include/asm/dma-mapping.h
++++ b/arch/c6x/include/asm/dma-mapping.h
+@@ -12,104 +12,24 @@
+ #ifndef _ASM_C6X_DMA_MAPPING_H
+ #define _ASM_C6X_DMA_MAPPING_H
+
+-#include <linux/dma-debug.h>
+-#include <asm-generic/dma-coherent.h>
+-
+-#define dma_supported(d, m) 1
+-
+-static inline void dma_sync_single_range_for_device(struct device *dev,
+- dma_addr_t addr,
+- unsigned long offset,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+-}
+-
+-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+- return -EIO;
+-
+- *dev->dma_mask = dma_mask;
+-
+- return 0;
+-}
+-
+ /*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- debug_dma_mapping_error(dev, dma_addr);
+- return dma_addr == ~0;
+-}
+-
+-extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+- size_t size, enum dma_data_direction dir);
+-
+-extern void dma_unmap_single(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir);
+-
+-extern int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction direction);
+-
+-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction direction);
++#define DMA_ERROR_CODE ~0
+
+-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_addr_t handle;
+-
+- handle = dma_map_single(dev, page_address(page) + offset, size, dir);
+-
+- debug_dma_map_page(dev, page, offset, size, dir, handle, false);
+-
+- return handle;
+-}
++extern struct dma_map_ops c6x_dma_ops;
+
+-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- dma_unmap_single(dev, handle, size, dir);
+-
+- debug_dma_unmap_page(dev, handle, size, dir, false);
++ return &c6x_dma_ops;
+ }
+
+-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir);
+-
+-extern void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+- size_t size,
+- enum dma_data_direction dir);
+-
+-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir);
+-
+-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir);
++#include <asm-generic/dma-mapping-common.h>
+
+ extern void coherent_mem_init(u32 start, u32 size);
+-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
+-
+-/* Not supported for now */
+-static inline int dma_mmap_coherent(struct device *dev,
+- struct vm_area_struct *vma, void *cpu_addr,
+- dma_addr_t dma_addr, size_t size)
+-{
+- return -EINVAL;
+-}
+-
+-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size)
+-{
+- return -EINVAL;
+-}
++void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp, struct dma_attrs *attrs);
++void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs);
+
+ #endif /* _ASM_C6X_DMA_MAPPING_H */
+diff --git a/arch/c6x/kernel/dma.c b/arch/c6x/kernel/dma.c
+index ab7b12d..8a80f3a 100644
+--- a/arch/c6x/kernel/dma.c
++++ b/arch/c6x/kernel/dma.c
+@@ -36,110 +36,101 @@ static void c6x_dma_sync(dma_addr_t handle, size_t size,
+ }
+ }
+
+-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction dir)
++static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
+ {
+- dma_addr_t addr = virt_to_phys(ptr);
++ dma_addr_t handle = virt_to_phys(page_address(page) + offset);
+
+- c6x_dma_sync(addr, size, dir);
+-
+- debug_dma_map_page(dev, virt_to_page(ptr),
+- (unsigned long)ptr & ~PAGE_MASK, size,
+- dir, addr, true);
+- return addr;
++ c6x_dma_sync(handle, size, dir);
++ return handle;
+ }
+-EXPORT_SYMBOL(dma_map_single);
+-
+
+-void dma_unmap_single(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
++static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+ c6x_dma_sync(handle, size, dir);
+-
+- debug_dma_unmap_page(dev, handle, size, dir, true);
+ }
+-EXPORT_SYMBOL(dma_unmap_single);
+-
+
+-int dma_map_sg(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction dir)
++static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+ struct scatterlist *sg;
+ int i;
+
+- for_each_sg(sglist, sg, nents, i)
+- sg->dma_address = dma_map_single(dev, sg_virt(sg), sg->length,
+- dir);
+-
+- debug_dma_map_sg(dev, sglist, nents, nents, dir);
++ for_each_sg(sglist, sg, nents, i) {
++ sg->dma_address = sg_phys(sg);
++ c6x_dma_sync(sg->dma_address, sg->length, dir);
++ }
+
+ return nents;
+ }
+-EXPORT_SYMBOL(dma_map_sg);
+-
+
+-void dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction dir)
++static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
+ {
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+- dma_unmap_single(dev, sg_dma_address(sg), sg->length, dir);
++ c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
+
+- debug_dma_unmap_sg(dev, sglist, nents, dir);
+ }
+-EXPORT_SYMBOL(dma_unmap_sg);
+
+-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
++static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir)
+ {
+ c6x_dma_sync(handle, size, dir);
+
+- debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+ }
+-EXPORT_SYMBOL(dma_sync_single_for_cpu);
+
+-
+-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
++static void c6x_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+ {
+ c6x_dma_sync(handle, size, dir);
+
+- debug_dma_sync_single_for_device(dev, handle, size, dir);
+ }
+-EXPORT_SYMBOL(dma_sync_single_for_device);
+-
+
+-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction dir)
++static void c6x_dma_sync_sg_for_cpu(struct device *dev,
++ struct scatterlist *sglist, int nents,
++ enum dma_data_direction dir)
+ {
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+- dma_sync_single_for_cpu(dev, sg_dma_address(sg),
++ c6x_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
+ sg->length, dir);
+
+- debug_dma_sync_sg_for_cpu(dev, sglist, nents, dir);
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+-
+
+-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction dir)
++static void c6x_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sglist, int nents,
++ enum dma_data_direction dir)
+ {
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nents, i)
+- dma_sync_single_for_device(dev, sg_dma_address(sg),
++ c6x_dma_sync_single_for_device(dev, sg_dma_address(sg),
+ sg->length, dir);
+
+- debug_dma_sync_sg_for_device(dev, sglist, nents, dir);
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
++struct dma_map_ops c6x_dma_ops = {
++ .alloc = c6x_dma_alloc,
++ .free = c6x_dma_free,
++ .map_page = c6x_dma_map_page,
++ .unmap_page = c6x_dma_unmap_page,
++ .map_sg = c6x_dma_map_sg,
++ .unmap_sg = c6x_dma_unmap_sg,
++ .sync_single_for_device = c6x_dma_sync_single_for_device,
++ .sync_single_for_cpu = c6x_dma_sync_single_for_cpu,
++ .sync_sg_for_device = c6x_dma_sync_sg_for_device,
++ .sync_sg_for_cpu = c6x_dma_sync_sg_for_cpu,
++};
++EXPORT_SYMBOL(c6x_dma_ops);
+
+ /* Number of entries preallocated for DMA-API debugging */
+ #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
+diff --git a/arch/c6x/mm/dma-coherent.c b/arch/c6x/mm/dma-coherent.c
+index 4187e51..f7ee63a 100644
+--- a/arch/c6x/mm/dma-coherent.c
++++ b/arch/c6x/mm/dma-coherent.c
+@@ -73,8 +73,8 @@ static void __free_dma_pages(u32 addr, int order)
+ * Allocate DMA coherent memory space and return both the kernel
+ * virtual and DMA address for that space.
+ */
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t gfp)
++void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp, struct dma_attrs *attrs)
+ {
+ u32 paddr;
+ int order;
+@@ -94,13 +94,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+
+ return phys_to_virt(paddr);
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+ /*
+ * Free DMA coherent memory as defined by the above mapping.
+ */
+-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle)
++void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ int order;
+
+@@ -111,7 +110,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+
+ __free_dma_pages(virt_to_phys(vaddr), order);
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
+
+ /*
+ * Initialise the coherent DMA memory allocator using the given uncached region.
+--
+2.10.0
+
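Two details above are specific to c6x: selecting ARCH_NO_COHERENT_DMA_MMAP replaces the old dma_mmap_coherent()/dma_get_sgtable() stubs that returned -EINVAL, and c6x_dma_alloc()/c6x_dma_free() stay out of line in mm/dma-coherent.c, hence the extern prototypes in the header. Defining DMA_ERROR_CODE as ~0 is what lets the private dma_mapping_error() go away: the common wrapper falls back to comparing against that constant when the ops provide no .mapping_error hook, roughly as follows (condensed; the real v4.5-era version also calls the DMA debug code):

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;	/* ~0 on c6x, see above */
#else
	return 0;				/* no way to signal failure */
#endif
}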
diff --git a/patches.arch/0006-cris-convert-to-dma_map_ops.patch b/patches.arch/0006-cris-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..053f5b15a4
--- /dev/null
+++ b/patches.arch/0006-cris-convert-to-dma_map_ops.patch
@@ -0,0 +1,303 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:38 -0800
+Subject: cris: convert to dma_map_ops
+Git-commit: e20dd88995dffe262934f355b3e96daa2458b331
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Mikael Starvik <starvik@axis.com>
+Cc: Jesper Nilsson <jesper.nilsson@axis.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/cris/Kconfig | 1 +
+ arch/cris/arch-v32/drivers/pci/dma.c | 54 +++++++++---
+ arch/cris/include/asm/dma-mapping.h | 161 ++---------------------------------
+ 3 files changed, 51 insertions(+), 165 deletions(-)
+
+diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
+index e086f9e..20d919c 100644
+--- a/arch/cris/Kconfig
++++ b/arch/cris/Kconfig
+@@ -54,6 +54,7 @@ config CRIS
+ select GENERIC_ATOMIC64
+ select HAVE_UID16
+ select VIRT_TO_BUS
++ select HAVE_DMA_ATTRS
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IOMAP
+diff --git a/arch/cris/arch-v32/drivers/pci/dma.c b/arch/cris/arch-v32/drivers/pci/dma.c
+index ee55578..8d5efa5 100644
+--- a/arch/cris/arch-v32/drivers/pci/dma.c
++++ b/arch/cris/arch-v32/drivers/pci/dma.c
+@@ -16,21 +16,18 @@
+ #include <linux/gfp.h>
+ #include <asm/io.h>
+
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
++static void *v32_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+ void *ret;
+- int order = get_order(size);
++
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+
+- if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+- return ret;
+-
+ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+ gfp |= GFP_DMA;
+
+- ret = (void *)__get_free_pages(gfp, order);
++ ret = (void *)__get_free_pages(gfp, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+@@ -39,12 +36,45 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+ return ret;
+ }
+
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle)
++static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
++{
++ free_pages((unsigned long)vaddr, get_order(size));
++}
++
++static inline dma_addr_t v32_dma_map_page(struct device *dev,
++ struct page *page, unsigned long offset, size_t size,
++ enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+- int order = get_order(size);
++ return page_to_phys(page) + offset;
++}
+
+- if (!dma_release_from_coherent(dev, order, vaddr))
+- free_pages((unsigned long)vaddr, order);
++static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ printk("Map sg\n");
++ return nents;
++}
++
++static inline int v32_dma_supported(struct device *dev, u64 mask)
++{
++ /*
++ * we fall back to GFP_DMA when the mask isn't all 1s,
++ * so we can't guarantee allocations that must be
++ * within a tighter range than GFP_DMA..
++ */
++ if (mask < 0x00ffffff)
++ return 0;
++ return 1;
+ }
+
++struct dma_map_ops v32_dma_ops = {
++ .alloc = v32_dma_alloc,
++ .free = v32_dma_free,
++ .map_page = v32_dma_map_page,
++ .map_sg = v32_dma_map_sg,
++ .dma_supported = v32_dma_supported,
++};
++EXPORT_SYMBOL(v32_dma_ops);
+diff --git a/arch/cris/include/asm/dma-mapping.h b/arch/cris/include/asm/dma-mapping.h
+index 57f794e..34e7c7c7 100644
+--- a/arch/cris/include/asm/dma-mapping.h
++++ b/arch/cris/include/asm/dma-mapping.h
+@@ -1,156 +1,22 @@
+-/* DMA mapping. Nothing tricky here, just virt_to_phys */
+-
+ #ifndef _ASM_CRIS_DMA_MAPPING_H
+ #define _ASM_CRIS_DMA_MAPPING_H
+
+-#include <linux/mm.h>
+-#include <linux/kernel.h>
+-#include <linux/scatterlist.h>
+-
+-#include <asm/cache.h>
+-#include <asm/io.h>
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+-
+ #ifdef CONFIG_PCI
+-#include <asm-generic/dma-coherent.h>
+-
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag);
++extern struct dma_map_ops v32_dma_ops;
+
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle);
+-#else
+-static inline void *
+-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+- gfp_t flag)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- BUG();
+- return NULL;
++ return &v32_dma_ops;
+ }
+-
+-static inline void
+-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+- dma_addr_t dma_handle)
++#else
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- BUG();
++ BUG();
++ return NULL;
+ }
+ #endif
+-static inline dma_addr_t
+-dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+- return virt_to_phys(ptr);
+-}
+-
+-static inline void
+-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-static inline int
+-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction direction)
+-{
+- printk("Map sg\n");
+- return nents;
+-}
+-
+-static inline dma_addr_t
+-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+- size_t size, enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+- return page_to_phys(page) + offset;
+-}
+-
+-static inline void
+-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-
+-static inline void
+-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-static inline void
+-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline void
+-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline void
+-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+-}
+
+-static inline void
+-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline void
+-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline int
+-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+-static inline int
+-dma_supported(struct device *dev, u64 mask)
+-{
+- /*
+- * we fall back to GFP_DMA when the mask isn't all 1s,
+- * so we can't guarantee allocations that must be
+- * within a tighter range than GFP_DMA..
+- */
+- if(mask < 0x00ffffff)
+- return 0;
+-
+- return 1;
+-}
+-
+-static inline int
+-dma_set_mask(struct device *dev, u64 mask)
+-{
+- if(!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+-
+- *dev->dma_mask = mask;
+-
+- return 0;
+-}
++#include <asm-generic/dma-mapping-common.h>
+
+ static inline void
+ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+@@ -158,15 +24,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ {
+ }
+
+-/* drivers/base/dma-mapping.c */
+-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size);
+-
+-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+-
+-
+ #endif
+--
+2.10.0
+
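The cris 0x00ffffff check survives unchanged, it just moves behind the .dma_supported hook; dma_set_mask() itself now comes from common code. A condensed sketch of how the generic layer consults the hook, where a port that leaves it NULL is treated as supporting any mask:

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->dma_supported)
		return 1;	/* no hook: any mask is acceptable */
	return ops->dma_supported(dev, mask);	/* v32_dma_supported() here */
}

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}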
diff --git a/patches.arch/0007-nios2-convert-to-dma_map_ops.patch b/patches.arch/0007-nios2-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..3f1ac6beed
--- /dev/null
+++ b/patches.arch/0007-nios2-convert-to-dma_map_ops.patch
@@ -0,0 +1,408 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:41 -0800
+Subject: nios2: convert to dma_map_ops
+Git-commit: 5a1a67f1d7fef42eaa5a4cc3d48094fbec75d685
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Ley Foon Tan <lftan@altera.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/nios2/Kconfig | 1 +
+ arch/nios2/include/asm/dma-mapping.h | 123 ++---------------------------
+ arch/nios2/mm/dma-mapping.c | 149 +++++++++++++++++++----------------
+ 3 files changed, 87 insertions(+), 186 deletions(-)
+
+diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig
+index 4375554..4b2504d 100644
+--- a/arch/nios2/Kconfig
++++ b/arch/nios2/Kconfig
+@@ -16,6 +16,7 @@ config NIOS2
+ select SOC_BUS
+ select SPARSE_IRQ
+ select USB_ARCH_HAS_HCD if USB_SUPPORT
++ select HAVE_DMA_ATTRS
+
+ config GENERIC_CSUM
+ def_bool y
+diff --git a/arch/nios2/include/asm/dma-mapping.h b/arch/nios2/include/asm/dma-mapping.h
+index b556723..bec8ac8 100644
+--- a/arch/nios2/include/asm/dma-mapping.h
++++ b/arch/nios2/include/asm/dma-mapping.h
+@@ -10,131 +10,20 @@
+ #ifndef _ASM_NIOS2_DMA_MAPPING_H
+ #define _ASM_NIOS2_DMA_MAPPING_H
+
+-#include <linux/scatterlist.h>
+-#include <linux/cache.h>
+-#include <asm/cacheflush.h>
++extern struct dma_map_ops nios2_dma_ops;
+
+-static inline void __dma_sync_for_device(void *vaddr, size_t size,
+- enum dma_data_direction direction)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- switch (direction) {
+- case DMA_FROM_DEVICE:
+- invalidate_dcache_range((unsigned long)vaddr,
+- (unsigned long)(vaddr + size));
+- break;
+- case DMA_TO_DEVICE:
+- /*
+-	 * We just need to flush the caches here, but the Nios2 flush
+-	 * instruction will do both writeback and invalidate.
+- */
+- case DMA_BIDIRECTIONAL: /* flush and invalidate */
+- flush_dcache_range((unsigned long)vaddr,
+- (unsigned long)(vaddr + size));
+- break;
+- default:
+- BUG();
+- }
+-}
+-
+-static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
+- enum dma_data_direction direction)
+-{
+- switch (direction) {
+- case DMA_BIDIRECTIONAL:
+- case DMA_FROM_DEVICE:
+- invalidate_dcache_range((unsigned long)vaddr,
+- (unsigned long)(vaddr + size));
+- break;
+- case DMA_TO_DEVICE:
+- break;
+- default:
+- BUG();
+- }
+-}
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+-
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag);
+-
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle);
+-
+-static inline dma_addr_t dma_map_single(struct device *dev, void *ptr,
+- size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(!valid_dma_direction(direction));
+- __dma_sync_for_device(ptr, size, direction);
+- return virt_to_phys(ptr);
+-}
+-
+-static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+- size_t size, enum dma_data_direction direction)
+-{
+-}
+-
+-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction direction);
+-extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size, enum dma_data_direction direction);
+-extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
+- size_t size, enum dma_data_direction direction);
+-extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+- int nhwentries, enum dma_data_direction direction);
+-extern void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction);
+-extern void dma_sync_single_for_device(struct device *dev,
+- dma_addr_t dma_handle, size_t size, enum dma_data_direction direction);
+-extern void dma_sync_single_range_for_cpu(struct device *dev,
+- dma_addr_t dma_handle, unsigned long offset, size_t size,
+- enum dma_data_direction direction);
+-extern void dma_sync_single_range_for_device(struct device *dev,
+- dma_addr_t dma_handle, unsigned long offset, size_t size,
+- enum dma_data_direction direction);
+-extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+- int nelems, enum dma_data_direction direction);
+-extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+- int nelems, enum dma_data_direction direction);
+-
+-static inline int dma_supported(struct device *dev, u64 mask)
+-{
+- return 1;
+-}
+-
+-static inline int dma_set_mask(struct device *dev, u64 mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+-
+- *dev->dma_mask = mask;
+-
+- return 0;
+-}
+-
+-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
++ return &nios2_dma_ops;
+ }
+
+ /*
+-* dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+-* do any flushing here.
+-*/
++ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
++ * do any flushing here.
++ */
+ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+ {
+ }
+
+-/* drivers/base/dma-mapping.c */
+-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size);
+-
+-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+-
+ #endif /* _ASM_NIOS2_DMA_MAPPING_H */
+diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
+index ac5da75..90422c3 100644
+--- a/arch/nios2/mm/dma-mapping.c
++++ b/arch/nios2/mm/dma-mapping.c
+@@ -20,9 +20,46 @@
+ #include <linux/cache.h>
+ #include <asm/cacheflush.h>
+
++static inline void __dma_sync_for_device(void *vaddr, size_t size,
++ enum dma_data_direction direction)
++{
++ switch (direction) {
++ case DMA_FROM_DEVICE:
++ invalidate_dcache_range((unsigned long)vaddr,
++ (unsigned long)(vaddr + size));
++ break;
++ case DMA_TO_DEVICE:
++ /*
++	 * We just need to flush the caches here, but the Nios2 flush
++	 * instruction will do both writeback and invalidate.
++ */
++ case DMA_BIDIRECTIONAL: /* flush and invalidate */
++ flush_dcache_range((unsigned long)vaddr,
++ (unsigned long)(vaddr + size));
++ break;
++ default:
++ BUG();
++ }
++}
+
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
++static inline void __dma_sync_for_cpu(void *vaddr, size_t size,
++ enum dma_data_direction direction)
++{
++ switch (direction) {
++ case DMA_BIDIRECTIONAL:
++ case DMA_FROM_DEVICE:
++ invalidate_dcache_range((unsigned long)vaddr,
++ (unsigned long)(vaddr + size));
++ break;
++ case DMA_TO_DEVICE:
++ break;
++ default:
++ BUG();
++ }
++}
++
++static void *nios2_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+ void *ret;
+
+@@ -45,24 +82,21 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+
+ return ret;
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle)
++static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
+
+ free_pages(addr, get_order(size));
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
+
+-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction direction)
++static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ int i;
+
+- BUG_ON(!valid_dma_direction(direction));
+-
+ for_each_sg(sg, sg, nents, i) {
+ void *addr;
+
+@@ -75,40 +109,32 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+
+ return nents;
+ }
+-EXPORT_SYMBOL(dma_map_sg);
+
+-dma_addr_t dma_map_page(struct device *dev, struct page *page,
++static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+- enum dma_data_direction direction)
++ enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+- void *addr;
+-
+- BUG_ON(!valid_dma_direction(direction));
++ void *addr = page_address(page) + offset;
+
+- addr = page_address(page) + offset;
+ __dma_sync_for_device(addr, size, direction);
+-
+ return page_to_phys(page) + offset;
+ }
+-EXPORT_SYMBOL(dma_map_page);
+
+-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction)
++static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+- BUG_ON(!valid_dma_direction(direction));
+-
+ __dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
+ }
+-EXPORT_SYMBOL(dma_unmap_page);
+
+-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+- enum dma_data_direction direction)
++static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
++ int nhwentries, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ void *addr;
+ int i;
+
+- BUG_ON(!valid_dma_direction(direction));
+-
+ if (direction == DMA_TO_DEVICE)
+ return;
+
+@@ -118,69 +144,54 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ __dma_sync_for_cpu(addr, sg->length, direction);
+ }
+ }
+-EXPORT_SYMBOL(dma_unmap_sg);
+-
+-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction)
+-{
+- BUG_ON(!valid_dma_direction(direction));
+
+- __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+-}
+-EXPORT_SYMBOL(dma_sync_single_for_cpu);
+-
+-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction)
+-{
+- BUG_ON(!valid_dma_direction(direction));
+-
+- __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+-}
+-EXPORT_SYMBOL(dma_sync_single_for_device);
+-
+-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
++static void nios2_dma_sync_single_for_cpu(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
+ {
+- BUG_ON(!valid_dma_direction(direction));
+-
+ __dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+ }
+-EXPORT_SYMBOL(dma_sync_single_range_for_cpu);
+
+-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
++static void nios2_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
+ {
+- BUG_ON(!valid_dma_direction(direction));
+-
+ __dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+ }
+-EXPORT_SYMBOL(dma_sync_single_range_for_device);
+
+-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction)
++static void nios2_dma_sync_sg_for_cpu(struct device *dev,
++ struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
+ {
+ int i;
+
+- BUG_ON(!valid_dma_direction(direction));
+-
+ /* Make sure that gcc doesn't leave the empty loop body. */
+ for_each_sg(sg, sg, nelems, i)
+ __dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+- int nelems, enum dma_data_direction direction)
++static void nios2_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
+ {
+ int i;
+
+- BUG_ON(!valid_dma_direction(direction));
+-
+ /* Make sure that gcc doesn't leave the empty loop body. */
+ for_each_sg(sg, sg, nelems, i)
+ __dma_sync_for_device(sg_virt(sg), sg->length, direction);
+
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_device);
++
++struct dma_map_ops nios2_dma_ops = {
++ .alloc = nios2_dma_alloc,
++ .free = nios2_dma_free,
++ .map_page = nios2_dma_map_page,
++ .unmap_page = nios2_dma_unmap_page,
++ .map_sg = nios2_dma_map_sg,
++ .unmap_sg = nios2_dma_unmap_sg,
++ .sync_single_for_device = nios2_dma_sync_single_for_device,
++ .sync_single_for_cpu = nios2_dma_sync_single_for_cpu,
++ .sync_sg_for_cpu = nios2_dma_sync_sg_for_cpu,
++ .sync_sg_for_device = nios2_dma_sync_sg_for_device,
++};
++EXPORT_SYMBOL(nios2_dma_ops);
+--
+2.10.0
+
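To place the nios2 hooks in context, this is the streaming-DMA sequence a driver would run on such a non-coherent core; "example_rx", "mydev", "buf" and the device-programming step are placeholders. dma_map_single() lands in nios2_dma_map_page() above, which does the pre-transfer cache maintenance, and dma_unmap_single() lands in nios2_dma_unmap_page(), which invalidates so the CPU sees what the device wrote:

#include <linux/dma-mapping.h>

static int example_rx(struct device *mydev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(mydev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(mydev, handle))
		return -ENOMEM;

	/* ... point the device at 'handle' and wait for the transfer ... */

	dma_unmap_single(mydev, handle, len, DMA_FROM_DEVICE);

	/* only now is it safe for the CPU to read buf */
	return 0;
}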
diff --git a/patches.arch/0008-frv-convert-to-dma_map_ops.patch b/patches.arch/0008-frv-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..fdc2a22bca
--- /dev/null
+++ b/patches.arch/0008-frv-convert-to-dma_map_ops.patch
@@ -0,0 +1,407 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:44 -0800
+Subject: frv: convert to dma_map_ops
+Git-commit: eae075196305549513335c2fc7d5d63712246bfd
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/frv/Kconfig | 2 +
+ arch/frv/include/asm/dma-mapping.h | 132 ++--------------------------------
+ arch/frv/mb93090-mb00/pci-dma-nommu.c | 72 ++++++++++++-------
+ arch/frv/mb93090-mb00/pci-dma.c | 74 ++++++++++++-------
+ 4 files changed, 101 insertions(+), 179 deletions(-)
+
+diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
+index 03bfd6b..e383781 100644
+--- a/arch/frv/Kconfig
++++ b/arch/frv/Kconfig
+@@ -15,6 +15,8 @@ config FRV
+ select OLD_SIGSUSPEND3
+ select OLD_SIGACTION
+ select HAVE_DEBUG_STACKOVERFLOW
++ select ARCH_NO_COHERENT_DMA_MMAP
++ select HAVE_DMA_ATTRS
+
+ config ZONE_DMA
+ bool
+diff --git a/arch/frv/include/asm/dma-mapping.h b/arch/frv/include/asm/dma-mapping.h
+index 2840adc..750951c 100644
+--- a/arch/frv/include/asm/dma-mapping.h
++++ b/arch/frv/include/asm/dma-mapping.h
+@@ -1,128 +1,17 @@
+ #ifndef _ASM_DMA_MAPPING_H
+ #define _ASM_DMA_MAPPING_H
+
+-#include <linux/device.h>
+-#include <linux/scatterlist.h>
+ #include <asm/cache.h>
+ #include <asm/cacheflush.h>
+-#include <asm/io.h>
+-
+-/*
+- * See Documentation/DMA-API.txt for the description of how the
+- * following DMA API should work.
+- */
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+ extern unsigned long __nongprelbss dma_coherent_mem_start;
+ extern unsigned long __nongprelbss dma_coherent_mem_end;
+
+-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp);
+-void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle);
+-
+-extern dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction);
+-
+-static inline
+-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction direction);
+-
+-static inline
+-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-extern
+-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+- size_t size, enum dma_data_direction direction);
+-
+-static inline
+-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-
+-static inline
+-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline
+-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction)
+-{
+- flush_write_buffers();
+-}
++extern struct dma_map_ops frv_dma_ops;
+
+-static inline
+-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+-}
+-
+-static inline
+-void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- flush_write_buffers();
+-}
+-
+-static inline
+-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline
+-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction)
+-{
+- flush_write_buffers();
+-}
+-
+-static inline
+-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+-static inline
+-int dma_supported(struct device *dev, u64 mask)
+-{
+- /*
+- * we fall back to GFP_DMA when the mask isn't all 1s,
+- * so we can't guarantee allocations that must be
+- * within a tighter range than GFP_DMA..
+- */
+- if (mask < 0x00ffffff)
+- return 0;
+-
+- return 1;
+-}
+-
+-static inline
+-int dma_set_mask(struct device *dev, u64 mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+-
+- *dev->dma_mask = mask;
+-
+- return 0;
++ return &frv_dma_ops;
+ }
+
+ static inline
+@@ -132,19 +21,6 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ flush_write_buffers();
+ }
+
+-/* Not supported for now */
+-static inline int dma_mmap_coherent(struct device *dev,
+- struct vm_area_struct *vma, void *cpu_addr,
+- dma_addr_t dma_addr, size_t size)
+-{
+- return -EINVAL;
+-}
+-
+-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size)
+-{
+- return -EINVAL;
+-}
++#include <asm-generic/dma-mapping-common.h>
+
+ #endif /* _ASM_DMA_MAPPING_H */
+diff --git a/arch/frv/mb93090-mb00/pci-dma-nommu.c b/arch/frv/mb93090-mb00/pci-dma-nommu.c
+index 8eeea0d..082be49 100644
+--- a/arch/frv/mb93090-mb00/pci-dma-nommu.c
++++ b/arch/frv/mb93090-mb00/pci-dma-nommu.c
+@@ -34,7 +34,8 @@ struct dma_alloc_record {
+ static DEFINE_SPINLOCK(dma_alloc_lock);
+ static LIST_HEAD(dma_alloc_list);
+
+-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
++static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
++ gfp_t gfp, struct dma_attrs *attrs)
+ {
+ struct dma_alloc_record *new;
+ struct list_head *this = &dma_alloc_list;
+@@ -84,9 +85,8 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
+ return NULL;
+ }
+
+-EXPORT_SYMBOL(dma_alloc_coherent);
+-
+-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
++static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ struct dma_alloc_record *rec;
+ unsigned long flags;
+@@ -105,22 +105,9 @@ void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_
+ BUG();
+ }
+
+-EXPORT_SYMBOL(dma_free_coherent);
+-
+-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-
+- frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
+-
+- return virt_to_bus(ptr);
+-}
+-
+-EXPORT_SYMBOL(dma_map_single);
+-
+-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
++static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ int i;
+ struct scatterlist *sg;
+@@ -135,14 +122,49 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ return nents;
+ }
+
+-EXPORT_SYMBOL(dma_map_sg);
+-
+-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+- size_t size, enum dma_data_direction direction)
++static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+ BUG_ON(direction == DMA_NONE);
+ flush_dcache_page(page);
+ return (dma_addr_t) page_to_phys(page) + offset;
+ }
+
+-EXPORT_SYMBOL(dma_map_page);
++static void frv_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ flush_write_buffers();
++}
++
++static void frv_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ flush_write_buffers();
++}
++
++
++static int frv_dma_supported(struct device *dev, u64 mask)
++{
++ /*
++ * we fall back to GFP_DMA when the mask isn't all 1s,
++ * so we can't guarantee allocations that must be
++ * within a tighter range than GFP_DMA..
++ */
++ if (mask < 0x00ffffff)
++ return 0;
++ return 1;
++}
++
++struct dma_map_ops frv_dma_ops = {
++ .alloc = frv_dma_alloc,
++ .free = frv_dma_free,
++ .map_page = frv_dma_map_page,
++ .map_sg = frv_dma_map_sg,
++ .sync_single_for_device = frv_dma_sync_single_for_device,
++ .sync_sg_for_device = frv_dma_sync_sg_for_device,
++ .dma_supported = frv_dma_supported,
++};
++EXPORT_SYMBOL(frv_dma_ops);
+diff --git a/arch/frv/mb93090-mb00/pci-dma.c b/arch/frv/mb93090-mb00/pci-dma.c
+index 4d1f01d..316b7b6 100644
+--- a/arch/frv/mb93090-mb00/pci-dma.c
++++ b/arch/frv/mb93090-mb00/pci-dma.c
+@@ -18,7 +18,9 @@
+ #include <linux/scatterlist.h>
+ #include <asm/io.h>
+
+-void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t gfp)
++static void *frv_dma_alloc(struct device *hwdev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp,
++ struct dma_attrs *attrs)
+ {
+ void *ret;
+
+@@ -29,29 +31,15 @@ void *dma_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_hand
+ return ret;
+ }
+
+-EXPORT_SYMBOL(dma_alloc_coherent);
+-
+-void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
++static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ consistent_free(vaddr);
+ }
+
+-EXPORT_SYMBOL(dma_free_coherent);
+-
+-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-
+- frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
+-
+- return virt_to_bus(ptr);
+-}
+-
+-EXPORT_SYMBOL(dma_map_single);
+-
+-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
++static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ unsigned long dampr2;
+ void *vaddr;
+@@ -79,14 +67,48 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ return nents;
+ }
+
+-EXPORT_SYMBOL(dma_map_sg);
+-
+-dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+- size_t size, enum dma_data_direction direction)
++static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+- BUG_ON(direction == DMA_NONE);
+ flush_dcache_page(page);
+ return (dma_addr_t) page_to_phys(page) + offset;
+ }
+
+-EXPORT_SYMBOL(dma_map_page);
++static void frv_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ flush_write_buffers();
++}
++
++static void frv_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ flush_write_buffers();
++}
++
++
++static int frv_dma_supported(struct device *dev, u64 mask)
++{
++ /*
++ * we fall back to GFP_DMA when the mask isn't all 1s,
++ * so we can't guarantee allocations that must be
++ * within a tighter range than GFP_DMA..
++ */
++ if (mask < 0x00ffffff)
++ return 0;
++ return 1;
++}
++
++struct dma_map_ops frv_dma_ops = {
++ .alloc = frv_dma_alloc,
++ .free = frv_dma_free,
++ .map_page = frv_dma_map_page,
++ .map_sg = frv_dma_map_sg,
++ .sync_single_for_device = frv_dma_sync_single_for_device,
++ .sync_sg_for_device = frv_dma_sync_sg_for_device,
++ .dma_supported = frv_dma_supported,
++};
++EXPORT_SYMBOL(frv_dma_ops);
+--
+2.10.0
+
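The two frv files above, pci-dma-nommu.c for the uncached-memory model and pci-dma.c for the consistent-mapping model, define the same frv_dma_ops symbol; only one of them is built into a given kernel (depending on CONFIG_MMU), so the single get_dma_ops() in the header serves both. Note also that neither supplies the *_for_cpu sync hooks: the common wrappers treat sync callbacks as optional, along these lines (condensed; the real header also calls the DMA debug code):

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)		/* NULL on frv: nothing to do */
		ops->sync_single_for_cpu(dev, addr, size, dir);
}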
diff --git a/patches.arch/0009-parisc-convert-to-dma_map_ops.patch b/patches.arch/0009-parisc-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..9c904fc0c1
--- /dev/null
+++ b/patches.arch/0009-parisc-convert-to-dma_map_ops.patch
@@ -0,0 +1,732 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:47 -0800
+Subject: parisc: convert to dma_map_ops
+Git-commit: 79387179e2e4fede52326e4c4e26145dbd6b505c
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Tested-by: Helge Deller <deller@gmx.de>
+Acked-by: Helge Deller <deller@gmx.de>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/parisc/Kconfig | 2 +
+ arch/parisc/include/asm/dma-mapping.h | 189 ++--------------------------------
+ arch/parisc/kernel/drivers.c | 2 +-
+ arch/parisc/kernel/pci-dma.c | 92 ++++++++++-------
+ drivers/parisc/ccio-dma.c | 57 +++++-----
+ drivers/parisc/sba_iommu.c | 52 +++++-----
+ 6 files changed, 124 insertions(+), 270 deletions(-)
+
+diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
+index 7c34caf..1489351 100644
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -29,6 +29,8 @@ config PARISC
+ select TTY # Needed for pdc_cons.c
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_ARCH_AUDITSYSCALL
++ select ARCH_NO_COHERENT_DMA_MMAP
++ select HAVE_DMA_ATTRS
+
+ help
+ The PA-RISC microprocessor is designed by Hewlett-Packard and used
+diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
+index d8d60a5..4de5186 100644
+--- a/arch/parisc/include/asm/dma-mapping.h
++++ b/arch/parisc/include/asm/dma-mapping.h
+@@ -1,30 +1,11 @@
+ #ifndef _PARISC_DMA_MAPPING_H
+ #define _PARISC_DMA_MAPPING_H
+
+-#include <linux/mm.h>
+-#include <linux/scatterlist.h>
+ #include <asm/cacheflush.h>
+
+-/* See Documentation/DMA-API-HOWTO.txt */
+-struct hppa_dma_ops {
+- int (*dma_supported)(struct device *dev, u64 mask);
+- void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+- void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
+- void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
+- dma_addr_t (*map_single)(struct device *dev, void *addr, size_t size, enum dma_data_direction direction);
+- void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction direction);
+- int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction);
+- void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nhwents, enum dma_data_direction direction);
+- void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
+- void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t size, enum dma_data_direction direction);
+- void (*dma_sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
+- void (*dma_sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction);
+-};
+-
+ /*
+-** We could live without the hppa_dma_ops indirection if we didn't want
+-** to support 4 different coherent dma models with one binary (they will
+-** someday be loadable modules):
++** We need to support 4 different coherent dma models with one binary:
++**
+ ** I/O MMU consistent method dma_sync behavior
+ ** ============= ====================== =======================
+ ** a) PA-7x00LC uncachable host memory flush/purge
+@@ -40,158 +21,22 @@ struct hppa_dma_ops {
+ */
+
+ #ifdef CONFIG_PA11
+-extern struct hppa_dma_ops pcxl_dma_ops;
+-extern struct hppa_dma_ops pcx_dma_ops;
++extern struct dma_map_ops pcxl_dma_ops;
++extern struct dma_map_ops pcx_dma_ops;
+ #endif
+
+-extern struct hppa_dma_ops *hppa_dma_ops;
+-
+-#define dma_alloc_attrs(d, s, h, f, a) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_attrs(d, s, h, f, a) dma_free_coherent(d, s, h, f)
+-
+-static inline void *
+-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+- gfp_t flag)
+-{
+- return hppa_dma_ops->alloc_consistent(dev, size, dma_handle, flag);
+-}
+-
+-static inline void *
+-dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+- gfp_t flag)
+-{
+- return hppa_dma_ops->alloc_noncoherent(dev, size, dma_handle, flag);
+-}
+-
+-static inline void
+-dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle)
+-{
+- hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
+-}
+-
+-static inline void
+-dma_free_noncoherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle)
+-{
+- hppa_dma_ops->free_consistent(dev, size, vaddr, dma_handle);
+-}
+-
+-static inline dma_addr_t
+-dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction)
+-{
+- return hppa_dma_ops->map_single(dev, ptr, size, direction);
+-}
+-
+-static inline void
+-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- hppa_dma_ops->unmap_single(dev, dma_addr, size, direction);
+-}
+-
+-static inline int
+-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction direction)
+-{
+- return hppa_dma_ops->map_sg(dev, sg, nents, direction);
+-}
+-
+-static inline void
+-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+- enum dma_data_direction direction)
+-{
+- hppa_dma_ops->unmap_sg(dev, sg, nhwentries, direction);
+-}
+-
+-static inline dma_addr_t
+-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+- size_t size, enum dma_data_direction direction)
+-{
+- return dma_map_single(dev, (page_address(page) + (offset)), size, direction);
+-}
+-
+-static inline void
+-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction)
+-{
+- dma_unmap_single(dev, dma_address, size, direction);
+-}
+-
+-
+-static inline void
+-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction)
+-{
+- if(hppa_dma_ops->dma_sync_single_for_cpu)
+- hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, 0, size, direction);
+-}
+-
+-static inline void
+-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction)
+-{
+- if(hppa_dma_ops->dma_sync_single_for_device)
+- hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, 0, size, direction);
+-}
+-
+-static inline void
+-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- if(hppa_dma_ops->dma_sync_single_for_cpu)
+- hppa_dma_ops->dma_sync_single_for_cpu(dev, dma_handle, offset, size, direction);
+-}
++extern struct dma_map_ops *hppa_dma_ops;
+
+-static inline void
+-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- if(hppa_dma_ops->dma_sync_single_for_device)
+- hppa_dma_ops->dma_sync_single_for_device(dev, dma_handle, offset, size, direction);
+-}
+-
+-static inline void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction)
+-{
+- if(hppa_dma_ops->dma_sync_sg_for_cpu)
+- hppa_dma_ops->dma_sync_sg_for_cpu(dev, sg, nelems, direction);
+-}
+-
+-static inline void
+-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction)
+-{
+- if(hppa_dma_ops->dma_sync_sg_for_device)
+- hppa_dma_ops->dma_sync_sg_for_device(dev, sg, nelems, direction);
+-}
+-
+-static inline int
+-dma_supported(struct device *dev, u64 mask)
+-{
+- return hppa_dma_ops->dma_supported(dev, mask);
+-}
+-
+-static inline int
+-dma_set_mask(struct device *dev, u64 mask)
+-{
+- if(!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+-
+- *dev->dma_mask = mask;
+-
+- return 0;
++ return hppa_dma_ops;
+ }
+
+ static inline void
+ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+ {
+- if(hppa_dma_ops->dma_sync_single_for_cpu)
++ if (hppa_dma_ops->sync_single_for_cpu)
+ flush_kernel_dcache_range((unsigned long)vaddr, size);
+ }
+
+@@ -238,22 +83,6 @@ struct parisc_device;
+ void * sba_get_iommu(struct parisc_device *dev);
+ #endif
+
+-/* At the moment, we panic on error for IOMMU resource exaustion */
+-#define dma_mapping_error(dev, x) 0
+-
+-/* This API cannot be supported on PA-RISC */
+-static inline int dma_mmap_coherent(struct device *dev,
+- struct vm_area_struct *vma, void *cpu_addr,
+- dma_addr_t dma_addr, size_t size)
+-{
+- return -EINVAL;
+-}
+-
+-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size)
+-{
+- return -EINVAL;
+-}
++#include <asm-generic/dma-mapping-common.h>
+
+ #endif
+diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
+index dba508f..f815066 100644
+--- a/arch/parisc/kernel/drivers.c
++++ b/arch/parisc/kernel/drivers.c
+@@ -40,7 +40,7 @@
+ #include <asm/parisc-device.h>
+
+ /* See comments in include/asm-parisc/pci.h */
+-struct hppa_dma_ops *hppa_dma_ops __read_mostly;
++struct dma_map_ops *hppa_dma_ops __read_mostly;
+ EXPORT_SYMBOL(hppa_dma_ops);
+
+ static struct device root = {
+diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c
+index b9402c9..a27e492 100644
+--- a/arch/parisc/kernel/pci-dma.c
++++ b/arch/parisc/kernel/pci-dma.c
+@@ -413,7 +413,8 @@ pcxl_dma_init(void)
+
+ __initcall(pcxl_dma_init);
+
+-static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
++static void *pa11_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ {
+ unsigned long vaddr;
+ unsigned long paddr;
+@@ -439,7 +440,8 @@ static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_ad
+ return (void *)vaddr;
+ }
+
+-static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
++static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ int order;
+
+@@ -450,15 +452,20 @@ static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vad
+ free_pages((unsigned long)__va(dma_handle), order);
+ }
+
+-static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
++static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
++ void *addr = page_address(page) + offset;
+ BUG_ON(direction == DMA_NONE);
+
+ flush_kernel_dcache_range((unsigned long) addr, size);
+ return virt_to_phys(addr);
+ }
+
+-static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
++static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
++ size_t size, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ BUG_ON(direction == DMA_NONE);
+
+@@ -475,7 +482,9 @@ static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, siz
+ return;
+ }
+
+-static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
++static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ int i;
+ struct scatterlist *sg;
+@@ -492,7 +501,9 @@ static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int n
+ return nents;
+ }
+
+-static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
++static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
+ {
+ int i;
+ struct scatterlist *sg;
+@@ -509,18 +520,24 @@ static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, in
+ return;
+ }
+
+-static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
++static void pa11_dma_sync_single_for_cpu(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
+ {
+ BUG_ON(direction == DMA_NONE);
+
+- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
++ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
++ size);
+ }
+
+-static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
++static void pa11_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
+ {
+ BUG_ON(direction == DMA_NONE);
+
+- flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
++ flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
++ size);
+ }
+
+ static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
+@@ -545,32 +562,28 @@ static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *
+ flush_kernel_vmap_range(sg_virt(sg), sg->length);
+ }
+
+-struct hppa_dma_ops pcxl_dma_ops = {
++struct dma_map_ops pcxl_dma_ops = {
+ .dma_supported = pa11_dma_supported,
+- .alloc_consistent = pa11_dma_alloc_consistent,
+- .alloc_noncoherent = pa11_dma_alloc_consistent,
+- .free_consistent = pa11_dma_free_consistent,
+- .map_single = pa11_dma_map_single,
+- .unmap_single = pa11_dma_unmap_single,
++ .alloc = pa11_dma_alloc,
++ .free = pa11_dma_free,
++ .map_page = pa11_dma_map_page,
++ .unmap_page = pa11_dma_unmap_page,
+ .map_sg = pa11_dma_map_sg,
+ .unmap_sg = pa11_dma_unmap_sg,
+- .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+- .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
+- .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+- .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
++ .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
++ .sync_single_for_device = pa11_dma_sync_single_for_device,
++ .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
++ .sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ };
+
+-static void *fail_alloc_consistent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag)
+-{
+- return NULL;
+-}
+-
+-static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag)
++static void *pcx_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ {
+ void *addr;
+
++ if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
++ return NULL;
++
+ addr = (void *)__get_free_pages(flag, get_order(size));
+ if (addr)
+ *dma_handle = (dma_addr_t)virt_to_phys(addr);
+@@ -578,24 +591,23 @@ static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
+ return addr;
+ }
+
+-static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t iova)
++static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t iova, struct dma_attrs *attrs)
+ {
+ free_pages((unsigned long)vaddr, get_order(size));
+ return;
+ }
+
+-struct hppa_dma_ops pcx_dma_ops = {
++struct dma_map_ops pcx_dma_ops = {
+ .dma_supported = pa11_dma_supported,
+- .alloc_consistent = fail_alloc_consistent,
+- .alloc_noncoherent = pa11_dma_alloc_noncoherent,
+- .free_consistent = pa11_dma_free_noncoherent,
+- .map_single = pa11_dma_map_single,
+- .unmap_single = pa11_dma_unmap_single,
++ .alloc = pcx_dma_alloc,
++ .free = pcx_dma_free,
++ .map_page = pa11_dma_map_page,
++ .unmap_page = pa11_dma_unmap_page,
+ .map_sg = pa11_dma_map_sg,
+ .unmap_sg = pa11_dma_unmap_sg,
+- .dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
+- .dma_sync_single_for_device = pa11_dma_sync_single_for_device,
+- .dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
+- .dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
++ .sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
++ .sync_single_for_device = pa11_dma_sync_single_for_device,
++ .sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
++ .sync_sg_for_device = pa11_dma_sync_sg_for_device,
+ };
+diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
+index 8e11fb2..e24b059 100644
+--- a/drivers/parisc/ccio-dma.c
++++ b/drivers/parisc/ccio-dma.c
+@@ -786,18 +786,27 @@ ccio_map_single(struct device *dev, void *addr, size_t size,
+ return CCIO_IOVA(iovp, offset);
+ }
+
++
++static dma_addr_t
++ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ return ccio_map_single(dev, page_address(page) + offset, size,
++ direction);
++}
++
++
+ /**
+- * ccio_unmap_single - Unmap an address range from the IOMMU.
++ * ccio_unmap_page - Unmap an address range from the IOMMU.
+ * @dev: The PCI device.
+ * @addr: The start address of the DMA region.
+ * @size: The length of the DMA region.
+ * @direction: The direction of the DMA transaction (to/from device).
+- *
+- * This function implements the pci_unmap_single function.
+ */
+ static void
+-ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
+- enum dma_data_direction direction)
++ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+ struct ioc *ioc;
+ unsigned long flags;
+@@ -826,7 +835,7 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
+ }
+
+ /**
+- * ccio_alloc_consistent - Allocate a consistent DMA mapping.
++ * ccio_alloc - Allocate a consistent DMA mapping.
+ * @dev: The PCI device.
+ * @size: The length of the DMA region.
+ * @dma_handle: The DMA address handed back to the device (not the cpu).
+@@ -834,7 +843,8 @@ ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
+ * This function implements the pci_alloc_consistent function.
+ */
+ static void *
+-ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
++ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
++ struct dma_attrs *attrs)
+ {
+ void *ret;
+ #if 0
+@@ -858,7 +868,7 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
+ }
+
+ /**
+- * ccio_free_consistent - Free a consistent DMA mapping.
++ * ccio_free - Free a consistent DMA mapping.
+ * @dev: The PCI device.
+ * @size: The length of the DMA region.
+ * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
+@@ -867,10 +877,10 @@ ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, g
+ * This function implements the pci_free_consistent function.
+ */
+ static void
+-ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
+- dma_addr_t dma_handle)
++ccio_free(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+- ccio_unmap_single(dev, dma_handle, size, 0);
++ ccio_unmap_page(dev, dma_handle, size, 0, NULL);
+ free_pages((unsigned long)cpu_addr, get_order(size));
+ }
+
+@@ -897,7 +907,7 @@ ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
+ */
+ static int
+ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+ struct ioc *ioc;
+ int coalesced, filled = 0;
+@@ -974,7 +984,7 @@ ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ */
+ static void
+ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+ struct ioc *ioc;
+
+@@ -993,27 +1003,22 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ #ifdef CCIO_COLLECT_STATS
+ ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
+ #endif
+- ccio_unmap_single(dev, sg_dma_address(sglist),
+- sg_dma_len(sglist), direction);
++ ccio_unmap_page(dev, sg_dma_address(sglist),
++ sg_dma_len(sglist), direction, NULL);
+ ++sglist;
+ }
+
+ DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
+ }
+
+-static struct hppa_dma_ops ccio_ops = {
++static struct dma_map_ops ccio_ops = {
+ .dma_supported = ccio_dma_supported,
+- .alloc_consistent = ccio_alloc_consistent,
+- .alloc_noncoherent = ccio_alloc_consistent,
+- .free_consistent = ccio_free_consistent,
+- .map_single = ccio_map_single,
+- .unmap_single = ccio_unmap_single,
++ .alloc = ccio_alloc,
++ .free = ccio_free,
++ .map_page = ccio_map_page,
++ .unmap_page = ccio_unmap_page,
+ .map_sg = ccio_map_sg,
+ .unmap_sg = ccio_unmap_sg,
+- .dma_sync_single_for_cpu = NULL, /* NOP for U2/Uturn */
+- .dma_sync_single_for_device = NULL, /* NOP for U2/Uturn */
+- .dma_sync_sg_for_cpu = NULL, /* ditto */
+- .dma_sync_sg_for_device = NULL, /* ditto */
+ };
+
+ #ifdef CONFIG_PROC_FS
+@@ -1062,7 +1067,7 @@ static int ccio_proc_info(struct seq_file *m, void *p)
+ ioc->msingle_calls, ioc->msingle_pages,
+ (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
+
+- /* KLUGE - unmap_sg calls unmap_single for each mapped page */
++ /* KLUGE - unmap_sg calls unmap_page for each mapped page */
+ min = ioc->usingle_calls - ioc->usg_calls;
+ max = ioc->usingle_pages - ioc->usg_pages;
+ seq_printf(m, "pci_unmap_single: %8ld calls %8ld pages (avg %d/1000)\n",
+diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
+index 225049b..42ec460 100644
+--- a/drivers/parisc/sba_iommu.c
++++ b/drivers/parisc/sba_iommu.c
+@@ -780,8 +780,18 @@ sba_map_single(struct device *dev, void *addr, size_t size,
+ }
+
+
++static dma_addr_t
++sba_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ return sba_map_single(dev, page_address(page) + offset, size,
++ direction);
++}
++
++
+ /**
+- * sba_unmap_single - unmap one IOVA and free resources
++ * sba_unmap_page - unmap one IOVA and free resources
+ * @dev: instance of PCI owned by the driver that's asking.
+ * @iova: IOVA of driver buffer previously mapped.
+ * @size: number of bytes mapped in driver buffer.
+@@ -790,8 +800,8 @@ sba_map_single(struct device *dev, void *addr, size_t size,
+ * See Documentation/DMA-API-HOWTO.txt
+ */
+ static void
+-sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
+- enum dma_data_direction direction)
++sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+ struct ioc *ioc;
+ #if DELAYED_RESOURCE_CNT > 0
+@@ -858,15 +868,15 @@ sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
+
+
+ /**
+- * sba_alloc_consistent - allocate/map shared mem for DMA
++ * sba_alloc - allocate/map shared mem for DMA
+ * @hwdev: instance of PCI owned by the driver that's asking.
+ * @size: number of bytes mapped in driver buffer.
+ * @dma_handle: IOVA of new buffer.
+ *
+ * See Documentation/DMA-API-HOWTO.txt
+ */
+-static void *sba_alloc_consistent(struct device *hwdev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
++static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
++ gfp_t gfp, struct dma_attrs *attrs)
+ {
+ void *ret;
+
+@@ -888,7 +898,7 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
+
+
+ /**
+- * sba_free_consistent - free/unmap shared mem for DMA
++ * sba_free - free/unmap shared mem for DMA
+ * @hwdev: instance of PCI owned by the driver that's asking.
+ * @size: number of bytes mapped in driver buffer.
+ * @vaddr: virtual address IOVA of "consistent" buffer.
+@@ -897,10 +907,10 @@ static void *sba_alloc_consistent(struct device *hwdev, size_t size,
+ * See Documentation/DMA-API-HOWTO.txt
+ */
+ static void
+-sba_free_consistent(struct device *hwdev, size_t size, void *vaddr,
+- dma_addr_t dma_handle)
++sba_free(struct device *hwdev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+- sba_unmap_single(hwdev, dma_handle, size, 0);
++ sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
+ free_pages((unsigned long) vaddr, get_order(size));
+ }
+
+@@ -933,7 +943,7 @@ int dump_run_sg = 0;
+ */
+ static int
+ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+ struct ioc *ioc;
+ int coalesced, filled = 0;
+@@ -1016,7 +1026,7 @@ sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ */
+ static void
+ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
++ enum dma_data_direction direction, struct dma_attrs *attrs)
+ {
+ struct ioc *ioc;
+ #ifdef ASSERT_PDIR_SANITY
+@@ -1040,7 +1050,8 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+
+ while (sg_dma_len(sglist) && nents--) {
+
+- sba_unmap_single(dev, sg_dma_address(sglist), sg_dma_len(sglist), direction);
++ sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
++ direction, NULL);
+ #ifdef SBA_COLLECT_STATS
+ ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
+ ioc->usingle_calls--; /* kluge since call is unmap_sg() */
+@@ -1058,19 +1069,14 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
+
+ }
+
+-static struct hppa_dma_ops sba_ops = {
++static struct dma_map_ops sba_ops = {
+ .dma_supported = sba_dma_supported,
+- .alloc_consistent = sba_alloc_consistent,
+- .alloc_noncoherent = sba_alloc_consistent,
+- .free_consistent = sba_free_consistent,
+- .map_single = sba_map_single,
+- .unmap_single = sba_unmap_single,
++ .alloc = sba_alloc,
++ .free = sba_free,
++ .map_page = sba_map_page,
++ .unmap_page = sba_unmap_page,
+ .map_sg = sba_map_sg,
+ .unmap_sg = sba_unmap_sg,
+- .dma_sync_single_for_cpu = NULL,
+- .dma_sync_single_for_device = NULL,
+- .dma_sync_sg_for_cpu = NULL,
+- .dma_sync_sg_for_device = NULL,
+ };
+
+
+--
+2.10.0
+
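For context on the pcx_dma_alloc() gate above: once ->alloc is the only
allocation op, a non-coherent request is expressed through the attrs
argument rather than through a separate alloc_noncoherent op. A minimal
sketch of how the old dma_alloc_noncoherent() entry point can be routed,
assuming the 4.4-era struct dma_attrs API; this helper lives in the
generic headers, not in this patch:

	static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
						  dma_addr_t *dma_handle, gfp_t gfp)
	{
		DEFINE_DMA_ATTRS(attrs);

		/* Tell ->alloc that a non-coherent buffer is acceptable. */
		dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
		return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
	}

This is why pcx_dma_alloc() can simply refuse requests that lack
DMA_ATTR_NON_CONSISTENT: PCX machines cannot provide coherent memory at
all.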
diff --git a/patches.arch/0010-mn10300-convert-to-dma_map_ops.patch b/patches.arch/0010-mn10300-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..3dc6eb3643
--- /dev/null
+++ b/patches.arch/0010-mn10300-convert-to-dma_map_ops.patch
@@ -0,0 +1,310 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:50 -0800
+Subject: mn10300: convert to dma_map_ops
+Git-commit: f151341ca00e0418f98a5131e1a4a2a3ec219653
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/mn10300/Kconfig | 2 +
+ arch/mn10300/include/asm/dma-mapping.h | 161 +--------------------------------
+ arch/mn10300/mm/dma-alloc.c | 67 ++++++++++++--
+ 3 files changed, 67 insertions(+), 163 deletions(-)
+
+diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
+index 78ae555..e8ebf78 100644
+--- a/arch/mn10300/Kconfig
++++ b/arch/mn10300/Kconfig
+@@ -14,6 +14,8 @@ config MN10300
+ select OLD_SIGSUSPEND3
+ select OLD_SIGACTION
+ select HAVE_DEBUG_STACKOVERFLOW
++ select ARCH_NO_COHERENT_DMA_MMAP
++ select HAVE_DMA_ATTRS
+
+ config AM33_2
+ def_bool n
+diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h
+index a18abfc..e69b013 100644
+--- a/arch/mn10300/include/asm/dma-mapping.h
++++ b/arch/mn10300/include/asm/dma-mapping.h
+@@ -11,154 +11,14 @@
+ #ifndef _ASM_DMA_MAPPING_H
+ #define _ASM_DMA_MAPPING_H
+
+-#include <linux/mm.h>
+-#include <linux/scatterlist.h>
+-
+ #include <asm/cache.h>
+ #include <asm/io.h>
+
+-/*
+- * See Documentation/DMA-API.txt for the description of how the
+- * following DMA API should work.
+- */
+-
+-extern void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, int flag);
+-
+-extern void dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle);
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
+-
+-static inline
+-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+- mn10300_dcache_flush_inv();
+- return virt_to_bus(ptr);
+-}
+-
+-static inline
+-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-static inline
+-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
+-{
+- struct scatterlist *sg;
+- int i;
+-
+- BUG_ON(!valid_dma_direction(direction));
+- WARN_ON(nents == 0 || sglist[0].length == 0);
+-
+- for_each_sg(sglist, sg, nents, i) {
+- BUG_ON(!sg_page(sg));
+-
+- sg->dma_address = sg_phys(sg);
+- }
+-
+- mn10300_dcache_flush_inv();
+- return nents;
+-}
+-
+-static inline
+-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(!valid_dma_direction(direction));
+-}
+-
+-static inline
+-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+- return page_to_bus(page) + offset;
+-}
+-
+-static inline
+-void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(direction == DMA_NONE);
+-}
+-
+-static inline
+-void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction)
+-{
+-}
+-
+-static inline
+-void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction)
+-{
+- mn10300_dcache_flush_inv();
+-}
+-
+-static inline
+-void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+-}
+-
+-static inline void
+-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- mn10300_dcache_flush_inv();
+-}
+-
++extern struct dma_map_ops mn10300_dma_ops;
+
+-static inline
+-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+- int nelems, enum dma_data_direction direction)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+-}
+-
+-static inline
+-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+- int nelems, enum dma_data_direction direction)
+-{
+- mn10300_dcache_flush_inv();
+-}
+-
+-static inline
+-int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+-static inline
+-int dma_supported(struct device *dev, u64 mask)
+-{
+- /*
+- * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
+- * guarantee allocations that must be within a tighter range than
+- * GFP_DMA
+- */
+- if (mask < 0x00ffffff)
+- return 0;
+- return 1;
+-}
+-
+-static inline
+-int dma_set_mask(struct device *dev, u64 mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+-
+- *dev->dma_mask = mask;
+- return 0;
++ return &mn10300_dma_ops;
+ }
+
+ static inline
+@@ -168,19 +28,6 @@ void dma_cache_sync(void *vaddr, size_t size,
+ mn10300_dcache_flush_inv();
+ }
+
+-/* Not supported for now */
+-static inline int dma_mmap_coherent(struct device *dev,
+- struct vm_area_struct *vma, void *cpu_addr,
+- dma_addr_t dma_addr, size_t size)
+-{
+- return -EINVAL;
+-}
+-
+-static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size)
+-{
+- return -EINVAL;
+-}
++#include <asm-generic/dma-mapping-common.h>
+
+ #endif
+diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
+index e244ebe..8842394 100644
+--- a/arch/mn10300/mm/dma-alloc.c
++++ b/arch/mn10300/mm/dma-alloc.c
+@@ -20,8 +20,8 @@
+
+ static unsigned long pci_sram_allocated = 0xbc000000;
+
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, int gfp)
++static void *mn10300_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+ unsigned long addr;
+ void *ret;
+@@ -61,10 +61,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+ printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
+ return ret;
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+-void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_handle)
++static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ unsigned long addr = (unsigned long) vaddr & ~0x20000000;
+
+@@ -73,4 +72,60 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
+
+ free_pages(addr, get_order(size));
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
++
++static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sglist, sg, nents, i) {
++ BUG_ON(!sg_page(sg));
++
++ sg->dma_address = sg_phys(sg);
++ }
++
++ mn10300_dcache_flush_inv();
++ return nents;
++}
++
++static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
++{
++ return page_to_bus(page) + offset;
++}
++
++static void mn10300_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
++ size_t size, enum dma_data_direction direction)
++{
++ mn10300_dcache_flush_inv();
++}
++
++static void mn10300_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction direction)
++{
++ mn10300_dcache_flush_inv();
++}
++
++static int mn10300_dma_supported(struct device *dev, u64 mask)
++{
++ /*
++ * we fall back to GFP_DMA when the mask isn't all 1s, so we can't
++ * guarantee allocations that must be within a tighter range than
++ * GFP_DMA
++ */
++ if (mask < 0x00ffffff)
++ return 0;
++ return 1;
++}
++
++struct dma_map_ops mn10300_dma_ops = {
++ .alloc = mn10300_dma_alloc,
++ .free = mn10300_dma_free,
++ .map_page = mn10300_dma_map_page,
++ .map_sg = mn10300_dma_map_sg,
++ .sync_single_for_device = mn10300_dma_sync_single_for_device,
++ .sync_sg_for_device = mn10300_dma_sync_sg_for_device,
++};
+--
+2.10.0
+
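The header reduction above is representative of the whole series: the
architecture keeps only get_dma_ops() plus the asm-generic include, and
every removed inline wrapper comes back from the common header,
dispatching through the ops table. A simplified sketch of that dispatch,
assuming the 4.4-era signatures (the real header also carries kmemcheck
and dma-debug hooks):

	static inline dma_addr_t dma_map_single_attrs(struct device *dev,
			void *ptr, size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		BUG_ON(!valid_dma_direction(dir));
		/* There is no map_single op; it is built on map_page. */
		return ops->map_page(dev, virt_to_page(ptr),
				     offset_in_page(ptr), size, dir, attrs);
	}

This is also why the conversions in this series implement map_page but no
map_single.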
diff --git a/patches.arch/0011-m68k-convert-to-dma_map_ops.patch b/patches.arch/0011-m68k-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..3a6b291712
--- /dev/null
+++ b/patches.arch/0011-m68k-convert-to-dma_map_ops.patch
@@ -0,0 +1,294 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:53 -0800
+Subject: m68k: convert to dma_map_ops
+Git-commit: 340f3039acd67ec7750e36bd327caadadaacaaf4
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/m68k/Kconfig | 1 +
+ arch/m68k/include/asm/dma-mapping.h | 112 ++----------------------------------
+ arch/m68k/kernel/dma.c | 61 +++++++++-----------
+ 3 files changed, 32 insertions(+), 142 deletions(-)
+
+diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
+index 498b567..d5d75b3 100644
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -23,6 +23,7 @@ config M68K
+ select MODULES_USE_ELF_RELA
+ select OLD_SIGSUSPEND3
+ select OLD_SIGACTION
++ select HAVE_DMA_ATTRS
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
+diff --git a/arch/m68k/include/asm/dma-mapping.h b/arch/m68k/include/asm/dma-mapping.h
+index 05aa535..2c082a6 100644
+--- a/arch/m68k/include/asm/dma-mapping.h
++++ b/arch/m68k/include/asm/dma-mapping.h
+@@ -1,123 +1,19 @@
+ #ifndef _M68K_DMA_MAPPING_H
+ #define _M68K_DMA_MAPPING_H
+
+-#include <asm/cache.h>
++extern struct dma_map_ops m68k_dma_ops;
+
+-struct scatterlist;
+-
+-static inline int dma_supported(struct device *dev, u64 mask)
+-{
+- return 1;
+-}
+-
+-static inline int dma_set_mask(struct device *dev, u64 mask)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- return 0;
++ return &m68k_dma_ops;
+ }
+
+-extern void *dma_alloc_coherent(struct device *, size_t,
+- dma_addr_t *, gfp_t);
+-extern void dma_free_coherent(struct device *, size_t,
+- void *, dma_addr_t);
++#include <asm-generic/dma-mapping-common.h>
+
+-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag,
+- struct dma_attrs *attrs)
+-{
+- /* attrs is not supported and ignored */
+- return dma_alloc_coherent(dev, size, dma_handle, flag);
+-}
+-
+-static inline void dma_free_attrs(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t dma_handle,
+- struct dma_attrs *attrs)
+-{
+- /* attrs is not supported and ignored */
+- dma_free_coherent(dev, size, cpu_addr, dma_handle);
+-}
+-
+-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t flag)
+-{
+- return dma_alloc_coherent(dev, size, handle, flag);
+-}
+-static inline void dma_free_noncoherent(struct device *dev, size_t size,
+- void *addr, dma_addr_t handle)
+-{
+- dma_free_coherent(dev, size, addr, handle);
+-}
+ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+ {
+ /* we use coherent allocation, so not much to do here. */
+ }
+
+-extern dma_addr_t dma_map_single(struct device *, void *, size_t,
+- enum dma_data_direction);
+-static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
+- size_t size, enum dma_data_direction dir)
+-{
+-}
+-
+-extern dma_addr_t dma_map_page(struct device *, struct page *,
+- unsigned long, size_t size,
+- enum dma_data_direction);
+-static inline void dma_unmap_page(struct device *dev, dma_addr_t address,
+- size_t size, enum dma_data_direction dir)
+-{
+-}
+-
+-extern int dma_map_sg(struct device *, struct scatterlist *, int,
+- enum dma_data_direction);
+-static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+- int nhwentries, enum dma_data_direction dir)
+-{
+-}
+-
+-extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t,
+- enum dma_data_direction);
+-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+- enum dma_data_direction);
+-
+-static inline void dma_sync_single_range_for_device(struct device *dev,
+- dma_addr_t dma_handle, unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- /* just sync everything for now */
+- dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
+-}
+-
+-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
+-{
+-}
+-
+-static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir)
+-{
+-}
+-
+-static inline void dma_sync_single_range_for_cpu(struct device *dev,
+- dma_addr_t dma_handle, unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- /* just sync everything for now */
+- dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
+-}
+-
+-static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
+-{
+- return 0;
+-}
+-
+-/* drivers/base/dma-mapping.c */
+-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size);
+-
+-#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+-
+ #endif /* _M68K_DMA_MAPPING_H */
+diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
+index 564665f..cbc78b4 100644
+--- a/arch/m68k/kernel/dma.c
++++ b/arch/m68k/kernel/dma.c
+@@ -18,8 +18,8 @@
+
+ #if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t flag)
++static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t flag, struct dma_attrs *attrs)
+ {
+ struct page *page, **map;
+ pgprot_t pgprot;
+@@ -61,8 +61,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+ return addr;
+ }
+
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *addr, dma_addr_t handle)
++static void m68k_dma_free(struct device *dev, size_t size, void *addr,
++ dma_addr_t handle, struct dma_attrs *attrs)
+ {
+ pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
+ vfree(addr);
+@@ -72,8 +72,8 @@ void dma_free_coherent(struct device *dev, size_t size,
+
+ #include <asm/cacheflush.h>
+
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
++static void *m68k_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+ void *ret;
+ /* ignore region specifiers */
+@@ -90,19 +90,16 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+ return ret;
+ }
+
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle)
++static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ free_pages((unsigned long)vaddr, get_order(size));
+ }
+
+ #endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
+
+-EXPORT_SYMBOL(dma_alloc_coherent);
+-EXPORT_SYMBOL(dma_free_coherent);
+-
+-void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
++static void m68k_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+ {
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+@@ -118,10 +115,9 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
+ break;
+ }
+ }
+-EXPORT_SYMBOL(dma_sync_single_for_device);
+
+-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+- int nents, enum dma_data_direction dir)
++static void m68k_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sglist, int nents, enum dma_data_direction dir)
+ {
+ int i;
+ struct scatterlist *sg;
+@@ -131,31 +127,19 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+ dir);
+ }
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_device);
+-
+-dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_addr_t handle = virt_to_bus(addr);
+-
+- dma_sync_single_for_device(dev, handle, size, dir);
+- return handle;
+-}
+-EXPORT_SYMBOL(dma_map_single);
+
+-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size,
+- enum dma_data_direction dir)
++static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
+ {
+ dma_addr_t handle = page_to_phys(page) + offset;
+
+ dma_sync_single_for_device(dev, handle, size, dir);
+ return handle;
+ }
+-EXPORT_SYMBOL(dma_map_page);
+
+-int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction dir)
++static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+ int i;
+ struct scatterlist *sg;
+@@ -167,4 +151,13 @@ int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+ }
+ return nents;
+ }
+-EXPORT_SYMBOL(dma_map_sg);
++
++struct dma_map_ops m68k_dma_ops = {
++ .alloc = m68k_dma_alloc,
++ .free = m68k_dma_free,
++ .map_page = m68k_dma_map_page,
++ .map_sg = m68k_dma_map_sg,
++ .sync_single_for_device = m68k_dma_sync_single_for_device,
++ .sync_sg_for_device = m68k_dma_sync_sg_for_device,
++};
++EXPORT_SYMBOL(m68k_dma_ops);
+--
+2.10.0
+
diff --git a/patches.arch/0012-metag-convert-to-dma_map_ops.patch b/patches.arch/0012-metag-convert-to-dma_map_ops.patch
new file mode 100644
index 0000000000..b0f830f921
--- /dev/null
+++ b/patches.arch/0012-metag-convert-to-dma_map_ops.patch
@@ -0,0 +1,443 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:56 -0800
+Subject: metag: convert to dma_map_ops
+Git-commit: 5348c1e9e0dc2b62a484c4b74a8d1d59aa9620a4
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/metag/Kconfig | 1 +
+ arch/metag/include/asm/dma-mapping.h | 179 +----------------------------------
+ arch/metag/kernel/dma.c | 146 +++++++++++++++++++++-------
+ 3 files changed, 117 insertions(+), 209 deletions(-)
+
+diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig
+index a0fa88d..ad8604c 100644
+--- a/arch/metag/Kconfig
++++ b/arch/metag/Kconfig
+@@ -29,6 +29,7 @@ config METAG
+ select OF
+ select OF_EARLY_FLATTREE
+ select SPARSE_IRQ
++ select HAVE_DMA_ATTRS
+
+ config STACKTRACE_SUPPORT
+ def_bool y
+diff --git a/arch/metag/include/asm/dma-mapping.h b/arch/metag/include/asm/dma-mapping.h
+index eb5cdec..768f2e3 100644
+--- a/arch/metag/include/asm/dma-mapping.h
++++ b/arch/metag/include/asm/dma-mapping.h
+@@ -1,178 +1,14 @@
+ #ifndef _ASM_METAG_DMA_MAPPING_H
+ #define _ASM_METAG_DMA_MAPPING_H
+
+-#include <linux/mm.h>
++extern struct dma_map_ops metag_dma_ops;
+
+-#include <asm/cache.h>
+-#include <asm/io.h>
+-#include <linux/scatterlist.h>
+-#include <asm/bug.h>
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+-
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag);
+-
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle);
+-
+-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
+-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);
+-
+-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-
+-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-
+-static inline dma_addr_t
+-dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(!valid_dma_direction(direction));
+- WARN_ON(size == 0);
+- dma_sync_for_device(ptr, size, direction);
+- return virt_to_phys(ptr);
+-}
+-
+-static inline void
+-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction direction)
+-{
+- BUG_ON(!valid_dma_direction(direction));
+- dma_sync_for_cpu(phys_to_virt(dma_addr), size, direction);
+-}
+-
+-static inline int
+-dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
+- enum dma_data_direction direction)
+-{
+- struct scatterlist *sg;
+- int i;
+-
+- BUG_ON(!valid_dma_direction(direction));
+- WARN_ON(nents == 0 || sglist[0].length == 0);
+-
+- for_each_sg(sglist, sg, nents, i) {
+- BUG_ON(!sg_page(sg));
+-
+- sg->dma_address = sg_phys(sg);
+- dma_sync_for_device(sg_virt(sg), sg->length, direction);
+- }
+-
+- return nents;
+-}
+-
+-static inline dma_addr_t
+-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+- size_t size, enum dma_data_direction direction)
+-{
+- BUG_ON(!valid_dma_direction(direction));
+- dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
+- direction);
+- return page_to_phys(page) + offset;
+-}
+-
+-static inline void
+-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction)
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+- BUG_ON(!valid_dma_direction(direction));
+- dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
++ return &metag_dma_ops;
+ }
+
+-
+-static inline void
+-dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nhwentries,
+- enum dma_data_direction direction)
+-{
+- struct scatterlist *sg;
+- int i;
+-
+- BUG_ON(!valid_dma_direction(direction));
+- WARN_ON(nhwentries == 0 || sglist[0].length == 0);
+-
+- for_each_sg(sglist, sg, nhwentries, i) {
+- BUG_ON(!sg_page(sg));
+-
+- sg->dma_address = sg_phys(sg);
+- dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+- }
+-}
+-
+-static inline void
+-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction)
+-{
+- dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
+-}
+-
+-static inline void
+-dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+- size_t size, enum dma_data_direction direction)
+-{
+- dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
+-}
+-
+-static inline void
+-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- dma_sync_for_cpu(phys_to_virt(dma_handle)+offset, size,
+- direction);
+-}
+-
+-static inline void
+-dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction)
+-{
+- dma_sync_for_device(phys_to_virt(dma_handle)+offset, size,
+- direction);
+-}
+-
+-static inline void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems,
+- enum dma_data_direction direction)
+-{
+- int i;
+- struct scatterlist *sg;
+-
+- for_each_sg(sglist, sg, nelems, i)
+- dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
+-}
+-
+-static inline void
+-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
+- int nelems, enum dma_data_direction direction)
+-{
+- int i;
+- struct scatterlist *sg;
+-
+- for_each_sg(sglist, sg, nelems, i)
+- dma_sync_for_device(sg_virt(sg), sg->length, direction);
+-}
+-
+-static inline int
+-dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- return 0;
+-}
+-
+-#define dma_supported(dev, mask) (1)
+-
+-static inline int
+-dma_set_mask(struct device *dev, u64 mask)
+-{
+- if (!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+-
+- *dev->dma_mask = mask;
+-
+- return 0;
+-}
++#include <asm-generic/dma-mapping-common.h>
+
+ /*
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+@@ -184,11 +20,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ {
+ }
+
+-/* drivers/base/dma-mapping.c */
+-extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size);
+-
+-#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+-
+ #endif
+diff --git a/arch/metag/kernel/dma.c b/arch/metag/kernel/dma.c
+index c700d62..e12368d 100644
+--- a/arch/metag/kernel/dma.c
++++ b/arch/metag/kernel/dma.c
+@@ -171,8 +171,8 @@ static struct metag_vm_region *metag_vm_region_find(struct metag_vm_region
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ */
+-void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *handle, gfp_t gfp)
++static void *metag_dma_alloc(struct device *dev, size_t size,
++ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+ struct page *page;
+ struct metag_vm_region *c;
+@@ -263,13 +263,12 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
+ no_page:
+ return NULL;
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+ /*
+ * free a page as defined by the above mapping.
+ */
+-void dma_free_coherent(struct device *dev, size_t size,
+- void *vaddr, dma_addr_t dma_handle)
++static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, struct dma_attrs *attrs)
+ {
+ struct metag_vm_region *c;
+ unsigned long flags, addr;
+@@ -329,16 +328,19 @@ void dma_free_coherent(struct device *dev, size_t size,
+ __func__, vaddr);
+ dump_stack();
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
+
+-
+-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size)
++static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
+ {
+- int ret = -ENXIO;
+-
+ unsigned long flags, user_size, kern_size;
+ struct metag_vm_region *c;
++ int ret = -ENXIO;
++
++ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ else
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+@@ -364,25 +366,6 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ return ret;
+ }
+
+-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+-{
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+-}
+-EXPORT_SYMBOL(dma_mmap_coherent);
+-
+-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+-{
+- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+-}
+-EXPORT_SYMBOL(dma_mmap_writecombine);
+-
+-
+-
+-
+ /*
+ * Initialise the consistent memory allocation.
+ */
+@@ -423,7 +406,7 @@ early_initcall(dma_alloc_init);
+ /*
+ * make an area consistent to devices.
+ */
+-void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
++static void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+ {
+ /*
+ * Ensure any writes get through the write combiner. This is necessary
+@@ -465,12 +448,11 @@ void dma_sync_for_device(void *vaddr, size_t size, int dma_direction)
+
+ wmb();
+ }
+-EXPORT_SYMBOL(dma_sync_for_device);
+
+ /*
+ * make an area consistent to the core.
+ */
+-void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
++static void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+ {
+ /*
+ * Hardware L2 cache prefetch doesn't occur across 4K physical
+@@ -497,4 +479,100 @@ void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction)
+
+ rmb();
+ }
+-EXPORT_SYMBOL(dma_sync_for_cpu);
++
++static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction, struct dma_attrs *attrs)
++{
++ dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
++ direction);
++ return page_to_phys(page) + offset;
++}
++
++static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
++}
++
++static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
++ int nents, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sglist, sg, nents, i) {
++ BUG_ON(!sg_page(sg));
++
++ sg->dma_address = sg_phys(sg);
++ dma_sync_for_device(sg_virt(sg), sg->length, direction);
++ }
++
++ return nents;
++}
++
++
++static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
++ int nhwentries, enum dma_data_direction direction,
++ struct dma_attrs *attrs)
++{
++ struct scatterlist *sg;
++ int i;
++
++ for_each_sg(sglist, sg, nhwentries, i) {
++ BUG_ON(!sg_page(sg));
++
++ sg->dma_address = sg_phys(sg);
++ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
++ }
++}
++
++static void metag_dma_sync_single_for_cpu(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_for_cpu(phys_to_virt(dma_handle), size, direction);
++}
++
++static void metag_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_for_device(phys_to_virt(dma_handle), size, direction);
++}
++
++static void metag_dma_sync_sg_for_cpu(struct device *dev,
++ struct scatterlist *sglist, int nelems,
++ enum dma_data_direction direction)
++{
++ int i;
++ struct scatterlist *sg;
++
++ for_each_sg(sglist, sg, nelems, i)
++ dma_sync_for_cpu(sg_virt(sg), sg->length, direction);
++}
++
++static void metag_dma_sync_sg_for_device(struct device *dev,
++ struct scatterlist *sglist, int nelems,
++ enum dma_data_direction direction)
++{
++ int i;
++ struct scatterlist *sg;
++
++ for_each_sg(sglist, sg, nelems, i)
++ dma_sync_for_device(sg_virt(sg), sg->length, direction);
++}
++
++struct dma_map_ops metag_dma_ops = {
++ .alloc = metag_dma_alloc,
++ .free = metag_dma_free,
++ .map_page = metag_dma_map_page,
++ .map_sg = metag_dma_map_sg,
++ .sync_single_for_device = metag_dma_sync_single_for_device,
++ .sync_single_for_cpu = metag_dma_sync_single_for_cpu,
++ .sync_sg_for_cpu = metag_dma_sync_sg_for_cpu,
++ .mmap = metag_dma_mmap,
++};
++EXPORT_SYMBOL(metag_dma_ops);
+--
+2.10.0
+
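Note how the exported dma_mmap_coherent()/dma_mmap_writecombine() pair
collapses into a single .mmap op keyed on DMA_ATTR_WRITE_COMBINE. On the
caller side, the write-combine variant becomes an attrs-based call; a
hypothetical driver snippet, assuming the 4.4-era dma_attrs API:

	static int example_mmap(struct device *dev, struct vm_area_struct *vma,
				void *cpu_addr, dma_addr_t dma_addr, size_t size)
	{
		DEFINE_DMA_ATTRS(attrs);

		/* Ask for a write-combined mapping instead of an uncached one. */
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
	}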
diff --git a/patches.arch/0013-sparc-use-generic-dma_set_mask.patch b/patches.arch/0013-sparc-use-generic-dma_set_mask.patch
new file mode 100644
index 0000000000..d743b67b36
--- /dev/null
+++ b/patches.arch/0013-sparc-use-generic-dma_set_mask.patch
@@ -0,0 +1,53 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:01:59 -0800
+Subject: sparc: use generic dma_set_mask
+Git-commit: 30081d8ea47d521e8804398b25f59b8e49a2ed0b
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Sparc already uses the same code as the generic implementation for the
+PCI case, but simply fails the call for SBUS. Moving to the generic
+implementation keeps that behavior: it eventually returns -EIO due to
+the NULL dma_mask pointer in the device.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/sparc/include/asm/dma-mapping.h | 15 ---------------
+ 1 file changed, 15 deletions(-)
+
+diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
+index a21da59..2777092 100644
+--- a/arch/sparc/include/asm/dma-mapping.h
++++ b/arch/sparc/include/asm/dma-mapping.h
+@@ -37,21 +37,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+ return dma_ops;
+ }
+
+-#define HAVE_ARCH_DMA_SET_MASK 1
+-
+-static inline int dma_set_mask(struct device *dev, u64 mask)
+-{
+-#ifdef CONFIG_PCI
+- if (dev->bus == &pci_bus_type) {
+- if (!dev->dma_mask || !dma_supported(dev, mask))
+- return -EINVAL;
+- *dev->dma_mask = mask;
+- return 0;
+- }
+-#endif
+- return -EINVAL;
+-}
+-
+ #include <asm-generic/dma-mapping-common.h>
+
+ #endif
+--
+2.10.0
+
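For reference, the generic dma_set_mask() that sparc now falls back to
looks roughly like this in the 4.5-era headers (a sketch, not part of
this patch):

	static inline int dma_set_mask(struct device *dev, u64 mask)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);

		if (ops->set_dma_mask)
			return ops->set_dma_mask(dev, mask);

		/* SBUS devices have a NULL dma_mask and end up here. */
		if (!dev->dma_mask || !dma_supported(dev, mask))
			return -EIO;
		*dev->dma_mask = mask;
		return 0;
	}

which preserves the old behavior of failing the call for SBUS.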
diff --git a/patches.arch/0014-tile-uninline-dma_set_mask.patch b/patches.arch/0014-tile-uninline-dma_set_mask.patch
new file mode 100644
index 0000000000..15ececdf30
--- /dev/null
+++ b/patches.arch/0014-tile-uninline-dma_set_mask.patch
@@ -0,0 +1,109 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:02:02 -0800
+Subject: tile: uninline dma_set_mask
+Git-commit: bd38118f9c57b22f57f9c2fccca4a82aef15cc5f
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+We'll soon merge <asm-generic/dma-mapping-common.h> into
+<linux/dma-mapping.h> and the reference to dma_capable in the tile
+dma_set_mask would create a circular dependency.
+
+Fix this by moving the implementation out of line.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/tile/include/asm/dma-mapping.h | 29 +----------------------------
+ arch/tile/kernel/pci-dma.c | 29 +++++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 28 deletions(-)
+
+diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
+index 96ac6cc..c342736 100644
+--- a/arch/tile/include/asm/dma-mapping.h
++++ b/arch/tile/include/asm/dma-mapping.h
+@@ -76,34 +76,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+
+ #include <asm-generic/dma-mapping-common.h>
+
+-static inline int
+-dma_set_mask(struct device *dev, u64 mask)
+-{
+- struct dma_map_ops *dma_ops = get_dma_ops(dev);
+-
+- /*
+- * For PCI devices with 64-bit DMA addressing capability, promote
+- * the dma_ops to hybrid, with the consistent memory DMA space limited
+- * to 32-bit. For 32-bit capable devices, limit the streaming DMA
+- * address range to max_direct_dma_addr.
+- */
+- if (dma_ops == gx_pci_dma_map_ops ||
+- dma_ops == gx_hybrid_pci_dma_map_ops ||
+- dma_ops == gx_legacy_pci_dma_map_ops) {
+- if (mask == DMA_BIT_MASK(64) &&
+- dma_ops == gx_legacy_pci_dma_map_ops)
+- set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
+- else if (mask > dev->archdata.max_direct_dma_addr)
+- mask = dev->archdata.max_direct_dma_addr;
+- }
+-
+- if (!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+-
+- *dev->dma_mask = mask;
+-
+- return 0;
+-}
++int dma_set_mask(struct device *dev, u64 mask);
+
+ /*
+ * dma_alloc_noncoherent() is #defined to return coherent memory,
+diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
+index 09b5870..b6bc054 100644
+--- a/arch/tile/kernel/pci-dma.c
++++ b/arch/tile/kernel/pci-dma.c
+@@ -583,6 +583,35 @@ struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
+ EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
+ EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
+
++int dma_set_mask(struct device *dev, u64 mask)
++{
++ struct dma_map_ops *dma_ops = get_dma_ops(dev);
++
++ /*
++ * For PCI devices with 64-bit DMA addressing capability, promote
++ * the dma_ops to hybrid, with the consistent memory DMA space limited
++ * to 32-bit. For 32-bit capable devices, limit the streaming DMA
++ * address range to max_direct_dma_addr.
++ */
++ if (dma_ops == gx_pci_dma_map_ops ||
++ dma_ops == gx_hybrid_pci_dma_map_ops ||
++ dma_ops == gx_legacy_pci_dma_map_ops) {
++ if (mask == DMA_BIT_MASK(64) &&
++ dma_ops == gx_legacy_pci_dma_map_ops)
++ set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
++ else if (mask > dev->archdata.max_direct_dma_addr)
++ mask = dev->archdata.max_direct_dma_addr;
++ }
++
++ if (!dev->dma_mask || !dma_supported(dev, mask))
++ return -EIO;
++
++ *dev->dma_mask = mask;
++
++ return 0;
++}
++EXPORT_SYMBOL(dma_set_mask);
++
+ #ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
+ int dma_set_coherent_mask(struct device *dev, u64 mask)
+ {
+--
+2.10.0
+
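The shape of this fix deserves a note: as an inline, dma_set_mask() was expanded into every caller and nothing needed exporting; once the body moves to pci-dma.c it becomes a single out-of-line symbol, and the EXPORT_SYMBOL() is what keeps modular drivers linking. A sketch of the pattern with hypothetical foo_* names (not part of this series):

/* header: only a prototype, so including it drags in no dependencies */
int foo_set_limit(struct device *dev, u64 limit);

/* foo.c: the body may freely reference symbols private to the arch */
int foo_set_limit(struct device *dev, u64 limit)
{
	if (!dev)
		return -EINVAL;
	/* ... apply the architecture-specific limit here ... */
	return 0;
}
EXPORT_SYMBOL(foo_set_limit); /* modular callers still resolve the symbol */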
diff --git a/patches.arch/0015-dma-mapping-always-provide-the-dma_map_ops-based-imp.patch b/patches.arch/0015-dma-mapping-always-provide-the-dma_map_ops-based-imp.patch
new file mode 100644
index 0000000000..4e37264e4e
--- /dev/null
+++ b/patches.arch/0015-dma-mapping-always-provide-the-dma_map_ops-based-imp.patch
@@ -0,0 +1,1739 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:02:05 -0800
+Subject: dma-mapping: always provide the dma_map_ops based implementation
+Git-commit: e1c7e324539ada3b2b13ca2898bcb4948a9ef9db
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+Move the generic implementation to <linux/dma-mapping.h> now that all
+architectures support it, and remove the HAVE_DMA_ATTRS Kconfig symbol
+now that every architecture supports DMA attributes.
+
+[valentinrothberg@gmail.com: remove leftovers in Kconfig]
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
+Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
+Cc: Helge Deller <deller@gmx.de>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Jesper Nilsson <jesper.nilsson@axis.com>
+Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
+Cc: Ley Foon Tan <lftan@altera.com>
+Cc: Mark Salter <msalter@redhat.com>
+Cc: Mikael Starvik <starvik@axis.com>
+Cc: Steven Miao <realmz6@gmail.com>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ Documentation/DMA-API-HOWTO.txt | 10
+ Documentation/features/io/dma_map_attrs/arch-support.txt | 40 -
+ arch/Kconfig | 3
+ arch/alpha/Kconfig | 1
+ arch/alpha/include/asm/dma-mapping.h | 2
+ arch/arc/Kconfig | 1
+ arch/arc/include/asm/dma-mapping.h | 2
+ arch/arm/Kconfig | 1
+ arch/arm/include/asm/dma-mapping.h | 7
+ arch/arm64/Kconfig | 1
+ arch/arm64/include/asm/dma-mapping.h | 2
+ arch/avr32/Kconfig | 1
+ arch/avr32/include/asm/dma-mapping.h | 2
+ arch/blackfin/Kconfig | 1
+ arch/blackfin/include/asm/dma-mapping.h | 2
+ arch/c6x/Kconfig | 1
+ arch/c6x/include/asm/dma-mapping.h | 2
+ arch/cris/Kconfig | 1
+ arch/cris/include/asm/dma-mapping.h | 2
+ arch/frv/Kconfig | 1
+ arch/frv/include/asm/dma-mapping.h | 2
+ arch/h8300/Kconfig | 1
+ arch/h8300/include/asm/dma-mapping.h | 2
+ arch/hexagon/Kconfig | 1
+ arch/hexagon/include/asm/dma-mapping.h | 2
+ arch/ia64/Kconfig | 1
+ arch/ia64/include/asm/dma-mapping.h | 2
+ arch/m68k/Kconfig | 1
+ arch/m68k/include/asm/dma-mapping.h | 2
+ arch/metag/Kconfig | 1
+ arch/metag/include/asm/dma-mapping.h | 2
+ arch/microblaze/Kconfig | 1
+ arch/microblaze/include/asm/dma-mapping.h | 2
+ arch/mips/Kconfig | 1
+ arch/mips/include/asm/dma-mapping.h | 2
+ arch/mn10300/Kconfig | 1
+ arch/mn10300/include/asm/dma-mapping.h | 2
+ arch/nios2/Kconfig | 1
+ arch/openrisc/Kconfig | 3
+ arch/openrisc/include/asm/dma-mapping.h | 2
+ arch/parisc/Kconfig | 1
+ arch/parisc/include/asm/dma-mapping.h | 2
+ arch/powerpc/Kconfig | 1
+ arch/powerpc/include/asm/dma-mapping.h | 2
+ arch/s390/Kconfig | 1
+ arch/s390/include/asm/dma-mapping.h | 2
+ arch/sh/Kconfig | 1
+ arch/sh/include/asm/dma-mapping.h | 2
+ arch/sparc/Kconfig | 1
+ arch/sparc/include/asm/dma-mapping.h | 2
+ arch/tile/Kconfig | 1
+ arch/tile/include/asm/dma-mapping.h | 3
+ arch/unicore32/Kconfig | 1
+ arch/unicore32/include/asm/dma-mapping.h | 2
+ arch/x86/Kconfig | 1
+ arch/x86/include/asm/dma-mapping.h | 2
+ arch/xtensa/Kconfig | 1
+ arch/xtensa/include/asm/dma-mapping.h | 2
+ drivers/gpu/drm/Kconfig | 4
+ drivers/gpu/drm/imx/Kconfig | 2
+ drivers/gpu/drm/rcar-du/Kconfig | 2
+ drivers/gpu/drm/shmobile/Kconfig | 2
+ drivers/gpu/drm/sti/Kconfig | 2
+ drivers/gpu/drm/tilcdc/Kconfig | 2
+ drivers/gpu/drm/vc4/Kconfig | 2
+ drivers/media/platform/Kconfig | 1
+ include/asm-generic/dma-mapping-broken.h | 95 ---
+ include/asm-generic/dma-mapping-common.h | 358 --------------
+ include/linux/dma-attrs.h | 10
+ include/linux/dma-mapping.h | 379 ++++++++++++++-
+ 70 files changed, 369 insertions(+), 633 deletions(-)
+ delete mode 100644 Documentation/features/io/dma_map_attrs/arch-support.txt
+ delete mode 100644 include/asm-generic/dma-mapping-broken.h
+ delete mode 100644 include/asm-generic/dma-mapping-common.h
+
+--- a/Documentation/DMA-API-HOWTO.txt
++++ b/Documentation/DMA-API-HOWTO.txt
+@@ -951,16 +951,6 @@ to "Closing".
+ alignment constraints (e.g. the alignment constraints about 64-bit
+ objects).
+
+-3) Supporting multiple types of IOMMUs
+-
+- If your architecture needs to support multiple types of IOMMUs, you
+- can use include/linux/asm-generic/dma-mapping-common.h. It's a
+- library to support the DMA API with multiple types of IOMMUs. Lots
+- of architectures (x86, powerpc, sh, alpha, ia64, microblaze and
+- sparc) use it. Choose one to see how it can be used. If you need to
+- support multiple types of IOMMUs in a single system, the example of
+- x86 or powerpc helps.
+-
+ Closing
+
+ This document, and the API itself, would not be in its current
+--- a/Documentation/features/io/dma_map_attrs/arch-support.txt
++++ /dev/null
+@@ -1,40 +0,0 @@
+-#
+-# Feature name: dma_map_attrs
+-# Kconfig: HAVE_DMA_ATTRS
+-# description: arch provides dma_*map*_attrs() APIs
+-#
+- -----------------------
+- | arch |status|
+- -----------------------
+- | alpha: | ok |
+- | arc: | TODO |
+- | arm: | ok |
+- | arm64: | ok |
+- | avr32: | TODO |
+- | blackfin: | TODO |
+- | c6x: | TODO |
+- | cris: | TODO |
+- | frv: | TODO |
+- | h8300: | ok |
+- | hexagon: | ok |
+- | ia64: | ok |
+- | m32r: | TODO |
+- | m68k: | TODO |
+- | metag: | TODO |
+- | microblaze: | ok |
+- | mips: | ok |
+- | mn10300: | TODO |
+- | nios2: | TODO |
+- | openrisc: | ok |
+- | parisc: | TODO |
+- | powerpc: | ok |
+- | s390: | ok |
+- | score: | TODO |
+- | sh: | ok |
+- | sparc: | ok |
+- | tile: | ok |
+- | um: | TODO |
+- | unicore32: | ok |
+- | x86: | ok |
+- | xtensa: | TODO |
+- -----------------------
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -205,9 +205,6 @@ config HAVE_NMI_WATCHDOG
+ config HAVE_ARCH_TRACEHOOK
+ bool
+
+-config HAVE_DMA_ATTRS
+- bool
+-
+ config HAVE_DMA_CONTIGUOUS
+ bool
+
+--- a/arch/alpha/Kconfig
++++ b/arch/alpha/Kconfig
+@@ -9,7 +9,6 @@ config ALPHA
+ select HAVE_OPROFILE
+ select HAVE_PCSPKR_PLATFORM
+ select HAVE_PERF_EVENTS
+- select HAVE_DMA_ATTRS
+ select VIRT_TO_BUS
+ select GENERIC_IRQ_PROBE
+ select AUTO_IRQ_AFFINITY if SMP
+--- a/arch/alpha/include/asm/dma-mapping.h
++++ b/arch/alpha/include/asm/dma-mapping.h
+@@ -10,8 +10,6 @@ static inline struct dma_map_ops *get_dm
+ return dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #define dma_cache_sync(dev, va, size, dir) ((void)0)
+
+ #endif /* _ALPHA_DMA_MAPPING_H */
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -38,7 +38,6 @@ config ARC
+ select OF_EARLY_FLATTREE
+ select PERF_USE_VMALLOC
+ select HAVE_DEBUG_STACKOVERFLOW
+- select HAVE_DMA_ATTRS
+
+ config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+--- a/arch/arc/include/asm/dma-mapping.h
++++ b/arch/arc/include/asm/dma-mapping.h
+@@ -18,6 +18,4 @@ static inline struct dma_map_ops *get_dm
+ return &arc_dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -44,7 +44,6 @@ config ARM
+ select HAVE_C_RECORDMCOUNT
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DMA_API_DEBUG
+- select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL) && !CPU_ENDIAN_BE32
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -41,13 +41,6 @@ static inline void set_dma_ops(struct de
+ #define HAVE_ARCH_DMA_SUPPORTED 1
+ extern int dma_supported(struct device *dev, u64 mask);
+
+-/*
+- * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
+- * implementations, we don't provide a dma_cache_sync function so drivers using
+- * this API are highlighted with build warnings.
+- */
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #ifdef __arch_page_to_dma
+ #error Please update to __arch_pfn_to_dma
+ #endif
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -63,7 +63,6 @@ config ARM64
+ select HAVE_DEBUG_BUGVERBOSE
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DMA_API_DEBUG
+- select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -64,8 +64,6 @@ static inline bool is_device_dma_coheren
+ return dev->archdata.dma_coherent;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+ {
+ dma_addr_t dev_addr = (dma_addr_t)paddr;
+--- a/arch/avr32/Kconfig
++++ b/arch/avr32/Kconfig
+@@ -8,7 +8,6 @@ config AVR32
+ select HAVE_OPROFILE
+ select HAVE_KPROBES
+ select VIRT_TO_BUS
+- select HAVE_DMA_ATTRS
+ select GENERIC_IRQ_PROBE
+ select GENERIC_ATOMIC64
+ select HARDIRQS_SW_RESEND
+--- a/arch/avr32/include/asm/dma-mapping.h
++++ b/arch/avr32/include/asm/dma-mapping.h
+@@ -11,6 +11,4 @@ static inline struct dma_map_ops *get_dm
+ return &avr32_dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif /* __ASM_AVR32_DMA_MAPPING_H */
+--- a/arch/blackfin/Kconfig
++++ b/arch/blackfin/Kconfig
+@@ -14,7 +14,6 @@ config BLACKFIN
+ def_bool y
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
+- select HAVE_DMA_ATTRS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+--- a/arch/blackfin/include/asm/dma-mapping.h
++++ b/arch/blackfin/include/asm/dma-mapping.h
+@@ -43,6 +43,4 @@ static inline struct dma_map_ops *get_dm
+ return &bfin_dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif /* _BLACKFIN_DMA_MAPPING_H */
+--- a/arch/c6x/Kconfig
++++ b/arch/c6x/Kconfig
+@@ -18,7 +18,6 @@ config C6X
+ select GENERIC_CLOCKEVENTS
+ select MODULES_USE_ELF_RELA
+ select ARCH_NO_COHERENT_DMA_MMAP
+- select HAVE_DMA_ATTRS
+
+ config MMU
+ def_bool n
+--- a/arch/c6x/include/asm/dma-mapping.h
++++ b/arch/c6x/include/asm/dma-mapping.h
+@@ -24,8 +24,6 @@ static inline struct dma_map_ops *get_dm
+ return &c6x_dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ extern void coherent_mem_init(u32 start, u32 size);
+ void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ gfp_t gfp, struct dma_attrs *attrs);
+--- a/arch/cris/Kconfig
++++ b/arch/cris/Kconfig
+@@ -54,7 +54,6 @@ config CRIS
+ select GENERIC_ATOMIC64
+ select HAVE_UID16
+ select VIRT_TO_BUS
+- select HAVE_DMA_ATTRS
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select GENERIC_IRQ_SHOW
+ select GENERIC_IOMAP
+--- a/arch/cris/include/asm/dma-mapping.h
++++ b/arch/cris/include/asm/dma-mapping.h
+@@ -16,8 +16,6 @@ static inline struct dma_map_ops *get_dm
+ }
+ #endif
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline void
+ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+--- a/arch/frv/Kconfig
++++ b/arch/frv/Kconfig
+@@ -15,7 +15,6 @@ config FRV
+ select OLD_SIGACTION
+ select HAVE_DEBUG_STACKOVERFLOW
+ select ARCH_NO_COHERENT_DMA_MMAP
+- select HAVE_DMA_ATTRS
+
+ config ZONE_DMA
+ bool
+--- a/arch/frv/include/asm/dma-mapping.h
++++ b/arch/frv/include/asm/dma-mapping.h
+@@ -21,6 +21,4 @@ void dma_cache_sync(struct device *dev,
+ flush_write_buffers();
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif /* _ASM_DMA_MAPPING_H */
+--- a/arch/h8300/Kconfig
++++ b/arch/h8300/Kconfig
+@@ -15,7 +15,6 @@ config H8300
+ select OF_IRQ
+ select OF_EARLY_FLATTREE
+ select HAVE_MEMBLOCK
+- select HAVE_DMA_ATTRS
+ select CLKSRC_OF
+
+ config RWSEM_GENERIC_SPINLOCK
+--- a/arch/h8300/include/asm/dma-mapping.h
++++ b/arch/h8300/include/asm/dma-mapping.h
+@@ -8,6 +8,4 @@ static inline struct dma_map_ops *get_dm
+ return &h8300_dma_map_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif
+--- a/arch/hexagon/Kconfig
++++ b/arch/hexagon/Kconfig
+@@ -27,7 +27,6 @@ config HEXAGON
+ select GENERIC_CLOCKEVENTS_BROADCAST
+ select MODULES_USE_ELF_RELA
+ select GENERIC_CPU_DEVICES
+- select HAVE_DMA_ATTRS
+ ---help---
+ Qualcomm Hexagon is a processor architecture designed for high
+ performance and low power across a wide variety of applications.
+--- a/arch/hexagon/include/asm/dma-mapping.h
++++ b/arch/hexagon/include/asm/dma-mapping.h
+@@ -49,8 +49,6 @@ extern int dma_is_consistent(struct devi
+ extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+ {
+ if (!dev->dma_mask)
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -26,7 +26,6 @@ config IA64
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
+ select HAVE_FUNCTION_TRACER
+- select HAVE_DMA_ATTRS
+ select TTY
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_DMA_API_DEBUG
+--- a/arch/ia64/include/asm/dma-mapping.h
++++ b/arch/ia64/include/asm/dma-mapping.h
+@@ -25,8 +25,6 @@ extern void machvec_dma_sync_sg(struct d
+
+ #define get_dma_ops(dev) platform_dma_get_ops(dev)
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+ {
+ if (!dev->dma_mask)
+--- a/arch/m68k/Kconfig
++++ b/arch/m68k/Kconfig
+@@ -23,7 +23,6 @@ config M68K
+ select MODULES_USE_ELF_RELA
+ select OLD_SIGSUSPEND3
+ select OLD_SIGACTION
+- select HAVE_DMA_ATTRS
+
+ config RWSEM_GENERIC_SPINLOCK
+ bool
+--- a/arch/m68k/include/asm/dma-mapping.h
++++ b/arch/m68k/include/asm/dma-mapping.h
+@@ -8,8 +8,6 @@ static inline struct dma_map_ops *get_dm
+ return &m68k_dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+ {
+--- a/arch/metag/Kconfig
++++ b/arch/metag/Kconfig
+@@ -30,7 +30,6 @@ config METAG
+ select OF
+ select OF_EARLY_FLATTREE
+ select SPARSE_IRQ
+- select HAVE_DMA_ATTRS
+
+ config STACKTRACE_SUPPORT
+ def_bool y
+--- a/arch/metag/include/asm/dma-mapping.h
++++ b/arch/metag/include/asm/dma-mapping.h
+@@ -8,8 +8,6 @@ static inline struct dma_map_ops *get_dm
+ return &metag_dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ /*
+ * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
+ * do any flushing here.
+--- a/arch/microblaze/Kconfig
++++ b/arch/microblaze/Kconfig
+@@ -19,7 +19,6 @@ config MICROBLAZE
+ select HAVE_ARCH_KGDB
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DMA_API_DEBUG
+- select HAVE_DMA_ATTRS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+--- a/arch/microblaze/include/asm/dma-mapping.h
++++ b/arch/microblaze/include/asm/dma-mapping.h
+@@ -44,8 +44,6 @@ static inline struct dma_map_ops *get_dm
+ return &dma_direct_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline void __dma_sync(unsigned long paddr,
+ size_t size, enum dma_data_direction direction)
+ {
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -31,7 +31,6 @@ config MIPS
+ select RTC_LIB if !MACH_LOONGSON64
+ select GENERIC_ATOMIC64 if !64BIT
+ select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+- select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_DMA_API_DEBUG
+ select GENERIC_IRQ_PROBE
+--- a/arch/mips/include/asm/dma-mapping.h
++++ b/arch/mips/include/asm/dma-mapping.h
+@@ -29,8 +29,6 @@ static inline bool dma_capable(struct de
+
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+--- a/arch/mn10300/Kconfig
++++ b/arch/mn10300/Kconfig
+@@ -16,7 +16,6 @@ config MN10300
+ select OLD_SIGACTION
+ select HAVE_DEBUG_STACKOVERFLOW
+ select ARCH_NO_COHERENT_DMA_MMAP
+- select HAVE_DMA_ATTRS
+
+ config AM33_2
+ def_bool n
+--- a/arch/mn10300/include/asm/dma-mapping.h
++++ b/arch/mn10300/include/asm/dma-mapping.h
+@@ -28,6 +28,4 @@ void dma_cache_sync(void *vaddr, size_t
+ mn10300_dcache_flush_inv();
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif
+--- a/arch/nios2/Kconfig
++++ b/arch/nios2/Kconfig
+@@ -16,7 +16,6 @@ config NIOS2
+ select SOC_BUS
+ select SPARSE_IRQ
+ select USB_ARCH_HAS_HCD if USB_SUPPORT
+- select HAVE_DMA_ATTRS
+
+ config GENERIC_CSUM
+ def_bool y
+--- a/arch/openrisc/Kconfig
++++ b/arch/openrisc/Kconfig
+@@ -29,9 +29,6 @@ config OPENRISC
+ config MMU
+ def_bool y
+
+-config HAVE_DMA_ATTRS
+- def_bool y
+-
+ config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+--- a/arch/openrisc/include/asm/dma-mapping.h
++++ b/arch/openrisc/include/asm/dma-mapping.h
+@@ -42,6 +42,4 @@ static inline int dma_supported(struct d
+ return dma_mask == DMA_BIT_MASK(32);
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif /* __ASM_OPENRISC_DMA_MAPPING_H */
+--- a/arch/parisc/Kconfig
++++ b/arch/parisc/Kconfig
+@@ -30,7 +30,6 @@ config PARISC
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_ARCH_AUDITSYSCALL
+ select ARCH_NO_COHERENT_DMA_MMAP
+- select HAVE_DMA_ATTRS
+
+ help
+ The PA-RISC microprocessor is designed by Hewlett-Packard and used
+--- a/arch/parisc/include/asm/dma-mapping.h
++++ b/arch/parisc/include/asm/dma-mapping.h
+@@ -83,6 +83,4 @@ struct parisc_device;
+ void * sba_get_iommu(struct parisc_device *dev);
+ #endif
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -111,7 +111,6 @@ config PPC
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+- select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
+ select HAVE_OPROFILE
+ select HAVE_DEBUG_KMEMLEAK
+--- a/arch/powerpc/include/asm/dma-mapping.h
++++ b/arch/powerpc/include/asm/dma-mapping.h
+@@ -125,8 +125,6 @@ static inline void set_dma_offset(struct
+ #define HAVE_ARCH_DMA_SET_MASK 1
+ extern int dma_set_mask(struct device *dev, u64 dma_mask);
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ extern int __dma_set_mask(struct device *dev, u64 dma_mask);
+ extern u64 __dma_get_required_mask(struct device *dev);
+
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -583,7 +583,6 @@ config QDIO
+
+ menuconfig PCI
+ bool "PCI support"
+- select HAVE_DMA_ATTRS
+ select PCI_MSI
+ select IOMMU_SUPPORT
+ help
+--- a/arch/s390/include/asm/dma-mapping.h
++++ b/arch/s390/include/asm/dma-mapping.h
+@@ -23,8 +23,6 @@ static inline void dma_cache_sync(struct
+ {
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+ {
+ if (!dev->dma_mask)
+--- a/arch/sh/Kconfig
++++ b/arch/sh/Kconfig
+@@ -11,7 +11,6 @@ config SUPERH
+ select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_DMA_API_DEBUG
+- select HAVE_DMA_ATTRS
+ select HAVE_PERF_EVENTS
+ select HAVE_DEBUG_BUGVERBOSE
+ select ARCH_HAVE_CUSTOM_GPIO_H
+--- a/arch/sh/include/asm/dma-mapping.h
++++ b/arch/sh/include/asm/dma-mapping.h
+@@ -11,8 +11,6 @@ static inline struct dma_map_ops *get_dm
+
+ #define DMA_ERROR_CODE 0
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir);
+
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -27,7 +27,6 @@ config SPARC
+ select RTC_CLASS
+ select RTC_DRV_M48T59
+ select RTC_SYSTOHC
+- select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
+ select GENERIC_IRQ_SHOW
+--- a/arch/sparc/include/asm/dma-mapping.h
++++ b/arch/sparc/include/asm/dma-mapping.h
+@@ -37,6 +37,4 @@ static inline struct dma_map_ops *get_dm
+ return dma_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ #endif
+--- a/arch/tile/Kconfig
++++ b/arch/tile/Kconfig
+@@ -6,7 +6,6 @@ config TILE
+ select HAVE_EXIT_THREAD
+ select HAVE_PERF_EVENTS
+ select USE_PMC if PERF_EVENTS
+- select HAVE_DMA_ATTRS
+ select HAVE_DMA_API_DEBUG
+ select HAVE_KVM if !TILEGX
+ select GENERIC_FIND_FIRST_BIT
+--- a/arch/tile/include/asm/dma-mapping.h
++++ b/arch/tile/include/asm/dma-mapping.h
+@@ -73,9 +73,6 @@ static inline bool dma_capable(struct de
+ }
+
+ #define HAVE_ARCH_DMA_SET_MASK 1
+-
+-#include <asm-generic/dma-mapping-common.h>
+-
+ int dma_set_mask(struct device *dev, u64 mask);
+
+ /*
+--- a/arch/unicore32/Kconfig
++++ b/arch/unicore32/Kconfig
+@@ -4,7 +4,6 @@ config UNICORE32
+ select ARCH_MIGHT_HAVE_PC_SERIO
+ select HAVE_MEMBLOCK
+ select HAVE_GENERIC_DMA_COHERENT
+- select HAVE_DMA_ATTRS
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_BZIP2
+ select GENERIC_ATOMIC64
+--- a/arch/unicore32/include/asm/dma-mapping.h
++++ b/arch/unicore32/include/asm/dma-mapping.h
+@@ -28,8 +28,6 @@ static inline struct dma_map_ops *get_dm
+ return &swiotlb_dma_map_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+ {
+ if (dev && dev->dma_mask)
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -96,7 +96,6 @@ config X86
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_DMA_API_DEBUG
+- select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_DYNAMIC_FTRACE
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS
+--- a/arch/x86/include/asm/dma-mapping.h
++++ b/arch/x86/include/asm/dma-mapping.h
+@@ -46,8 +46,6 @@ bool arch_dma_alloc_attrs(struct device
+ #define HAVE_ARCH_DMA_SUPPORTED 1
+ extern int dma_supported(struct device *hwdev, u64 mask);
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs);
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -15,7 +15,6 @@ config XTENSA
+ select GENERIC_PCI_IOMAP
+ select GENERIC_SCHED_CLOCK
+ select HAVE_DMA_API_DEBUG
+- select HAVE_DMA_ATTRS
+ select HAVE_EXIT_THREAD
+ select HAVE_FUNCTION_TRACER
+ select HAVE_FUTEX_CMPXCHG if !MMU
+--- a/arch/xtensa/include/asm/dma-mapping.h
++++ b/arch/xtensa/include/asm/dma-mapping.h
+@@ -30,8 +30,6 @@ static inline struct dma_map_ops *get_dm
+ return &xtensa_dma_map_ops;
+ }
+
+-#include <asm-generic/dma-mapping-common.h>
+-
+ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -82,13 +82,13 @@ config DRM_TTM
+
+ config DRM_GEM_CMA_HELPER
+ bool
+- depends on DRM && HAVE_DMA_ATTRS
++ depends on DRM
+ help
+ Choose this if you need the GEM CMA helper functions
+
+ config DRM_KMS_CMA_HELPER
+ bool
+- depends on DRM && HAVE_DMA_ATTRS
++ depends on DRM
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_FB_HELPER
+ select FB_SYS_FILLRECT
+--- a/drivers/gpu/drm/imx/Kconfig
++++ b/drivers/gpu/drm/imx/Kconfig
+@@ -5,7 +5,7 @@ config DRM_IMX
+ select VIDEOMODE_HELPERS
+ select DRM_GEM_CMA_HELPER
+ select DRM_KMS_CMA_HELPER
+- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
++ depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
+ depends on IMX_IPUV3_CORE
+ help
+ enable i.MX graphics support
+--- a/drivers/gpu/drm/rcar-du/Kconfig
++++ b/drivers/gpu/drm/rcar-du/Kconfig
+@@ -1,6 +1,6 @@
+ config DRM_RCAR_DU
+ tristate "DRM Support for R-Car Display Unit"
+- depends on DRM && ARM && HAVE_DMA_ATTRS && OF
++ depends on DRM && ARM && OF
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+--- a/drivers/gpu/drm/shmobile/Kconfig
++++ b/drivers/gpu/drm/shmobile/Kconfig
+@@ -1,6 +1,6 @@
+ config DRM_SHMOBILE
+ tristate "DRM Support for SH Mobile"
+- depends on DRM && ARM && HAVE_DMA_ATTRS
++ depends on DRM && ARM
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on FB_SH_MOBILE_MERAM || !FB_SH_MOBILE_MERAM
+ select BACKLIGHT_CLASS_DEVICE
+--- a/drivers/gpu/drm/sti/Kconfig
++++ b/drivers/gpu/drm/sti/Kconfig
+@@ -1,6 +1,6 @@
+ config DRM_STI
+ tristate "DRM Support for STMicroelectronics SoC stiH41x Series"
+- depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) && HAVE_DMA_ATTRS
++ depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM)
+ select RESET_CONTROLLER
+ select DRM_KMS_HELPER
+ select DRM_GEM_CMA_HELPER
+--- a/drivers/gpu/drm/tilcdc/Kconfig
++++ b/drivers/gpu/drm/tilcdc/Kconfig
+@@ -1,6 +1,6 @@
+ config DRM_TILCDC
+ tristate "DRM Support for TI LCDC Display Controller"
+- depends on DRM && OF && ARM && HAVE_DMA_ATTRS
++ depends on DRM && OF && ARM
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_KMS_CMA_HELPER
+--- a/drivers/gpu/drm/vc4/Kconfig
++++ b/drivers/gpu/drm/vc4/Kconfig
+@@ -1,7 +1,7 @@
+ config DRM_VC4
+ tristate "Broadcom VC4 Graphics"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+- depends on DRM && HAVE_DMA_ATTRS
++ depends on DRM
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+--- a/drivers/media/platform/Kconfig
++++ b/drivers/media/platform/Kconfig
+@@ -216,7 +216,6 @@ config VIDEO_STI_BDISP
+ tristate "STMicroelectronics BDISP 2D blitter driver"
+ depends on VIDEO_DEV && VIDEO_V4L2
+ depends on ARCH_STI || COMPILE_TEST
+- depends on HAVE_DMA_ATTRS
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ help
+--- a/include/asm-generic/dma-mapping-broken.h
++++ /dev/null
+@@ -1,95 +0,0 @@
+-#ifndef _ASM_GENERIC_DMA_MAPPING_H
+-#define _ASM_GENERIC_DMA_MAPPING_H
+-
+-/* define the dma api to allow compilation but not linking of
+- * dma dependent code. Code that depends on the dma-mapping
+- * API needs to set 'depends on HAS_DMA' in its Kconfig
+- */
+-
+-struct scatterlist;
+-
+-extern void *
+-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+- gfp_t flag);
+-
+-extern void
+-dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+- dma_addr_t dma_handle);
+-
+-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag,
+- struct dma_attrs *attrs)
+-{
+- /* attrs is not supported and ignored */
+- return dma_alloc_coherent(dev, size, dma_handle, flag);
+-}
+-
+-static inline void dma_free_attrs(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t dma_handle,
+- struct dma_attrs *attrs)
+-{
+- /* attrs is not supported and ignored */
+- dma_free_coherent(dev, size, cpu_addr, dma_handle);
+-}
+-
+-#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+-#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+-
+-extern dma_addr_t
+-dma_map_single(struct device *dev, void *ptr, size_t size,
+- enum dma_data_direction direction);
+-
+-extern void
+-dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction direction);
+-
+-extern int
+-dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction direction);
+-
+-extern void
+-dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+- enum dma_data_direction direction);
+-
+-extern dma_addr_t
+-dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+- size_t size, enum dma_data_direction direction);
+-
+-extern void
+-dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+- enum dma_data_direction direction);
+-
+-extern void
+-dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+- enum dma_data_direction direction);
+-
+-extern void
+-dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+- unsigned long offset, size_t size,
+- enum dma_data_direction direction);
+-
+-extern void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+- enum dma_data_direction direction);
+-
+-#define dma_sync_single_for_device dma_sync_single_for_cpu
+-#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
+-#define dma_sync_sg_for_device dma_sync_sg_for_cpu
+-
+-extern int
+-dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+-
+-extern int
+-dma_supported(struct device *dev, u64 mask);
+-
+-extern int
+-dma_set_mask(struct device *dev, u64 mask);
+-
+-extern int
+-dma_get_cache_alignment(void);
+-
+-extern void
+-dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+- enum dma_data_direction direction);
+-
+-#endif /* _ASM_GENERIC_DMA_MAPPING_H */
+--- a/include/asm-generic/dma-mapping-common.h
++++ /dev/null
+@@ -1,358 +0,0 @@
+-#ifndef _ASM_GENERIC_DMA_MAPPING_H
+-#define _ASM_GENERIC_DMA_MAPPING_H
+-
+-#include <linux/kmemcheck.h>
+-#include <linux/bug.h>
+-#include <linux/scatterlist.h>
+-#include <linux/dma-debug.h>
+-#include <linux/dma-attrs.h>
+-#include <asm-generic/dma-coherent.h>
+-
+-static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+- size_t size,
+- enum dma_data_direction dir,
+- struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+- dma_addr_t addr;
+-
+- kmemcheck_mark_initialized(ptr, size);
+- BUG_ON(!valid_dma_direction(dir));
+- addr = ops->map_page(dev, virt_to_page(ptr),
+- (unsigned long)ptr & ~PAGE_MASK, size,
+- dir, attrs);
+- debug_dma_map_page(dev, virt_to_page(ptr),
+- (unsigned long)ptr & ~PAGE_MASK, size,
+- dir, addr, true);
+- return addr;
+-}
+-
+-static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+- size_t size,
+- enum dma_data_direction dir,
+- struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->unmap_page)
+- ops->unmap_page(dev, addr, size, dir, attrs);
+- debug_dma_unmap_page(dev, addr, size, dir, true);
+-}
+-
+-/*
+- * dma_maps_sg_attrs returns 0 on error and > 0 on success.
+- * It should never return a value < 0.
+- */
+-static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir,
+- struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+- int i, ents;
+- struct scatterlist *s;
+-
+- for_each_sg(sg, s, nents, i)
+- kmemcheck_mark_initialized(sg_virt(s), s->length);
+- BUG_ON(!valid_dma_direction(dir));
+- ents = ops->map_sg(dev, sg, nents, dir, attrs);
+- BUG_ON(ents < 0);
+- debug_dma_map_sg(dev, sg, nents, ents, dir);
+-
+- return ents;
+-}
+-
+-static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+- int nents, enum dma_data_direction dir,
+- struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- debug_dma_unmap_sg(dev, sg, nents, dir);
+- if (ops->unmap_sg)
+- ops->unmap_sg(dev, sg, nents, dir, attrs);
+-}
+-
+-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+- size_t offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+- dma_addr_t addr;
+-
+- kmemcheck_mark_initialized(page_address(page) + offset, size);
+- BUG_ON(!valid_dma_direction(dir));
+- addr = ops->map_page(dev, page, offset, size, dir, NULL);
+- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+-
+- return addr;
+-}
+-
+-static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+- size_t size, enum dma_data_direction dir)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->unmap_page)
+- ops->unmap_page(dev, addr, size, dir, NULL);
+- debug_dma_unmap_page(dev, addr, size, dir, false);
+-}
+-
+-static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->sync_single_for_cpu)
+- ops->sync_single_for_cpu(dev, addr, size, dir);
+- debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+-}
+-
+-static inline void dma_sync_single_for_device(struct device *dev,
+- dma_addr_t addr, size_t size,
+- enum dma_data_direction dir)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->sync_single_for_device)
+- ops->sync_single_for_device(dev, addr, size, dir);
+- debug_dma_sync_single_for_device(dev, addr, size, dir);
+-}
+-
+-static inline void dma_sync_single_range_for_cpu(struct device *dev,
+- dma_addr_t addr,
+- unsigned long offset,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- const struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->sync_single_for_cpu)
+- ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+- debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+-}
+-
+-static inline void dma_sync_single_range_for_device(struct device *dev,
+- dma_addr_t addr,
+- unsigned long offset,
+- size_t size,
+- enum dma_data_direction dir)
+-{
+- const struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->sync_single_for_device)
+- ops->sync_single_for_device(dev, addr + offset, size, dir);
+- debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+-}
+-
+-static inline void
+-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+- int nelems, enum dma_data_direction dir)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->sync_sg_for_cpu)
+- ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+- debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+-}
+-
+-static inline void
+-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+- int nelems, enum dma_data_direction dir)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!valid_dma_direction(dir));
+- if (ops->sync_sg_for_device)
+- ops->sync_sg_for_device(dev, sg, nelems, dir);
+- debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+-
+-}
+-
+-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+-
+-extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-
+-void *dma_common_contiguous_remap(struct page *page, size_t size,
+- unsigned long vm_flags,
+- pgprot_t prot, const void *caller);
+-
+-void *dma_common_pages_remap(struct page **pages, size_t size,
+- unsigned long vm_flags, pgprot_t prot,
+- const void *caller);
+-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+-
+-/**
+- * dma_mmap_attrs - map a coherent DMA allocation into user space
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @vma: vm_area_struct describing requested user mapping
+- * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+- * @handle: device-view address returned from dma_alloc_attrs
+- * @size: size of memory originally requested in dma_alloc_attrs
+- * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+- *
+- * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+- * into user space. The coherent DMA buffer must not be freed by the
+- * driver until the user space mapping has been released.
+- */
+-static inline int
+-dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+- BUG_ON(!ops);
+- if (ops->mmap)
+- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+- return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+-}
+-
+-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+-
+-int
+-dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size);
+-
+-static inline int
+-dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+- BUG_ON(!ops);
+- if (ops->get_sgtable)
+- return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+- attrs);
+- return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+-}
+-
+-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+-
+-#ifndef arch_dma_alloc_attrs
+-#define arch_dma_alloc_attrs(dev, flag) (true)
+-#endif
+-
+-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag,
+- struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+- void *cpu_addr;
+-
+- BUG_ON(!ops);
+-
+- if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+- return cpu_addr;
+-
+- if (!arch_dma_alloc_attrs(&dev, &flag))
+- return NULL;
+- if (!ops->alloc)
+- return NULL;
+-
+- cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+- debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+- return cpu_addr;
+-}
+-
+-static inline void dma_free_attrs(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t dma_handle,
+- struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- BUG_ON(!ops);
+- WARN_ON(irqs_disabled());
+-
+- if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+- return;
+-
+- if (!ops->free)
+- return;
+-
+- debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+- ops->free(dev, size, cpu_addr, dma_handle, attrs);
+-}
+-
+-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t flag)
+-{
+- return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+-}
+-
+-static inline void dma_free_coherent(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t dma_handle)
+-{
+- return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+-}
+-
+-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+- dma_addr_t *dma_handle, gfp_t gfp)
+-{
+- DEFINE_DMA_ATTRS(attrs);
+-
+- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+- return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+-}
+-
+-static inline void dma_free_noncoherent(struct device *dev, size_t size,
+- void *cpu_addr, dma_addr_t dma_handle)
+-{
+- DEFINE_DMA_ATTRS(attrs);
+-
+- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+- dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+-}
+-
+-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+-{
+- debug_dma_mapping_error(dev, dma_addr);
+-
+- if (get_dma_ops(dev)->mapping_error)
+- return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+-
+-#ifdef DMA_ERROR_CODE
+- return dma_addr == DMA_ERROR_CODE;
+-#else
+- return 0;
+-#endif
+-}
+-
+-#ifndef HAVE_ARCH_DMA_SUPPORTED
+-static inline int dma_supported(struct device *dev, u64 mask)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- if (!ops)
+- return 0;
+- if (!ops->dma_supported)
+- return 1;
+- return ops->dma_supported(dev, mask);
+-}
+-#endif
+-
+-#ifndef HAVE_ARCH_DMA_SET_MASK
+-static inline int dma_set_mask(struct device *dev, u64 mask)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+-
+- if (ops->set_dma_mask)
+- return ops->set_dma_mask(dev, mask);
+-
+- if (!dev->dma_mask || !dma_supported(dev, mask))
+- return -EIO;
+- *dev->dma_mask = mask;
+- return 0;
+-}
+-#endif
+-
+-#endif
+--- a/include/linux/dma-attrs.h
++++ b/include/linux/dma-attrs.h
+@@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct
+ bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
+ }
+
+-#ifdef CONFIG_HAVE_DMA_ATTRS
+ /**
+ * dma_set_attr - set a specific attribute
+ * @attr: attribute to set
+@@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_
+ BUG_ON(attr >= DMA_ATTR_MAX);
+ return test_bit(attr, attrs->flags);
+ }
+-#else /* !CONFIG_HAVE_DMA_ATTRS */
+-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+-{
+-}
+
+-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
+-{
+- return 0;
+-}
+-#endif /* CONFIG_HAVE_DMA_ATTRS */
+ #endif /* _DMA_ATTR_H */
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -6,8 +6,12 @@
+ #include <linux/device.h>
+ #include <linux/err.h>
+ #include <linux/dma-attrs.h>
++#include <linux/dma-debug.h>
+ #include <linux/dma-direction.h>
+ #include <linux/scatterlist.h>
++#include <linux/kmemcheck.h>
++#include <linux/bug.h>
++#include <asm-generic/dma-coherent.h>
+
+ /*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform.
+@@ -86,7 +90,363 @@ static inline int is_device_dma_capable(
+ #ifdef CONFIG_HAS_DMA
+ #include <asm/dma-mapping.h>
+ #else
+-#include <asm-generic/dma-mapping-broken.h>
++/*
++ * Define the dma api to allow compilation but not linking of
++ * dma dependent code. Code that depends on the dma-mapping
++ * API needs to set 'depends on HAS_DMA' in its Kconfig
++ */
++extern struct dma_map_ops bad_dma_ops;
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++{
++ return &bad_dma_ops;
++}
++#endif
++
++static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
++ size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ dma_addr_t addr;
++
++ kmemcheck_mark_initialized(ptr, size);
++ BUG_ON(!valid_dma_direction(dir));
++ addr = ops->map_page(dev, virt_to_page(ptr),
++ (unsigned long)ptr & ~PAGE_MASK, size,
++ dir, attrs);
++ debug_dma_map_page(dev, virt_to_page(ptr),
++ (unsigned long)ptr & ~PAGE_MASK, size,
++ dir, addr, true);
++ return addr;
++}
++
++static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
++ size_t size,
++ enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->unmap_page)
++ ops->unmap_page(dev, addr, size, dir, attrs);
++ debug_dma_unmap_page(dev, addr, size, dir, true);
++}
++
++/*
++ * dma_maps_sg_attrs returns 0 on error and > 0 on success.
++ * It should never return a value < 0.
++ */
++static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ int i, ents;
++ struct scatterlist *s;
++
++ for_each_sg(sg, s, nents, i)
++ kmemcheck_mark_initialized(sg_virt(s), s->length);
++ BUG_ON(!valid_dma_direction(dir));
++ ents = ops->map_sg(dev, sg, nents, dir, attrs);
++ BUG_ON(ents < 0);
++ debug_dma_map_sg(dev, sg, nents, ents, dir);
++
++ return ents;
++}
++
++static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ debug_dma_unmap_sg(dev, sg, nents, dir);
++ if (ops->unmap_sg)
++ ops->unmap_sg(dev, sg, nents, dir, attrs);
++}
++
++static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
++ size_t offset, size_t size,
++ enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ dma_addr_t addr;
++
++ kmemcheck_mark_initialized(page_address(page) + offset, size);
++ BUG_ON(!valid_dma_direction(dir));
++ addr = ops->map_page(dev, page, offset, size, dir, NULL);
++ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
++
++ return addr;
++}
++
++static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
++ size_t size, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->unmap_page)
++ ops->unmap_page(dev, addr, size, dir, NULL);
++ debug_dma_unmap_page(dev, addr, size, dir, false);
++}
++
++static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
++ size_t size,
++ enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->sync_single_for_cpu)
++ ops->sync_single_for_cpu(dev, addr, size, dir);
++ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
++}
++
++static inline void dma_sync_single_for_device(struct device *dev,
++ dma_addr_t addr, size_t size,
++ enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->sync_single_for_device)
++ ops->sync_single_for_device(dev, addr, size, dir);
++ debug_dma_sync_single_for_device(dev, addr, size, dir);
++}
++
++static inline void dma_sync_single_range_for_cpu(struct device *dev,
++ dma_addr_t addr,
++ unsigned long offset,
++ size_t size,
++ enum dma_data_direction dir)
++{
++ const struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->sync_single_for_cpu)
++ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
++ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
++}
++
++static inline void dma_sync_single_range_for_device(struct device *dev,
++ dma_addr_t addr,
++ unsigned long offset,
++ size_t size,
++ enum dma_data_direction dir)
++{
++ const struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->sync_single_for_device)
++ ops->sync_single_for_device(dev, addr + offset, size, dir);
++ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->sync_sg_for_cpu)
++ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
++ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
++ int nelems, enum dma_data_direction dir)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!valid_dma_direction(dir));
++ if (ops->sync_sg_for_device)
++ ops->sync_sg_for_device(dev, sg, nelems, dir);
++ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
++
++}
++
++#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
++#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
++#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
++#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
++
++extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size);
++
++void *dma_common_contiguous_remap(struct page *page, size_t size,
++ unsigned long vm_flags,
++ pgprot_t prot, const void *caller);
++
++void *dma_common_pages_remap(struct page **pages, size_t size,
++ unsigned long vm_flags, pgprot_t prot,
++ const void *caller);
++void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
++
++/**
++ * dma_mmap_attrs - map a coherent DMA allocation into user space
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @vma: vm_area_struct describing requested user mapping
++ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
++ * @handle: device-view address returned from dma_alloc_attrs
++ * @size: size of memory originally requested in dma_alloc_attrs
++ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
++ *
++ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
++ * into user space. The coherent DMA buffer must not be freed by the
++ * driver until the user space mapping has been released.
++ */
++static inline int
++dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
++ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ BUG_ON(!ops);
++ if (ops->mmap)
++ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
++ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++
++#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
++
++int
++dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size);
++
++static inline int
++dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
++ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ BUG_ON(!ops);
++ if (ops->get_sgtable)
++ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
++ attrs);
++ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
++}
++
++#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
++
++#ifndef arch_dma_alloc_attrs
++#define arch_dma_alloc_attrs(dev, flag) (true)
++#endif
++
++static inline void *dma_alloc_attrs(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ void *cpu_addr;
++
++ BUG_ON(!ops);
++
++ if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
++ return cpu_addr;
++
++ if (!arch_dma_alloc_attrs(&dev, &flag))
++ return NULL;
++ if (!ops->alloc)
++ return NULL;
++
++ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
++ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
++ return cpu_addr;
++}
++
++static inline void dma_free_attrs(struct device *dev, size_t size,
++ void *cpu_addr, dma_addr_t dma_handle,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ BUG_ON(!ops);
++ WARN_ON(irqs_disabled());
++
++ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
++ return;
++
++ if (!ops->free)
++ return;
++
++ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
++ ops->free(dev, size, cpu_addr, dma_handle, attrs);
++}
++
++static inline void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag)
++{
++ return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
++}
++
++static inline void dma_free_coherent(struct device *dev, size_t size,
++ void *cpu_addr, dma_addr_t dma_handle)
++{
++ return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
++}
++
++static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp)
++{
++ DEFINE_DMA_ATTRS(attrs);
++
++ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
++ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
++}
++
++static inline void dma_free_noncoherent(struct device *dev, size_t size,
++ void *cpu_addr, dma_addr_t dma_handle)
++{
++ DEFINE_DMA_ATTRS(attrs);
++
++ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
++ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
++}
++
++static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
++{
++ debug_dma_mapping_error(dev, dma_addr);
++
++ if (get_dma_ops(dev)->mapping_error)
++ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
++
++#ifdef DMA_ERROR_CODE
++ return dma_addr == DMA_ERROR_CODE;
++#else
++ return 0;
++#endif
++}
++
++#ifndef HAVE_ARCH_DMA_SUPPORTED
++static inline int dma_supported(struct device *dev, u64 mask)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ if (!ops)
++ return 0;
++ if (!ops->dma_supported)
++ return 1;
++ return ops->dma_supported(dev, mask);
++}
++#endif
++
++#ifndef HAVE_ARCH_DMA_SET_MASK
++static inline int dma_set_mask(struct device *dev, u64 mask)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++
++ if (ops->set_dma_mask)
++ return ops->set_dma_mask(dev, mask);
++
++ if (!dev->dma_mask || !dma_supported(dev, mask))
++ return -EIO;
++ *dev->dma_mask = mask;
++ return 0;
++}
+ #endif
+
+ static inline u64 dma_get_mask(struct device *dev)
+@@ -259,22 +619,6 @@ static inline void dmam_release_declared
+ }
+ #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+
+-#ifndef CONFIG_HAVE_DMA_ATTRS
+-struct dma_attrs;
+-
+-#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
+- dma_map_single(dev, cpu_addr, size, dir)
+-
+-#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
+- dma_unmap_single(dev, dma_addr, size, dir)
+-
+-#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
+- dma_map_sg(dev, sgl, nents, dir)
+-
+-#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
+- dma_unmap_sg(dev, sgl, nents, dir)
+-
+-#else
+ static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
+ {
+@@ -300,7 +644,6 @@ static inline int dma_mmap_writecombine(
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+ }
+-#endif /* CONFIG_HAVE_DMA_ATTRS */
+
+ #ifdef CONFIG_NEED_DMA_MAP_STATE
+ #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
diff --git a/patches.arch/0016-dma-mapping-remove-asm-generic-dma-coherent.h.patch b/patches.arch/0016-dma-mapping-remove-asm-generic-dma-coherent.h.patch
new file mode 100644
index 0000000000..0bd9ce8222
--- /dev/null
+++ b/patches.arch/0016-dma-mapping-remove-asm-generic-dma-coherent.h.patch
@@ -0,0 +1,201 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Jan 2016 15:02:09 -0800
+Subject: dma-mapping: remove <asm-generic/dma-coherent.h>
+Git-commit: 20d666e41166f8023ff3d960e832d87ded18c5c4
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+This wasn't an asm-generic header to start with, and can be merged into
+dma-mapping.h trivially.
+
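+As a rough usage sketch (not part of this patch; the device, addresses
+and size are placeholders), a driver sets up a dedicated coherent pool
+through the interface that now lives in <linux/dma-mapping.h>:
+
+	/* claim 1 MiB of device-local memory for coherent allocations;
+	 * in this kernel the call returns 0 on failure */
+	if (!dma_declare_coherent_memory(dev, phys_base, bus_base,
+					 SZ_1M, DMA_MEMORY_MAP))
+		dev_warn(dev, "no dedicated coherent pool, using system RAM\n");
+
+	/* subsequent allocations are served from that pool first */
+	buf = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
+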
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Aurelien Jacquiot <a-jacquiot@ti.com>
+Cc: Chris Metcalf <cmetcalf@ezchip.com>
+Cc: David Howells <dhowells@redhat.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Haavard Skinnemoen <hskinnemoen@gmail.com>
+Cc: Hans-Christian Egtvedt <egtvedt@samfundet.no>
+Cc: Helge Deller <deller@gmx.de>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Jesper Nilsson <jesper.nilsson@axis.com>
+Cc: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
+Cc: Ley Foon Tan <lftan@altera.com>
+Cc: Mark Salter <msalter@redhat.com>
+Cc: Mikael Starvik <starvik@axis.com>
+Cc: Steven Miao <realmz6@gmail.com>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Joerg Roedel <jroedel@suse.de>
+Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/xtensa/include/asm/dma-mapping.h | 2 --
+ drivers/base/dma-mapping.c | 3 +--
+ include/asm-generic/dma-coherent.h | 32 --------------------------------
+ include/linux/dma-mapping.h | 34 ++++++++++++++++++++++++++++------
+ 4 files changed, 29 insertions(+), 42 deletions(-)
+ delete mode 100644 include/asm-generic/dma-coherent.h
+
+diff --git a/arch/xtensa/include/asm/dma-mapping.h b/arch/xtensa/include/asm/dma-mapping.h
+index 87b7a7df..3fc1170 100644
+--- a/arch/xtensa/include/asm/dma-mapping.h
++++ b/arch/xtensa/include/asm/dma-mapping.h
+@@ -13,8 +13,6 @@
+ #include <asm/cache.h>
+ #include <asm/io.h>
+
+-#include <asm-generic/dma-coherent.h>
+-
+ #include <linux/mm.h>
+ #include <linux/scatterlist.h>
+
+diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
+index 381e39d..d799662 100644
+--- a/drivers/base/dma-mapping.c
++++ b/drivers/base/dma-mapping.c
+@@ -12,7 +12,6 @@
+ #include <linux/gfp.h>
+ #include <linux/slab.h>
+ #include <linux/vmalloc.h>
+-#include <asm-generic/dma-coherent.h>
+
+ /*
+ * Managed DMA API
+@@ -167,7 +166,7 @@ void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ }
+ EXPORT_SYMBOL(dmam_free_noncoherent);
+
+-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+
+ static void dmam_coherent_decl_release(struct device *dev, void *res)
+ {
+diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
+deleted file mode 100644
+index 0297e58..0000000
+--- a/include/asm-generic/dma-coherent.h
++++ /dev/null
+@@ -1,32 +0,0 @@
+-#ifndef DMA_COHERENT_H
+-#define DMA_COHERENT_H
+-
+-#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+-/*
+- * These three functions are only for dma allocator.
+- * Don't use them in device drivers.
+- */
+-int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+- dma_addr_t *dma_handle, void **ret);
+-int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+-
+-int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, size_t size, int *ret);
+-/*
+- * Standard interface
+- */
+-#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+- dma_addr_t device_addr, size_t size, int flags);
+-
+-void dma_release_declared_memory(struct device *dev);
+-
+-void *dma_mark_declared_memory_occupied(struct device *dev,
+- dma_addr_t device_addr, size_t size);
+-#else
+-#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+-#define dma_release_from_coherent(dev, order, vaddr) (0)
+-#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+-#endif
+-
+-#endif
+diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
+index cc0517b..d6b575b 100644
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -11,7 +11,6 @@
+ #include <linux/scatterlist.h>
+ #include <linux/kmemcheck.h>
+ #include <linux/bug.h>
+-#include <asm-generic/dma-coherent.h>
+
+ /*
+ * A dma_addr_t can hold any valid DMA or bus address for the platform.
+@@ -87,6 +86,23 @@ static inline int is_device_dma_capable(struct device *dev)
+ return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
+ }
+
++#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
++/*
++ * These three functions are only for dma allocator.
++ * Don't use them in device drivers.
++ */
++int dma_alloc_from_coherent(struct device *dev, ssize_t size,
++ dma_addr_t *dma_handle, void **ret);
++int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
++
++int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, size_t size, int *ret);
++#else
++#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
++#define dma_release_from_coherent(dev, order, vaddr) (0)
++#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
++#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
++
+ #ifdef CONFIG_HAS_DMA
+ #include <asm/dma-mapping.h>
+ #else
+@@ -568,7 +584,13 @@ static inline int dma_get_cache_alignment(void)
+ #define DMA_MEMORY_INCLUDES_CHILDREN 0x04
+ #define DMA_MEMORY_EXCLUSIVE 0x08
+
+-#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
++int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
++ dma_addr_t device_addr, size_t size, int flags);
++void dma_release_declared_memory(struct device *dev);
++void *dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size);
++#else
+ static inline int
+ dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+@@ -587,7 +609,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
+ {
+ return ERR_PTR(-EBUSY);
+ }
+-#endif
++#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+
+ /*
+ * Managed DMA API
+@@ -600,13 +622,13 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+ extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+ extern int dmam_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size,
+ int flags);
+ extern void dmam_release_declared_memory(struct device *dev);
+-#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+ static inline int dmam_declare_coherent_memory(struct device *dev,
+ phys_addr_t phys_addr, dma_addr_t device_addr,
+ size_t size, gfp_t gfp)
+@@ -617,7 +639,7 @@ static inline int dmam_declare_coherent_memory(struct device *dev,
+ static inline void dmam_release_declared_memory(struct device *dev)
+ {
+ }
+-#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+
+ static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t gfp)
+--
+2.10.0
+
diff --git a/patches.arch/arm64-Relocate-screen_info.lfb_base-on-PCI-BAR-alloc.patch b/patches.arch/arm64-Relocate-screen_info.lfb_base-on-PCI-BAR-alloc.patch
deleted file mode 100644
index f4464f394c..0000000000
--- a/patches.arch/arm64-Relocate-screen_info.lfb_base-on-PCI-BAR-alloc.patch
+++ /dev/null
@@ -1,192 +0,0 @@
-From 3c5af17727b0885ae9db2770b7fd3f6db556cfaa Mon Sep 17 00:00:00 2001
-From: Alexander Graf <agraf@suse.de>
-Date: Wed, 27 Apr 2016 23:39:30 +0200
-Subject: [PATCH] arm64: Relocate screen_info.lfb_base on PCI BAR allocation
-
-Patch-mainline: Not yet, posted v1 and no reply yet, but this is a P1
-References: bsc#975159
-
-When booting with efifb, we get a frame buffer address passed into the system.
-This address can be backed by any device, including PCI devices.
-
-PCI devices can have their BARs mapped to various places inside the PCI window
-though. Linux makes use of that on early boot and usually maps PCI BARs wherever
-it thinks makes sense.
-
-If we now load the efifb driver after that BAR map has happened, the frame
-buffer address we received may be invalid, because it was in a BAR map before
-Linux modified it.
-
-To work around that issue, this patch introduces a BAR mapping callback that
-gets called every time Linux (re)allocates a BAR. That way our arm64 efi code
-can check whether the frame buffer is inside the old map and adjust it to
-the new one.
-
-With this and the efifb patches applied, I can successfully see efifb output
-even after Linux remapped BARs.
-
-Signed-off-by: Alexander Graf <agraf@suse.de>
----
- arch/arm64/kernel/efi.c | 40 +++++++++++++++++++++++++++++++++++++++-
- drivers/pci/setup-res.c | 29 +++++++++++++++++++++++++++++
- include/linux/pci.h | 8 ++++++++
- 3 files changed, 76 insertions(+), 1 deletion(-)
-
-diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
-index fe1addd6..e6f66f46 100644
---- a/arch/arm64/kernel/efi.c
-+++ b/arch/arm64/kernel/efi.c
-@@ -27,6 +27,7 @@
- #include <linux/slab.h>
- #include <linux/spinlock.h>
- #include <linux/platform_device.h>
-+#include <linux/pci.h>
-
- #include <asm/cacheflush.h>
- #include <asm/efi.h>
-@@ -211,6 +212,41 @@ static __init void reserve_regions(void)
- set_bit(EFI_MEMMAP, &efi.flags);
- }
-
-+#ifdef CONFIG_PCI
-+static bool efi_pci_overlaps_efifb(struct pci_bar_update_info *update_info)
-+{
-+ /* is the screen_info frame buffer inside the pci BAR? */
-+ if (screen_info.lfb_base >= update_info->old_start &&
-+ (screen_info.lfb_base + screen_info.lfb_size) <=
-+ (update_info->old_start + update_info->size))
-+ return true;
-+
-+ return false;
-+}
-+
-+static int efi_pci_notifier(struct notifier_block *self,
-+ unsigned long cmd, void *v)
-+{
-+ struct pci_bar_update_info *update_info = v;
-+
-+ /*
-+ * When we reallocate a BAR that contains our frame buffer, set the
-+ * screen_info base to where it belongs
-+ */
-+ if (efi_pci_overlaps_efifb(update_info)) {
-+ u64 diff = (update_info->new_start - update_info->old_start);
-+ screen_info.lfb_base += diff;
-+ }
-+
-+ return NOTIFY_OK;
-+}
-+static struct notifier_block efi_pci_notifier_block = {
-+ .notifier_call = efi_pci_notifier,
-+};
-+#else
-+#define pci_notify_on_update_resource(a)
-+#endif
-+
- void __init efi_init(void)
- {
- struct efi_fdt_params params;
-@@ -243,8 +279,10 @@ void __init efi_init(void)
- reserve_regions();
- early_memunmap(memmap.map, params.mmap_size);
-
-- if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI)
-+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI) {
-+ pci_notify_on_update_resource(&efi_pci_notifier_block);
- memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
-+ }
- }
-
- static int __init register_gop_device(void)
-diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
-index 604011e0..d5c24fcd 100644
---- a/drivers/pci/setup-res.c
-+++ b/drivers/pci/setup-res.c
-@@ -23,8 +23,10 @@
- #include <linux/ioport.h>
- #include <linux/cache.h>
- #include <linux/slab.h>
-+#include <linux/notifier.h>
- #include "pci.h"
-
-+static RAW_NOTIFIER_HEAD(bar_update_chain);
-
- void pci_update_resource(struct pci_dev *dev, int resno)
- {
-@@ -35,6 +37,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
- int reg;
- enum pci_bar_type type;
- struct resource *res = dev->resource + resno;
-+ struct pci_bar_update_info update_info;
-+ struct pci_bus_region update_reg;
-+ struct resource update_res;
-
- if (dev->is_virtfn) {
- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
-@@ -77,6 +82,22 @@ void pci_update_resource(struct pci_dev *dev, int resno)
- }
-
- /*
-+ * Fetch the old BAR location from the device, so we can notify
-+ * users of that BAR that its location is changing.
-+ */
-+ pci_read_config_dword(dev, reg, &check);
-+ update_reg.start = check & PCI_BASE_ADDRESS_MEM_MASK;
-+ if (check & PCI_BASE_ADDRESS_MEM_TYPE_64) {
-+ pci_read_config_dword(dev, reg, &check);
-+ update_reg.start |= ((u64)check) << 32;
-+ }
-+ update_info.size = region.end - region.start;
-+ update_reg.end = update_reg.start + update_info.size;
-+ pcibios_bus_to_resource(dev->bus, &update_res, &update_reg);
-+ update_info.old_start = update_res.start;
-+ update_info.new_start = res->start;
-+
-+ /*
- * We can't update a 64-bit BAR atomically, so when possible,
- * disable decoding so that a half-updated BAR won't conflict
- * with another device.
-@@ -108,6 +129,14 @@ void pci_update_resource(struct pci_dev *dev, int resno)
-
- if (disable)
- pci_write_config_word(dev, PCI_COMMAND, cmd);
-+
-+ /* Tell interested parties that the BAR mapping changed */
-+ raw_notifier_call_chain(&bar_update_chain, 0, &update_info);
-+}
-+
-+int pci_notify_on_update_resource(struct notifier_block *nb)
-+{
-+ return raw_notifier_chain_register(&bar_update_chain, nb);
- }
-
- int pci_claim_resource(struct pci_dev *dev, int resource)
-diff --git a/include/linux/pci.h b/include/linux/pci.h
-index 8a85c36b..aff30e41 100644
---- a/include/linux/pci.h
-+++ b/include/linux/pci.h
-@@ -30,6 +30,7 @@
- #include <linux/device.h>
- #include <linux/io.h>
- #include <linux/resource_ext.h>
-+#include <linux/notifier.h>
- #include <uapi/linux/pci.h>
-
- #include <linux/pci_ids.h>
-@@ -1043,6 +1044,13 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
- bool pci_device_is_present(struct pci_dev *pdev);
- void pci_ignore_hotplug(struct pci_dev *dev);
-
-+struct pci_bar_update_info {
-+ u64 old_start;
-+ u64 new_start;
-+ u64 size;
-+};
-+int pci_notify_on_update_resource(struct notifier_block *nb);
-+
- /* ROM control related routines */
- int pci_enable_rom(struct pci_dev *pdev);
- void pci_disable_rom(struct pci_dev *pdev);
---
-2.11.0
-
diff --git a/patches.arch/base-Export-platform_msi_domain_-alloc-free-_irqs.patch b/patches.arch/base-Export-platform_msi_domain_-alloc-free-_irqs.patch
new file mode 100644
index 0000000000..2d899b4a81
--- /dev/null
+++ b/patches.arch/base-Export-platform_msi_domain_-alloc-free-_irqs.patch
@@ -0,0 +1,43 @@
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Tue, 26 Jan 2016 13:52:27 +0000
+Subject: base: Export platform_msi_domain_[alloc,free]_irqs
+Git-commit: bb1a793125d9cc61f2d1cff92fe3927fec45d528
+Patch-mainline: v4.5-rc2
+References: fate#320512
+
+The new functions platform_msi_domain_{alloc,free}_irqs are meant to be
+used in platform drivers, which can be built as modules. Therefore, it
+makes sense to export them for use from kernel modules.
+
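+A minimal sketch of a modular caller (the foo_* driver pieces are
+hypothetical; the signatures match the functions exported here):
+
+	static void foo_write_msi_msg(struct msi_desc *desc,
+				      struct msi_msg *msg)
+	{
+		/* program the device's MSI doorbell address/data */
+	}
+
+	err = platform_msi_domain_alloc_irqs(&pdev->dev, 4,
+					     foo_write_msi_msg);
+	if (err)
+		return err;
+	/* ... and on teardown ... */
+	platform_msi_domain_free_irqs(&pdev->dev);
+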
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Acked-by: Marc Zyngier <marc.zyngier@arm.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Rob Herring <robh+dt@kernel.org>
+Cc: Frank Rowand <frowand.list@gmail.com>
+Cc: Grant Likely <grant.likely@linaro.org>
+Cc: Jiang Liu <jiang.liu@linux.intel.com>
+Link: http://lkml.kernel.org/r/1453816347-32720-4-git-send-email-marc.zyngier@arm.com
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/base/platform-msi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/base/platform-msi.c
++++ b/drivers/base/platform-msi.c
+@@ -284,6 +284,7 @@ out_free_priv_data:
+
+ return err;
+ }
++EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
+
+ /**
+ * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
+@@ -301,6 +302,7 @@ void platform_msi_domain_free_irqs(struc
+ msi_domain_free_irqs(dev->msi_domain, dev);
+ platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
+ }
++EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
+
+ /**
+ * platform_msi_get_host_data - Query the private data associated with
diff --git a/patches.arch/qcom-0001-arm64-efi-split-off-EFI-init-and-runtime-code-for-re.patch b/patches.arch/qcom-0001-arm64-efi-split-off-EFI-init-and-runtime-code-for-re.patch
new file mode 100644
index 0000000000..d2865c3134
--- /dev/null
+++ b/patches.arch/qcom-0001-arm64-efi-split-off-EFI-init-and-runtime-code-for-re.patch
@@ -0,0 +1,803 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 30 Nov 2015 13:28:18 +0100
+Subject: arm64/efi: split off EFI init and runtime code for reuse by 32-bit
+ ARM
+Git-commit: e5bc22a42e4d46cc203fdfb6d2c76202b08666a0
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+This splits off the early EFI init and runtime code that
+- discovers the EFI params and the memory map from the FDT, and installs
+ the memblocks and config tables.
+- prepares and installs the EFI page tables so that UEFI Runtime Services
+ can be invoked at the virtual address installed by the stub.
+
+This will allow it to be reused for 32-bit ARM.
+
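+For reference, the translation that efi_to_phys() below performs is
+plain offset arithmetic within a runtime region (the example values are
+made up):
+
+	/* md->virt_addr = 0xffff0000, md->phys_addr = 0x80000000 */
+	phys = md->phys_addr + (addr - md->virt_addr);
+	/* addr 0xffff1234 maps to phys 0x80001234 */
+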
+Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/efi.c | 351 -------------------------------------
+ drivers/firmware/efi/Makefile | 2
+ drivers/firmware/efi/arm-init.c | 233 ++++++++++++++++++++++++
+ drivers/firmware/efi/arm-runtime.c | 151 +++++++++++++++
+ 4 files changed, 387 insertions(+), 350 deletions(-)
+ create mode 100644 drivers/firmware/efi/arm-init.c
+ create mode 100644 drivers/firmware/efi/arm-runtime.c
+
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -11,347 +11,15 @@
+ *
+ */
+
+-#include <linux/atomic.h>
+ #include <linux/dmi.h>
+ #include <linux/efi.h>
+-#include <linux/export.h>
+-#include <linux/memblock.h>
+-#include <linux/mm_types.h>
+-#include <linux/bootmem.h>
+-#include <linux/of.h>
+-#include <linux/of_fdt.h>
+-#include <linux/preempt.h>
+-#include <linux/rbtree.h>
+-#include <linux/rwsem.h>
+-#include <linux/sched.h>
+-#include <linux/slab.h>
+-#include <linux/spinlock.h>
+-#include <linux/platform_device.h>
++#include <linux/init.h>
+
+-#include <asm/cacheflush.h>
+ #include <asm/efi.h>
+-#include <asm/tlbflush.h>
+-#include <asm/mmu_context.h>
+-#include <asm/mmu.h>
+-#include <asm/pgtable.h>
+
+ /* we will fill this structure from the stub, so don't put it in .bss */
+ struct screen_info screen_info __section(.data);
+
+-struct efi_memory_map memmap;
+-
+-static u64 efi_system_table;
+-
+-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+-
+-static struct mm_struct efi_mm = {
+- .mm_rb = RB_ROOT,
+- .pgd = efi_pgd,
+- .mm_users = ATOMIC_INIT(2),
+- .mm_count = ATOMIC_INIT(1),
+- .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+- .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+- .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
+-};
+-
+-static int __init is_normal_ram(efi_memory_desc_t *md)
+-{
+- if (md->attribute & EFI_MEMORY_WB)
+- return 1;
+- return 0;
+-}
+-
+-/*
+- * Translate a EFI virtual address into a physical address: this is necessary,
+- * as some data members of the EFI system table are virtually remapped after
+- * SetVirtualAddressMap() has been called.
+- */
+-static phys_addr_t efi_to_phys(unsigned long addr)
+-{
+- efi_memory_desc_t *md;
+-
+- for_each_efi_memory_desc(&memmap, md) {
+- if (!(md->attribute & EFI_MEMORY_RUNTIME))
+- continue;
+- if (md->virt_addr == 0)
+- /* no virtual mapping has been installed by the stub */
+- break;
+- if (md->virt_addr <= addr &&
+- (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+- return md->phys_addr + addr - md->virt_addr;
+- }
+- return addr;
+-}
+-
+-static int __init uefi_init(void)
+-{
+- efi_char16_t *c16;
+- void *config_tables;
+- u64 table_size;
+- char vendor[100] = "unknown";
+- int i, retval;
+-
+- efi.systab = early_memremap(efi_system_table,
+- sizeof(efi_system_table_t));
+- if (efi.systab == NULL) {
+- pr_warn("Unable to map EFI system table.\n");
+- return -ENOMEM;
+- }
+-
+- set_bit(EFI_BOOT, &efi.flags);
+- set_bit(EFI_64BIT, &efi.flags);
+-
+- /*
+- * Verify the EFI Table
+- */
+- if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
+- pr_err("System table signature incorrect\n");
+- retval = -EINVAL;
+- goto out;
+- }
+- if ((efi.systab->hdr.revision >> 16) < 2)
+- pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
+- efi.systab->hdr.revision >> 16,
+- efi.systab->hdr.revision & 0xffff);
+-
+- /* Show what we know for posterity */
+- c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
+- sizeof(vendor) * sizeof(efi_char16_t));
+- if (c16) {
+- for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+- vendor[i] = c16[i];
+- vendor[i] = '\0';
+- early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
+- }
+-
+- pr_info("EFI v%u.%.02u by %s\n",
+- efi.systab->hdr.revision >> 16,
+- efi.systab->hdr.revision & 0xffff, vendor);
+-
+- table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+- config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+- table_size);
+- if (config_tables == NULL) {
+- pr_warn("Unable to map EFI config table array.\n");
+- retval = -ENOMEM;
+- goto out;
+- }
+- retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+- sizeof(efi_config_table_64_t), NULL);
+-
+- early_memunmap(config_tables, table_size);
+-out:
+- early_memunmap(efi.systab, sizeof(efi_system_table_t));
+- return retval;
+-}
+-
+-/*
+- * Return true for RAM regions we want to permanently reserve.
+- */
+-static __init int is_reserve_region(efi_memory_desc_t *md)
+-{
+- switch (md->type) {
+- case EFI_LOADER_CODE:
+- case EFI_LOADER_DATA:
+- case EFI_BOOT_SERVICES_CODE:
+- case EFI_BOOT_SERVICES_DATA:
+- case EFI_CONVENTIONAL_MEMORY:
+- case EFI_PERSISTENT_MEMORY:
+- return 0;
+- default:
+- break;
+- }
+- return is_normal_ram(md);
+-}
+-
+-static __init void reserve_regions(void)
+-{
+- efi_memory_desc_t *md;
+- u64 paddr, npages, size;
+-
+- if (efi_enabled(EFI_DBG))
+- pr_info("Processing EFI memory map:\n");
+-
+- /*
+- * Discard memblocks discovered so far: if there are any at this
+- * point, they originate from memory nodes in the DT, and UEFI
+- * uses its own memory map instead.
+- */
+- memblock_dump_all();
+- memblock_remove(0, (phys_addr_t)ULLONG_MAX);
+-
+- for_each_efi_memory_desc(&memmap, md) {
+- paddr = md->phys_addr;
+- npages = md->num_pages;
+-
+- if (efi_enabled(EFI_DBG)) {
+- char buf[64];
+-
+- pr_info(" 0x%012llx-0x%012llx %s",
+- paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+- efi_md_typeattr_format(buf, sizeof(buf), md));
+- }
+-
+- memrange_efi_to_native(&paddr, &npages);
+- size = npages << PAGE_SHIFT;
+-
+- if (is_normal_ram(md))
+- early_init_dt_add_memory_arch(paddr, size);
+-
+- if (is_reserve_region(md)) {
+- memblock_mark_nomap(paddr, size);
+- if (efi_enabled(EFI_DBG))
+- pr_cont("*");
+- }
+-
+- if (efi_enabled(EFI_DBG))
+- pr_cont("\n");
+- }
+-
+- set_bit(EFI_MEMMAP, &efi.flags);
+-}
+-
+-void __init efi_init(void)
+-{
+- struct efi_fdt_params params;
+-
+- /* Grab UEFI information placed in FDT by stub */
+- if (!efi_get_fdt_params(&params))
+- return;
+-
+- efi_system_table = params.system_table;
+-
+- memmap.phys_map = params.mmap;
+- memmap.map = early_memremap(params.mmap, params.mmap_size);
+- if (memmap.map == NULL) {
+- /*
+- * If we are booting via UEFI, the UEFI memory map is the only
+- * description of memory we have, so there is little point in
+- * proceeding if we cannot access it.
+- */
+- panic("Unable to map EFI memory map.\n");
+- }
+- memmap.map_end = memmap.map + params.mmap_size;
+- memmap.desc_size = params.desc_size;
+- memmap.desc_version = params.desc_ver;
+-
+- if (uefi_init() < 0)
+- return;
+-
+- reserve_regions();
+- early_memunmap(memmap.map, params.mmap_size);
+- memblock_reserve(params.mmap & PAGE_MASK,
+- PAGE_ALIGN(params.mmap_size +
+- (params.mmap & ~PAGE_MASK)));
+-
+- if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI)
+- memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
+-}
+-
+-static int __init register_gop_device(void)
+-{
+- void *pd;
+-
+- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+- return 0;
+-
+- /* the efifb driver accesses screen_info directly, no need to pass it */
+- pd = platform_device_register_simple("efi-framebuffer", 0, NULL, 0);
+- return PTR_ERR_OR_ZERO(pd);
+-}
+-subsys_initcall(register_gop_device);
+-
+-static bool __init efi_virtmap_init(void)
+-{
+- efi_memory_desc_t *md;
+-
+- init_new_context(NULL, &efi_mm);
+-
+- for_each_efi_memory_desc(&memmap, md) {
+- pgprot_t prot;
+-
+- if (!(md->attribute & EFI_MEMORY_RUNTIME))
+- continue;
+- if (md->virt_addr == 0)
+- return false;
+-
+- pr_info(" EFI remap 0x%016llx => %p\n",
+- md->phys_addr, (void *)md->virt_addr);
+-
+- /*
+- * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+- * executable, everything else can be mapped with the XN bits
+- * set.
+- */
+- if (!is_normal_ram(md))
+- prot = __pgprot(PROT_DEVICE_nGnRE);
+- else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+- !PAGE_ALIGNED(md->phys_addr))
+- prot = PAGE_KERNEL_EXEC;
+- else
+- prot = PAGE_KERNEL;
+-
+- create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
+- md->num_pages << EFI_PAGE_SHIFT,
+- __pgprot(pgprot_val(prot) | PTE_NG));
+- }
+- return true;
+-}
+-
+-/*
+- * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+- * non-early mapping of the UEFI system table and virtual mappings for all
+- * EFI_MEMORY_RUNTIME regions.
+- */
+-static int __init arm64_enable_runtime_services(void)
+-{
+- u64 mapsize;
+-
+- if (!efi_enabled(EFI_BOOT)) {
+- pr_info("EFI services will not be available.\n");
+- return 0;
+- }
+-
+- if (efi_runtime_disabled()) {
+- pr_info("EFI runtime services will be disabled.\n");
+- return 0;
+- }
+-
+- pr_info("Remapping and enabling EFI services.\n");
+-
+- mapsize = memmap.map_end - memmap.map;
+- memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
+- mapsize);
+- if (!memmap.map) {
+- pr_err("Failed to remap EFI memory map\n");
+- return -ENOMEM;
+- }
+- memmap.map_end = memmap.map + mapsize;
+- efi.memmap = &memmap;
+-
+- efi.systab = (__force void *)ioremap_cache(efi_system_table,
+- sizeof(efi_system_table_t));
+- if (!efi.systab) {
+- pr_err("Failed to remap EFI System Table\n");
+- return -ENOMEM;
+- }
+- set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+-
+- if (!efi_virtmap_init()) {
+- pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
+- return -ENOMEM;
+- }
+-
+- /* Set up runtime services function pointers */
+- efi_native_runtime_setup();
+- set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+-
+- efi.runtime_version = efi.systab->hdr.revision;
+-
+- return 0;
+-}
+-early_initcall(arm64_enable_runtime_services);
+-
+ static int __init arm64_dmi_init(void)
+ {
+ /*
+@@ -366,23 +34,6 @@ static int __init arm64_dmi_init(void)
+ }
+ core_initcall(arm64_dmi_init);
+
+-static void efi_set_pgd(struct mm_struct *mm)
+-{
+- switch_mm(NULL, mm, NULL);
+-}
+-
+-void efi_virtmap_load(void)
+-{
+- preempt_disable();
+- efi_set_pgd(&efi_mm);
+-}
+-
+-void efi_virtmap_unload(void)
+-{
+- efi_set_pgd(current->active_mm);
+- preempt_enable();
+-}
+-
+ /*
+ * UpdateCapsule() depends on the system being shutdown via
+ * ResetSystem().
+--- a/drivers/firmware/efi/Makefile
++++ b/drivers/firmware/efi/Makefile
+@@ -21,4 +21,6 @@ obj-$(CONFIG_EFI_RUNTIME_WRAPPERS) += ru
+ obj-$(CONFIG_EFI_STUB) += libstub/
+ obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+
++arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
++obj-$(CONFIG_ARM64) += $(arm-obj-y)
+ obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
+--- /dev/null
++++ b/drivers/firmware/efi/arm-init.c
+@@ -0,0 +1,233 @@
++/*
++ * Extensible Firmware Interface
++ *
++ * Based on Extensible Firmware Interface Specification version 2.4
++ *
++ * Copyright (C) 2013 - 2015 Linaro Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/memblock.h>
++#include <linux/mm_types.h>
++#include <linux/of.h>
++#include <linux/of_fdt.h>
++#include <linux/platform_device.h>
++
++#include <asm/efi.h>
++
++struct efi_memory_map memmap;
++
++u64 efi_system_table;
++
++static int __init is_normal_ram(efi_memory_desc_t *md)
++{
++ if (md->attribute & EFI_MEMORY_WB)
++ return 1;
++ return 0;
++}
++
++/*
++ * Translate an EFI virtual address into a physical address: this is necessary,
++ * as some data members of the EFI system table are virtually remapped after
++ * SetVirtualAddressMap() has been called.
++ */
++static phys_addr_t efi_to_phys(unsigned long addr)
++{
++ efi_memory_desc_t *md;
++
++ for_each_efi_memory_desc(&memmap, md) {
++ if (!(md->attribute & EFI_MEMORY_RUNTIME))
++ continue;
++ if (md->virt_addr == 0)
++ /* no virtual mapping has been installed by the stub */
++ break;
++ if (md->virt_addr <= addr &&
++ (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
++ return md->phys_addr + addr - md->virt_addr;
++ }
++ return addr;
++}
++
++static int __init uefi_init(void)
++{
++ efi_char16_t *c16;
++ void *config_tables;
++ u64 table_size;
++ char vendor[100] = "unknown";
++ int i, retval;
++
++ efi.systab = early_memremap(efi_system_table,
++ sizeof(efi_system_table_t));
++ if (efi.systab == NULL) {
++ pr_warn("Unable to map EFI system table.\n");
++ return -ENOMEM;
++ }
++
++ set_bit(EFI_BOOT, &efi.flags);
++ set_bit(EFI_64BIT, &efi.flags);
++
++ /*
++ * Verify the EFI Table
++ */
++ if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
++ pr_err("System table signature incorrect\n");
++ retval = -EINVAL;
++ goto out;
++ }
++ if ((efi.systab->hdr.revision >> 16) < 2)
++ pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
++ efi.systab->hdr.revision >> 16,
++ efi.systab->hdr.revision & 0xffff);
++
++ /* Show what we know for posterity */
++ c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
++ sizeof(vendor) * sizeof(efi_char16_t));
++ if (c16) {
++ for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
++ vendor[i] = c16[i];
++ vendor[i] = '\0';
++ early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
++ }
++
++ pr_info("EFI v%u.%.02u by %s\n",
++ efi.systab->hdr.revision >> 16,
++ efi.systab->hdr.revision & 0xffff, vendor);
++
++ table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
++ config_tables = early_memremap(efi_to_phys(efi.systab->tables),
++ table_size);
++ if (config_tables == NULL) {
++ pr_warn("Unable to map EFI config table array.\n");
++ retval = -ENOMEM;
++ goto out;
++ }
++ retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
++ sizeof(efi_config_table_64_t), NULL);
++
++ early_memunmap(config_tables, table_size);
++out:
++ early_memunmap(efi.systab, sizeof(efi_system_table_t));
++ return retval;
++}
++
++/*
++ * Return true for RAM regions we want to permanently reserve.
++ */
++static __init int is_reserve_region(efi_memory_desc_t *md)
++{
++ switch (md->type) {
++ case EFI_LOADER_CODE:
++ case EFI_LOADER_DATA:
++ case EFI_BOOT_SERVICES_CODE:
++ case EFI_BOOT_SERVICES_DATA:
++ case EFI_CONVENTIONAL_MEMORY:
++ case EFI_PERSISTENT_MEMORY:
++ return 0;
++ default:
++ break;
++ }
++ return is_normal_ram(md);
++}
++
++static __init void reserve_regions(void)
++{
++ efi_memory_desc_t *md;
++ u64 paddr, npages, size;
++
++ if (efi_enabled(EFI_DBG))
++ pr_info("Processing EFI memory map:\n");
++
++ /*
++ * Discard memblocks discovered so far: if there are any at this
++ * point, they originate from memory nodes in the DT, and UEFI
++ * uses its own memory map instead.
++ */
++ memblock_dump_all();
++ memblock_remove(0, (phys_addr_t)ULLONG_MAX);
++
++ for_each_efi_memory_desc(&memmap, md) {
++ paddr = md->phys_addr;
++ npages = md->num_pages;
++
++ if (efi_enabled(EFI_DBG)) {
++ char buf[64];
++
++ pr_info(" 0x%012llx-0x%012llx %s",
++ paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
++ efi_md_typeattr_format(buf, sizeof(buf), md));
++ }
++
++ memrange_efi_to_native(&paddr, &npages);
++ size = npages << PAGE_SHIFT;
++
++ if (is_normal_ram(md))
++ early_init_dt_add_memory_arch(paddr, size);
++
++ if (is_reserve_region(md)) {
++ memblock_mark_nomap(paddr, size);
++ if (efi_enabled(EFI_DBG))
++ pr_cont("*");
++ }
++
++ if (efi_enabled(EFI_DBG))
++ pr_cont("\n");
++ }
++
++ set_bit(EFI_MEMMAP, &efi.flags);
++}
++
++void __init efi_init(void)
++{
++ struct efi_fdt_params params;
++
++ /* Grab UEFI information placed in FDT by stub */
++ if (!efi_get_fdt_params(&params))
++ return;
++
++ efi_system_table = params.system_table;
++
++ memmap.phys_map = params.mmap;
++ memmap.map = early_memremap(params.mmap, params.mmap_size);
++ if (memmap.map == NULL) {
++ /*
++ * If we are booting via UEFI, the UEFI memory map is the only
++ * description of memory we have, so there is little point in
++ * proceeding if we cannot access it.
++ */
++ panic("Unable to map EFI memory map.\n");
++ }
++ memmap.map_end = memmap.map + params.mmap_size;
++ memmap.desc_size = params.desc_size;
++ memmap.desc_version = params.desc_ver;
++
++ if (uefi_init() < 0)
++ return;
++
++ reserve_regions();
++ early_memunmap(memmap.map, params.mmap_size);
++ memblock_reserve(params.mmap & PAGE_MASK,
++ PAGE_ALIGN(params.mmap_size +
++ (params.mmap & ~PAGE_MASK)));
++
++ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI)
++ memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
++}
++
++static int __init register_gop_device(void)
++{
++ void *pd;
++
++ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
++ return 0;
++
++ /* the efifb driver accesses screen_info directly, no need to pass it */
++ pd = platform_device_register_simple("efi-framebuffer", 0, NULL, 0);
++ return PTR_ERR_OR_ZERO(pd);
++}
++subsys_initcall(register_gop_device);
+--- /dev/null
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -0,0 +1,151 @@
++/*
++ * Extensible Firmware Interface
++ *
++ * Based on Extensible Firmware Interface Specification version 2.4
++ *
++ * Copyright (C) 2013, 2014 Linaro Ltd.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++
++#include <linux/efi.h>
++#include <linux/memblock.h>
++#include <linux/mm_types.h>
++#include <linux/preempt.h>
++#include <linux/rbtree.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++
++#include <asm/cacheflush.h>
++#include <asm/efi.h>
++#include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
++#include <asm/mmu.h>
++#include <asm/pgtable.h>
++
++static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
++
++extern u64 efi_system_table;
++
++static struct mm_struct efi_mm = {
++ .mm_rb = RB_ROOT,
++ .pgd = efi_pgd,
++ .mm_users = ATOMIC_INIT(2),
++ .mm_count = ATOMIC_INIT(1),
++ .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
++ .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
++ .mmlist = LIST_HEAD_INIT(efi_mm.mmlist),
++};
++
++static bool __init efi_virtmap_init(void)
++{
++ efi_memory_desc_t *md;
++
++ init_new_context(NULL, &efi_mm);
++
++ for_each_efi_memory_desc(&memmap, md) {
++ pgprot_t prot;
++
++ if (!(md->attribute & EFI_MEMORY_RUNTIME))
++ continue;
++ if (md->virt_addr == 0)
++ return false;
++
++ pr_info(" EFI remap 0x%016llx => %p\n",
++ md->phys_addr, (void *)md->virt_addr);
++
++ /*
++ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
++ * executable, everything else can be mapped with the XN bits
++ * set.
++ */
++ if (!is_normal_ram(md))
++ prot = __pgprot(PROT_DEVICE_nGnRE);
++ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
++ !PAGE_ALIGNED(md->phys_addr))
++ prot = PAGE_KERNEL_EXEC;
++ else
++ prot = PAGE_KERNEL;
++
++ create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
++ md->num_pages << EFI_PAGE_SHIFT,
++ __pgprot(pgprot_val(prot) | PTE_NG));
++ }
++ return true;
++}
++
++/*
++ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
++ * non-early mapping of the UEFI system table and virtual mappings for all
++ * EFI_MEMORY_RUNTIME regions.
++ */
++static int __init arm64_enable_runtime_services(void)
++{
++ u64 mapsize;
++
++ if (!efi_enabled(EFI_BOOT)) {
++ pr_info("EFI services will not be available.\n");
++ return 0;
++ }
++
++ if (efi_runtime_disabled()) {
++ pr_info("EFI runtime services will be disabled.\n");
++ return 0;
++ }
++
++ pr_info("Remapping and enabling EFI services.\n");
++
++ mapsize = memmap.map_end - memmap.map;
++ memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
++ mapsize);
++ if (!memmap.map) {
++ pr_err("Failed to remap EFI memory map\n");
++ return -ENOMEM;
++ }
++ memmap.map_end = memmap.map + mapsize;
++ efi.memmap = &memmap;
++
++ efi.systab = (__force void *)ioremap_cache(efi_system_table,
++ sizeof(efi_system_table_t));
++ if (!efi.systab) {
++ pr_err("Failed to remap EFI System Table\n");
++ return -ENOMEM;
++ }
++ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
++
++ if (!efi_virtmap_init()) {
++ pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
++ return -ENOMEM;
++ }
++
++ /* Set up runtime services function pointers */
++ efi_native_runtime_setup();
++ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++
++ efi.runtime_version = efi.systab->hdr.revision;
++
++ return 0;
++}
++early_initcall(arm64_enable_runtime_services);
++
++static void efi_set_pgd(struct mm_struct *mm)
++{
++ switch_mm(NULL, mm, NULL);
++}
++
++void efi_virtmap_load(void)
++{
++ preempt_disable();
++ efi_set_pgd(&efi_mm);
++}
++
++void efi_virtmap_unload(void)
++{
++ efi_set_pgd(current->active_mm);
++ preempt_enable();
++}
diff --git a/patches.arch/qcom-0002-arm64-efi-refactor-EFI-init-and-runtime-code-for-reu.patch b/patches.arch/qcom-0002-arm64-efi-refactor-EFI-init-and-runtime-code-for-reu.patch
new file mode 100644
index 0000000000..bfa0041386
--- /dev/null
+++ b/patches.arch/qcom-0002-arm64-efi-refactor-EFI-init-and-runtime-code-for-reu.patch
@@ -0,0 +1,229 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 30 Nov 2015 13:28:19 +0100
+Subject: arm64/efi: refactor EFI init and runtime code for reuse by 32-bit ARM
+Git-commit: f7d924894265794f447ea799dd853400749b5a22
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+This refactors the EFI init and runtime code that will be shared
+between arm64 and ARM so that it can be built for both archs.
+
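+The key change is that each arch now supplies one hook,
+efi_create_mapping(), which the shared runtime code calls per memory
+descriptor; schematically (condensed from the hunks below):
+
+	for_each_efi_memory_desc(&memmap, md) {
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		ret = efi_create_mapping(&efi_mm, md);	/* arch hook */
+		if (ret)
+			return false;
+	}
+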
+Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/include/asm/efi.h | 9 +++++++
+ arch/arm64/kernel/efi.c | 23 +++++++++++++++++++
+ drivers/firmware/efi/arm-init.c | 7 +++---
+ drivers/firmware/efi/arm-runtime.c | 43 +++++++++++++------------------------
+ drivers/firmware/efi/efi.c | 2 +
+ 5 files changed, 54 insertions(+), 30 deletions(-)
+
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -2,7 +2,9 @@
+ #define _ASM_EFI_H
+
+ #include <asm/io.h>
++#include <asm/mmu_context.h>
+ #include <asm/neon.h>
++#include <asm/tlbflush.h>
+ #include <asm/ptrace.h>
+
+ #ifdef CONFIG_EFI
+@@ -11,6 +13,8 @@ extern void efi_init(void);
+ #define efi_init()
+ #endif
+
++int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
++
+ #define arch_efi_call_virt_setup() \
+ ({ \
+ kernel_neon_begin(); \
+@@ -68,6 +72,11 @@ static inline void efifb_setup_from_dmi(
+ * Services are enabled and the EFI_RUNTIME_SERVICES bit set.
+ */
+
++static inline void efi_set_pgd(struct mm_struct *mm)
++{
++ switch_mm(NULL, mm, NULL);
++}
++
+ void efi_virtmap_load(void);
+ void efi_virtmap_unload(void);
+
+--- a/arch/arm64/kernel/efi.c
++++ b/arch/arm64/kernel/efi.c
+@@ -20,6 +20,29 @@
+ /* we will fill this structure from the stub, so don't put it in .bss */
+ struct screen_info screen_info __section(.data);
+
++int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
++{
++ pteval_t prot_val;
++
++ /*
++ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
++ * executable, everything else can be mapped with the XN bits
++ * set.
++ */
++ if ((md->attribute & EFI_MEMORY_WB) == 0)
++ prot_val = PROT_DEVICE_nGnRE;
++ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
++ !PAGE_ALIGNED(md->phys_addr))
++ prot_val = pgprot_val(PAGE_KERNEL_EXEC);
++ else
++ prot_val = pgprot_val(PAGE_KERNEL);
++
++ create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
++ md->num_pages << EFI_PAGE_SHIFT,
++ __pgprot(prot_val | PTE_NG));
++ return 0;
++}
++
+ static int __init arm64_dmi_init(void)
+ {
+ /*
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -58,7 +58,7 @@ static int __init uefi_init(void)
+ {
+ efi_char16_t *c16;
+ void *config_tables;
+- u64 table_size;
++ size_t table_size;
+ char vendor[100] = "unknown";
+ int i, retval;
+
+@@ -70,7 +70,8 @@ static int __init uefi_init(void)
+ }
+
+ set_bit(EFI_BOOT, &efi.flags);
+- set_bit(EFI_64BIT, &efi.flags);
++ if (IS_ENABLED(CONFIG_64BIT))
++ set_bit(EFI_64BIT, &efi.flags);
+
+ /*
+ * Verify the EFI Table
+@@ -108,7 +109,7 @@ static int __init uefi_init(void)
+ goto out;
+ }
+ retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+- sizeof(efi_config_table_64_t), NULL);
++ sizeof(efi_config_table_t), NULL);
+
+ early_memunmap(config_tables, table_size);
+ out:
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -12,6 +12,7 @@
+ */
+
+ #include <linux/efi.h>
++#include <linux/io.h>
+ #include <linux/memblock.h>
+ #include <linux/mm_types.h>
+ #include <linux/preempt.h>
+@@ -23,18 +24,14 @@
+
+ #include <asm/cacheflush.h>
+ #include <asm/efi.h>
+-#include <asm/tlbflush.h>
+-#include <asm/mmu_context.h>
+ #include <asm/mmu.h>
++#include <asm/pgalloc.h>
+ #include <asm/pgtable.h>
+
+-static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+-
+ extern u64 efi_system_table;
+
+ static struct mm_struct efi_mm = {
+ .mm_rb = RB_ROOT,
+- .pgd = efi_pgd,
+ .mm_users = ATOMIC_INIT(2),
+ .mm_count = ATOMIC_INIT(1),
+ .mmap_sem = __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+@@ -46,35 +43,27 @@ static bool __init efi_virtmap_init(void
+ {
+ efi_memory_desc_t *md;
+
++ efi_mm.pgd = pgd_alloc(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ for_each_efi_memory_desc(&memmap, md) {
+- pgprot_t prot;
++ phys_addr_t phys = md->phys_addr;
++ int ret;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+ return false;
+
+- pr_info(" EFI remap 0x%016llx => %p\n",
+- md->phys_addr, (void *)md->virt_addr);
+-
+- /*
+- * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+- * executable, everything else can be mapped with the XN bits
+- * set.
+- */
+- if (!is_normal_ram(md))
+- prot = __pgprot(PROT_DEVICE_nGnRE);
+- else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+- !PAGE_ALIGNED(md->phys_addr))
+- prot = PAGE_KERNEL_EXEC;
+- else
+- prot = PAGE_KERNEL;
+-
+- create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
+- md->num_pages << EFI_PAGE_SHIFT,
+- __pgprot(pgprot_val(prot) | PTE_NG));
++ ret = efi_create_mapping(&efi_mm, md);
++ if (!ret) {
++ pr_info(" EFI remap %pa => %p\n",
++ &phys, (void *)(unsigned long)md->virt_addr);
++ } else {
++ pr_warn(" EFI remap %pa: failed to create mapping (%d)\n",
++ &phys, ret);
++ return false;
++ }
+ }
+ return true;
+ }
+@@ -84,7 +73,7 @@ static bool __init efi_virtmap_init(void
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
+ */
+-static int __init arm64_enable_runtime_services(void)
++static int __init arm_enable_runtime_services(void)
+ {
+ u64 mapsize;
+
+@@ -131,12 +120,7 @@ static int __init arm64_enable_runtime_services(void)
+
+ return 0;
+ }
+-early_initcall(arm64_enable_runtime_services);
+-
+-static void efi_set_pgd(struct mm_struct *mm)
+-{
+- switch_mm(NULL, mm, NULL);
+-}
++early_initcall(arm_enable_runtime_services);
+
+ void efi_virtmap_load(void)
+ {
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -25,6 +25,8 @@
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+
++#include <asm/efi.h>
++
+ struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+ .acpi = EFI_INVALID_TABLE_ADDR,
diff --git a/patches.arch/qcom-0003-x86-efi-bgrt-Fix-kernel-panic-when-mapping-BGRT-data.patch b/patches.arch/qcom-0003-x86-efi-bgrt-Fix-kernel-panic-when-mapping-BGRT-data.patch
new file mode 100644
index 0000000000..7687bcacab
--- /dev/null
+++ b/patches.arch/qcom-0003-x86-efi-bgrt-Fix-kernel-panic-when-mapping-BGRT-data.patch
@@ -0,0 +1,189 @@
+From: Sai Praneeth <sai.praneeth.prakhya@intel.com>
+Date: Wed, 9 Dec 2015 15:41:08 -0800
+Subject: x86/efi-bgrt: Fix kernel panic when mapping BGRT data
+Git-commit: 50a0cb565246f20d59cdb161778531e4b19d35ac
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+Starting with commit 35eb8b81edd4 ("x86/efi: Build our own page table
+structures"), EFI regions have a separate page directory called
+"efi_pgd", and accessing any EFI region requires first switching %cr3 to
+that page table. The bgrt code copies the bgrt_header and image, but
+these regions fall under "EFI_BOOT_SERVICES_DATA", so accessing them
+without switching %cr3 to efi_pgd causes the page fault shown below.
+
+[ 0.251599] Last level dTLB entries: 4KB 64, 2MB 0, 4MB 0, 1GB 4
+[ 0.259126] Freeing SMP alternatives memory: 32K (ffffffff8230e000 - ffffffff82316000)
+[ 0.271803] BUG: unable to handle kernel paging request at fffffffefce35002
+[ 0.279740] IP: [<ffffffff821bca49>] efi_bgrt_init+0x144/0x1fd
+[ 0.286383] PGD 300f067 PUD 0
+[ 0.289879] Oops: 0000 [#1] SMP
+[ 0.293566] Modules linked in:
+[ 0.297039] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.4.0-rc1-eywa-eywa-built-in-47041+ #2
+[ 0.306619] Hardware name: Intel Corporation Skylake Client platform/Skylake Y LPDDR3 RVP3, BIOS SKLSE2R1.R00.B104.B01.1511110114 11/11/2015
+[ 0.320925] task: ffffffff820134c0 ti: ffffffff82000000 task.ti: ffffffff82000000
+[ 0.329420] RIP: 0010:[<ffffffff821bca49>] [<ffffffff821bca49>] efi_bgrt_init+0x144/0x1fd
+[ 0.338821] RSP: 0000:ffffffff82003f18 EFLAGS: 00010246
+[ 0.344852] RAX: fffffffefce35000 RBX: fffffffefce35000 RCX: fffffffefce2b000
+[ 0.352952] RDX: 000000008a82b000 RSI: ffffffff8235bb80 RDI: 000000008a835000
+[ 0.361050] RBP: ffffffff82003f30 R08: 000000008a865000 R09: ffffffffff202850
+[ 0.369149] R10: ffffffff811ad62f R11: 0000000000000000 R12: 0000000000000000
+[ 0.377248] R13: ffff88016dbaea40 R14: ffffffff822622c0 R15: ffffffff82003fb0
+[ 0.385348] FS: 0000000000000000(0000) GS:ffff88016d800000(0000) knlGS:0000000000000000
+[ 0.394533] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 0.401054] CR2: fffffffefce35002 CR3: 000000000300c000 CR4: 00000000003406f0
+[ 0.409153] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 0.417252] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 0.425350] Stack:
+[ 0.427638] ffffffffffffffff ffffffff82256900 ffff88016dbaea40 ffffffff82003f40
+[ 0.436086] ffffffff821bbce0 ffffffff82003f88 ffffffff8219c0c2 0000000000000000
+[ 0.444533] ffffffff8219ba4a ffffffff822622c0 0000000000083000 00000000ffffffff
+[ 0.452978] Call Trace:
+[ 0.455763] [<ffffffff821bbce0>] efi_late_init+0x9/0xb
+[ 0.461697] [<ffffffff8219c0c2>] start_kernel+0x463/0x47f
+[ 0.467928] [<ffffffff8219ba4a>] ? set_init_arg+0x55/0x55
+[ 0.474159] [<ffffffff8219b120>] ? early_idt_handler_array+0x120/0x120
+[ 0.481669] [<ffffffff8219b5ee>] x86_64_start_reservations+0x2a/0x2c
+[ 0.488982] [<ffffffff8219b72d>] x86_64_start_kernel+0x13d/0x14c
+[ 0.495897] Code: 00 41 b4 01 48 8b 78 28 e8 09 36 01 00 48 85 c0 48 89 c3 75 13 48 c7 c7 f8 ac d3 81 31 c0 e8 d7 3b fb fe e9 b5 00 00 00 45 84 e4 <44> 8b 6b 02 74 0d be 06 00 00 00 48 89 df e8 ae 34 0$
+[ 0.518151] RIP [<ffffffff821bca49>] efi_bgrt_init+0x144/0x1fd
+[ 0.524888] RSP <ffffffff82003f18>
+[ 0.528851] CR2: fffffffefce35002
+[ 0.532615] ---[ end trace 7b06521e6ebf2aea ]---
+[ 0.537852] Kernel panic - not syncing: Attempted to kill the idle task!
+
+As noted above, one way to fix this bug is to switch %cr3 to efi_pgd,
+but we do not take that route because it would leak the details of how
+we switch to EFI page tables into a new call site and would duplicate
+code. Instead, we remove the call to efi_lookup_mapped_addr() and always
+use early_mem*() rather than early_io*(), because we want to remap RAM
+regions, not I/O regions. We also delete efi_lookup_mapped_addr(), since
+nothing uses it any longer.
+
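+Condensed, the remap idiom the patch switches to is the standard
+early_memremap() sequence for RAM-backed firmware tables (this is a
+summary of the hunks below, not additional code):
+
+	void *image = early_memremap(bgrt_tab->image_address,
+				     sizeof(bmp_header));
+	if (!image)
+		return;			/* ignore the BGRT */
+	memcpy(&bmp_header, image, sizeof(bmp_header));
+	early_memunmap(image, sizeof(bmp_header));
+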
+Signed-off-by: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Reported-by: Wendy Wang <wendy.wang@intel.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Josh Triplett <josh@joshtriplett.org>
+Cc: Ricardo Neri <ricardo.neri@intel.com>
+Cc: Ravi Shankar <ravi.v.shankar@intel.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi-bgrt.c | 39 ++++++++++++++-------------------------
+ drivers/firmware/efi/efi.c | 32 --------------------------------
+ 2 files changed, 14 insertions(+), 57 deletions(-)
+
+diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
+index 9a52b5c..bf51f4c 100644
+--- a/arch/x86/platform/efi/efi-bgrt.c
++++ b/arch/x86/platform/efi/efi-bgrt.c
+@@ -31,8 +31,7 @@ struct bmp_header {
+ void __init efi_bgrt_init(void)
+ {
+ acpi_status status;
+- void __iomem *image;
+- bool ioremapped = false;
++ void *image;
+ struct bmp_header bmp_header;
+
+ if (acpi_disabled)
+@@ -73,20 +72,14 @@ void __init efi_bgrt_init(void)
+ return;
+ }
+
+- image = efi_lookup_mapped_addr(bgrt_tab->image_address);
++ image = early_memremap(bgrt_tab->image_address, sizeof(bmp_header));
+ if (!image) {
+- image = early_ioremap(bgrt_tab->image_address,
+- sizeof(bmp_header));
+- ioremapped = true;
+- if (!image) {
+- pr_err("Ignoring BGRT: failed to map image header memory\n");
+- return;
+- }
++ pr_err("Ignoring BGRT: failed to map image header memory\n");
++ return;
+ }
+
+- memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
+- if (ioremapped)
+- early_iounmap(image, sizeof(bmp_header));
++ memcpy(&bmp_header, image, sizeof(bmp_header));
++ early_memunmap(image, sizeof(bmp_header));
+ bgrt_image_size = bmp_header.size;
+
+ bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
+@@ -96,18 +89,14 @@ void __init efi_bgrt_init(void)
+ return;
+ }
+
+- if (ioremapped) {
+- image = early_ioremap(bgrt_tab->image_address,
+- bmp_header.size);
+- if (!image) {
+- pr_err("Ignoring BGRT: failed to map image memory\n");
+- kfree(bgrt_image);
+- bgrt_image = NULL;
+- return;
+- }
++ image = early_memremap(bgrt_tab->image_address, bmp_header.size);
++ if (!image) {
++ pr_err("Ignoring BGRT: failed to map image memory\n");
++ kfree(bgrt_image);
++ bgrt_image = NULL;
++ return;
+ }
+
+- memcpy_fromio(bgrt_image, image, bgrt_image_size);
+- if (ioremapped)
+- early_iounmap(image, bmp_header.size);
++ memcpy(bgrt_image, image, bgrt_image_size);
++ early_memunmap(image, bmp_header.size);
+ }
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 027ca21..e9c458b 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -324,38 +324,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
+ return end;
+ }
+
+-/*
+- * We can't ioremap data in EFI boot services RAM, because we've already mapped
+- * it as RAM. So, look it up in the existing EFI memory map instead. Only
+- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
+- */
+-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
+-{
+- struct efi_memory_map *map;
+- void *p;
+- map = efi.memmap;
+- if (!map)
+- return NULL;
+- if (WARN_ON(!map->map))
+- return NULL;
+- for (p = map->map; p < map->map_end; p += map->desc_size) {
+- efi_memory_desc_t *md = p;
+- u64 size = md->num_pages << EFI_PAGE_SHIFT;
+- u64 end = md->phys_addr + size;
+- if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+- md->type != EFI_BOOT_SERVICES_CODE &&
+- md->type != EFI_BOOT_SERVICES_DATA)
+- continue;
+- if (!md->virt_addr)
+- continue;
+- if (phys_addr >= md->phys_addr && phys_addr < end) {
+- phys_addr += md->virt_addr - md->phys_addr;
+- return (__force void __iomem *)(unsigned long)phys_addr;
+- }
+- }
+- return NULL;
+-}
+-
+ static __initdata efi_config_table_type_t common_tables[] = {
+ {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
+ {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
+--
+2.10.0
+
diff --git a/patches.arch/qcom-0004-ARM-wire-up-UEFI-init-and-runtime-support.patch b/patches.arch/qcom-0004-ARM-wire-up-UEFI-init-and-runtime-support.patch
new file mode 100644
index 0000000000..f43dc6d6c9
--- /dev/null
+++ b/patches.arch/qcom-0004-ARM-wire-up-UEFI-init-and-runtime-support.patch
@@ -0,0 +1,189 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Sep 2015 13:49:52 -0700
+Subject: ARM: wire up UEFI init and runtime support
+Git-commit: da58fb6571bf40e5b2287d6aa3bbca04965f5677
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+This adds support to the kernel proper for booting via UEFI. It shares
+most of the code with arm64, so this patch mostly just wires it up for
+use with ARM.
+
+Note that this does not include the EFI stub; it is added in a
+subsequent patch.
+
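+To illustrate the wrapper added below: a runtime-service call made
+through efi_call_virt() is bracketed by the virtmap switch, roughly
+(GetTime is just an example service):
+
+	efi_status_t status;
+
+	status = efi_call_virt(get_time, &tm, NULL);
+	/* expands to: efi_virtmap_load();
+	 *             efi.systab->runtime->get_time(&tm, NULL);
+	 *             efi_virtmap_unload(); */
+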
+Tested-by: Ryan Harkin <ryan.harkin@linaro.org>
+Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm/include/asm/efi.h | 60 +++++++++++++++++++++++++++++++++++++
+ arch/arm/include/asm/mmu_context.h | 2 -
+ arch/arm/kernel/Makefile | 1
+ arch/arm/kernel/efi.c | 38 +++++++++++++++++++++++
+ arch/arm/kernel/setup.c | 3 +
+ drivers/firmware/efi/Makefile | 1
+ 6 files changed, 104 insertions(+), 1 deletion(-)
+ create mode 100644 arch/arm/include/asm/efi.h
+ create mode 100644 arch/arm/kernel/efi.c
+
+--- /dev/null
++++ b/arch/arm/include/asm/efi.h
+@@ -0,0 +1,60 @@
++/*
++ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __ASM_ARM_EFI_H
++#define __ASM_ARM_EFI_H
++
++#include <asm/cacheflush.h>
++#include <asm/cachetype.h>
++#include <asm/early_ioremap.h>
++#include <asm/fixmap.h>
++#include <asm/highmem.h>
++#include <asm/mach/map.h>
++#include <asm/mmu_context.h>
++#include <asm/pgtable.h>
++
++#ifdef CONFIG_EFI
++void efi_init(void);
++
++int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
++
++#define efi_call_virt(f, ...) \
++({ \
++ efi_##f##_t *__f; \
++ efi_status_t __s; \
++ \
++ efi_virtmap_load(); \
++ __f = efi.systab->runtime->f; \
++ __s = __f(__VA_ARGS__); \
++ efi_virtmap_unload(); \
++ __s; \
++})
++
++#define __efi_call_virt(f, ...) \
++({ \
++ efi_##f##_t *__f; \
++ \
++ efi_virtmap_load(); \
++ __f = efi.systab->runtime->f; \
++ __f(__VA_ARGS__); \
++ efi_virtmap_unload(); \
++})
++
++static inline void efi_set_pgd(struct mm_struct *mm)
++{
++ check_and_switch_context(mm, NULL);
++}
++
++void efi_virtmap_load(void);
++void efi_virtmap_unload(void);
++
++#else
++#define efi_init()
++#endif /* CONFIG_EFI */
++
++#endif /* _ASM_ARM_EFI_H */
+--- a/arch/arm/include/asm/mmu_context.h
++++ b/arch/arm/include/asm/mmu_context.h
+@@ -26,7 +26,7 @@ void __check_vmalloc_seq(struct mm_struc
+ #ifdef CONFIG_CPU_HAS_ASID
+
+ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
+-#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
++#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
+
+ #ifdef CONFIG_ARM_ERRATA_798181
+ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+--- a/arch/arm/kernel/Makefile
++++ b/arch/arm/kernel/Makefile
+@@ -77,6 +77,7 @@ CFLAGS_pj4-cp0.o := -marm
+ AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
+ obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
+ obj-$(CONFIG_VDSO) += vdso.o
++obj-$(CONFIG_EFI) += efi.o
+
+ ifneq ($(CONFIG_ARCH_EBSA110),y)
+ obj-y += io.o
+--- /dev/null
++++ b/arch/arm/kernel/efi.c
+@@ -0,0 +1,38 @@
++/*
++ * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/efi.h>
++#include <asm/efi.h>
++#include <asm/mach/map.h>
++#include <asm/mmu_context.h>
++
++int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
++{
++ struct map_desc desc = {
++ .virtual = md->virt_addr,
++ .pfn = __phys_to_pfn(md->phys_addr),
++ .length = md->num_pages * EFI_PAGE_SIZE,
++ };
++
++ /*
++ * Order is important here: memory regions may have all of the
++ * bits below set (and usually do), so we check them in order of
++ * preference.
++ */
++ if (md->attribute & EFI_MEMORY_WB)
++ desc.type = MT_MEMORY_RWX;
++ else if (md->attribute & EFI_MEMORY_WT)
++ desc.type = MT_MEMORY_RWX_NONCACHED;
++ else if (md->attribute & EFI_MEMORY_WC)
++ desc.type = MT_DEVICE_WC;
++ else
++ desc.type = MT_DEVICE;
++
++ create_mapping_late(mm, &desc, true);
++ return 0;
++}
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -7,6 +7,7 @@
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
++#include <linux/efi.h>
+ #include <linux/export.h>
+ #include <linux/kernel.h>
+ #include <linux/stddef.h>
+@@ -37,6 +38,7 @@
+ #include <asm/cp15.h>
+ #include <asm/cpu.h>
+ #include <asm/cputype.h>
++#include <asm/efi.h>
+ #include <asm/elf.h>
+ #include <asm/fixmap.h>
+ #include <asm/procinfo.h>
+@@ -965,6 +967,7 @@ void __init setup_arch(char **cmdline_p)
+ early_paging_init(mdesc);
+ #endif
+ setup_dma_zone(mdesc);
++ efi_init();
+ sanity_check_meminfo();
+ arm_memblock_init(mdesc);
+
+--- a/drivers/firmware/efi/Makefile
++++ b/drivers/firmware/efi/Makefile
+@@ -22,5 +22,6 @@ obj-$(CONFIG_EFI_STUB) += libstub/
+ obj-$(CONFIG_EFI_FAKE_MEMMAP) += fake_mem.o
+
+ arm-obj-$(CONFIG_EFI) := arm-init.o arm-runtime.o
++obj-$(CONFIG_ARM) += $(arm-obj-y)
+ obj-$(CONFIG_ARM64) += $(arm-obj-y)
+ obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o
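
The efi_call_virt() macros introduced above bracket every runtime-service
invocation with efi_virtmap_load()/efi_virtmap_unload(). The following is an
illustrative user-space sketch of that bracketing pattern only — the
rt_services struct and all function bodies are hypothetical stand-ins, not
kernel API. Like the kernel macro, it relies on the GCC statement-expression
extension (builds with gcc/clang):

/*
 * Sketch only: models the load/call/unload bracketing of efi_call_virt().
 */
#include <stdio.h>

typedef unsigned long efi_status_t;

struct rt_services {
	efi_status_t (*get_time)(void);	/* stands in for systab->runtime->f */
};

static void efi_virtmap_load(void)   { puts("switch to EFI mm");    }
static void efi_virtmap_unload(void) { puts("restore previous mm"); }

static efi_status_t fake_get_time(void) { return 0; }

/* Same shape as efi_call_virt(): load, call, unload, return status. */
#define efi_call_virt(rt, f, ...)	\
({					\
	efi_status_t __s;		\
	efi_virtmap_load();		\
	__s = (rt)->f(__VA_ARGS__);	\
	efi_virtmap_unload();		\
	__s;				\
})

int main(void)
{
	struct rt_services rt = { .get_time = fake_get_time };

	return (int)efi_call_virt(&rt, get_time);
}

The statement expression matters: it lets the macro return the service's
status after the previous mm has already been restored, which is why the
kernel uses the same construct.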
diff --git a/patches.arch/qcom-0005-ARM-add-UEFI-stub-support.patch b/patches.arch/qcom-0005-ARM-add-UEFI-stub-support.patch
new file mode 100644
index 0000000000..261813a82f
--- /dev/null
+++ b/patches.arch/qcom-0005-ARM-add-UEFI-stub-support.patch
@@ -0,0 +1,464 @@
+From: Roy Franz <roy.franz@linaro.org>
+Date: Wed, 23 Sep 2015 20:17:54 -0700
+Subject: ARM: add UEFI stub support
+Git-commit: 81a0bc39ea1960bbf8ece6a895d7cfd2d9efa28a
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+This patch adds EFI stub support for the ARM Linux kernel.
+
+The EFI stub operates similarly to the x86 and arm64 stubs: it is a
+shim between the EFI firmware and the normal zImage entry point, and
+sets up the environment that the zImage is expecting. This includes
+optionally loading the initrd and device tree from the system partition
+based on the kernel command line.
+
+Signed-off-by: Roy Franz <roy.franz@linaro.org>
+Tested-by: Ryan Harkin <ryan.harkin@linaro.org>
+Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm/Kconfig | 19 ++++
+ arch/arm/boot/compressed/Makefile | 4
+ arch/arm/boot/compressed/efi-header.S | 130 ++++++++++++++++++++++++++++++
+ arch/arm/boot/compressed/head.S | 54 ++++++++++++
+ arch/arm/boot/compressed/vmlinux.lds.S | 7 +
+ arch/arm/include/asm/efi.h | 23 +++++
+ drivers/firmware/efi/libstub/Makefile | 9 ++
+ drivers/firmware/efi/libstub/arm-stub.c | 4
+ drivers/firmware/efi/libstub/arm32-stub.c | 85 +++++++++++++++++++
+ 9 files changed, 331 insertions(+), 4 deletions(-)
+ create mode 100644 arch/arm/boot/compressed/efi-header.S
+ create mode 100644 drivers/firmware/efi/libstub/arm32-stub.c
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -2044,6 +2044,25 @@ config AUTO_ZRELADDR
+ 0xf8000000. This assumes the zImage being placed in the first 128MB
+ from start of memory.
+
++config EFI_STUB
++ bool
++
++config EFI
++ bool "UEFI runtime support"
++ depends on OF && !CPU_BIG_ENDIAN && MMU && AUTO_ZRELADDR && !XIP_KERNEL
++ select UCS2_STRING
++ select EFI_PARAMS_FROM_FDT
++ select EFI_STUB
++ select EFI_ARMSTUB
++ select EFI_RUNTIME_WRAPPERS
++ ---help---
++ This option provides support for runtime services provided
++ by UEFI firmware (such as non-volatile variables, realtime
++ clock, and platform reset). A UEFI stub is also provided to
++ allow the kernel to be booted as an EFI application. This
++ is only useful for kernels that may run on systems that have
++ UEFI firmware.
++
+ endmenu
+
+ menu "CPU Power Management"
+--- a/arch/arm/boot/compressed/Makefile
++++ b/arch/arm/boot/compressed/Makefile
+@@ -167,9 +167,11 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CO
+ false; \
+ fi
+
++efi-obj-$(CONFIG_EFI_STUB) := $(objtree)/drivers/firmware/efi/libstub/lib.a
++
+ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
+ $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) \
+- $(bswapsdi2) FORCE
++ $(bswapsdi2) $(efi-obj-y) FORCE
+ @$(check_for_multiple_zreladdr)
+ $(call if_changed,ld)
+ @$(check_for_bad_syms)
+--- /dev/null
++++ b/arch/arm/boot/compressed/efi-header.S
+@@ -0,0 +1,130 @@
++/*
++ * Copyright (C) 2013-2015 Linaro Ltd
++ * Authors: Roy Franz <roy.franz@linaro.org>
++ * Ard Biesheuvel <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++ .macro __nop
++#ifdef CONFIG_EFI_STUB
++ @ This is almost but not quite a NOP, since it does clobber the
++ @ condition flags. But it is the best we can do for EFI, since
++ @ PE/COFF expects the magic string "MZ" at offset 0, while the
++ @ ARM/Linux boot protocol expects an executable instruction
++ @ there.
++ .inst 'M' | ('Z' << 8) | (0x1310 << 16) @ tstne r0, #0x4d000
++#else
++ mov r0, r0
++#endif
++ .endm
++
++ .macro __EFI_HEADER
++#ifdef CONFIG_EFI_STUB
++ b __efi_start
++
++ .set start_offset, __efi_start - start
++ .org start + 0x3c
++ @
++ @ The PE header can be anywhere in the file, but for
++ @ simplicity we keep it together with the MSDOS header
++ @ The offset to the PE/COFF header needs to be at offset
++ @ 0x3C in the MSDOS header.
++ @ The only 2 fields of the MSDOS header that are used are this
++ @ PE/COFF offset, and the "MZ" bytes at offset 0x0.
++ @
++ .long pe_header - start @ Offset to the PE header.
++
++pe_header:
++ .ascii "PE\0\0"
++
++coff_header:
++ .short 0x01c2 @ ARM or Thumb
++ .short 2 @ nr_sections
++ .long 0 @ TimeDateStamp
++ .long 0 @ PointerToSymbolTable
++ .long 1 @ NumberOfSymbols
++ .short section_table - optional_header
++ @ SizeOfOptionalHeader
++ .short 0x306 @ Characteristics.
++ @ IMAGE_FILE_32BIT_MACHINE |
++ @ IMAGE_FILE_DEBUG_STRIPPED |
++ @ IMAGE_FILE_EXECUTABLE_IMAGE |
++ @ IMAGE_FILE_LINE_NUMS_STRIPPED
++
++optional_header:
++ .short 0x10b @ PE32 format
++ .byte 0x02 @ MajorLinkerVersion
++ .byte 0x14 @ MinorLinkerVersion
++ .long _end - __efi_start @ SizeOfCode
++ .long 0 @ SizeOfInitializedData
++ .long 0 @ SizeOfUninitializedData
++ .long efi_stub_entry - start @ AddressOfEntryPoint
++ .long start_offset @ BaseOfCode
++ .long 0 @ data
++
++extra_header_fields:
++ .long 0 @ ImageBase
++ .long 0x200 @ SectionAlignment
++ .long 0x200 @ FileAlignment
++ .short 0 @ MajorOperatingSystemVersion
++ .short 0 @ MinorOperatingSystemVersion
++ .short 0 @ MajorImageVersion
++ .short 0 @ MinorImageVersion
++ .short 0 @ MajorSubsystemVersion
++ .short 0 @ MinorSubsystemVersion
++ .long 0 @ Win32VersionValue
++
++ .long _end - start @ SizeOfImage
++ .long start_offset @ SizeOfHeaders
++ .long 0 @ CheckSum
++ .short 0xa @ Subsystem (EFI application)
++ .short 0 @ DllCharacteristics
++ .long 0 @ SizeOfStackReserve
++ .long 0 @ SizeOfStackCommit
++ .long 0 @ SizeOfHeapReserve
++ .long 0 @ SizeOfHeapCommit
++ .long 0 @ LoaderFlags
++ .long 0x6 @ NumberOfRvaAndSizes
++
++ .quad 0 @ ExportTable
++ .quad 0 @ ImportTable
++ .quad 0 @ ResourceTable
++ .quad 0 @ ExceptionTable
++ .quad 0 @ CertificationTable
++ .quad 0 @ BaseRelocationTable
++
++section_table:
++ @
++ @ The EFI application loader requires a relocation section
++ @ because EFI applications must be relocatable. This is a
++ @ dummy section as far as we are concerned.
++ @
++ .ascii ".reloc\0\0"
++ .long 0 @ VirtualSize
++ .long 0 @ VirtualAddress
++ .long 0 @ SizeOfRawData
++ .long 0 @ PointerToRawData
++ .long 0 @ PointerToRelocations
++ .long 0 @ PointerToLineNumbers
++ .short 0 @ NumberOfRelocations
++ .short 0 @ NumberOfLineNumbers
++ .long 0x42100040 @ Characteristics
++
++ .ascii ".text\0\0\0"
++ .long _end - __efi_start @ VirtualSize
++ .long __efi_start @ VirtualAddress
++ .long _edata - __efi_start @ SizeOfRawData
++ .long __efi_start @ PointerToRawData
++ .long 0 @ PointerToRelocations
++ .long 0 @ PointerToLineNumbers
++ .short 0 @ NumberOfRelocations
++ .short 0 @ NumberOfLineNumbers
++ .long 0xe0500020 @ Characteristics
++
++ .align 9
++__efi_start:
++#endif
++ .endm
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -12,6 +12,8 @@
+ #include <asm/assembler.h>
+ #include <asm/v7m.h>
+
++#include "efi-header.S"
++
+ AR_CLASS( .arch armv7-a )
+ M_CLASS( .arch armv7-m )
+
+@@ -126,7 +128,7 @@
+ start:
+ .type start,#function
+ .rept 7
+- mov r0, r0
++ __nop
+ .endr
+ ARM( mov r0, r0 )
+ ARM( b 1f )
+@@ -139,7 +141,8 @@ start:
+ .word 0x04030201 @ endianness flag
+
+ THUMB( .thumb )
+-1:
++1: __EFI_HEADER
++
+ ARM_BE8( setend be ) @ go BE8 if compiled for BE8
+ AR_CLASS( mrs r9, cpsr )
+ #ifdef CONFIG_ARM_VIRT_EXT
+@@ -1353,6 +1356,53 @@ __enter_kernel:
+
+ reloc_code_end:
+
++#ifdef CONFIG_EFI_STUB
++ .align 2
++_start: .long start - .
++
++ENTRY(efi_stub_entry)
++ @ allocate space on stack for passing current zImage address
++ @ and for the EFI stub to return of new entry point of
++ @ zImage, as EFI stub may copy the kernel. Pointer address
++ @ is passed in r2. r0 and r1 are passed through from the
++ @ EFI firmware to efi_entry
++ adr ip, _start
++ ldr r3, [ip]
++ add r3, r3, ip
++ stmfd sp!, {r3, lr}
++ mov r2, sp @ pass zImage address in r2
++ bl efi_entry
++
++ @ Check for error return from EFI stub. r0 has FDT address
++ @ or error code.
++ cmn r0, #1
++ beq efi_load_fail
++
++ @ Preserve return value of efi_entry() in r4
++ mov r4, r0
++ bl cache_clean_flush
++ bl cache_off
++
++ @ Set parameters for booting zImage according to boot protocol
++ @ put FDT address in r2, it was returned by efi_entry()
++ @ r1 is the machine type, and r0 needs to be 0
++ mov r0, #0
++ mov r1, #0xFFFFFFFF
++ mov r2, r4
++
++ @ Branch to (possibly) relocated zImage that is in [sp]
++ ldr lr, [sp]
++ ldr ip, =start_offset
++ add lr, lr, ip
++ mov pc, lr @ no mode switch
++
++efi_load_fail:
++ @ Return EFI_LOAD_ERROR to EFI firmware on error.
++ ldr r0, =0x80000001
++ ldmfd sp!, {ip, pc}
++ENDPROC(efi_stub_entry)
++#endif
++
+ .align
+ .section ".stack", "aw", %nobits
+ .L_user_stack: .space 4096
+--- a/arch/arm/boot/compressed/vmlinux.lds.S
++++ b/arch/arm/boot/compressed/vmlinux.lds.S
+@@ -48,6 +48,13 @@ SECTIONS
+ *(.rodata)
+ *(.rodata.*)
+ }
++ .data : {
++ /*
++ * The EFI stub always executes from RAM and runs strictly before the
++ * decompressor, so we can make an exception and keep its r/w data.
++ */
++ *(.data.efistub)
++ }
+ .piggydata : {
+ *(.piggydata)
+ }
+--- a/arch/arm/include/asm/efi.h
++++ b/arch/arm/include/asm/efi.h
+@@ -57,4 +57,27 @@ void efi_virtmap_unload(void);
+ #define efi_init()
+ #endif /* CONFIG_EFI */
+
++/* arch specific definitions used by the stub code */
++
++#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
++
++/*
++ * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
++ * so we will reserve that amount of memory. We have no easy way to tell what
++ * the actual size of code + data the uncompressed kernel will use.
++ * If this is insufficient, the decompressor will relocate itself out of the
++ * way before performing the decompression.
++ */
++#define MAX_UNCOMP_KERNEL_SIZE SZ_32M
++
++/*
++ * The kernel zImage should preferably be located between 32 MB and 128 MB
++ * from the base of DRAM. The min address leaves space for a maximal size
++ * uncompressed image, and the max address is due to how the zImage decompressor
++ * picks a destination address.
++ */
++#define ZIMAGE_OFFSET_LIMIT SZ_128M
++#define MIN_ZIMAGE_OFFSET MAX_UNCOMP_KERNEL_SIZE
++#define MAX_FDT_OFFSET ZIMAGE_OFFSET_LIMIT
++
+ #endif /* _ASM_ARM_EFI_H */
+--- a/drivers/firmware/efi/libstub/Makefile
++++ b/drivers/firmware/efi/libstub/Makefile
+@@ -34,6 +34,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE
+ lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o string.o \
+ $(patsubst %.c,lib-%.o,$(arm-deps))
+
++lib-$(CONFIG_ARM) += arm32-stub.o
+ lib-$(CONFIG_ARM64) += arm64-stub.o random.o
+ CFLAGS_arm64-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
+
+@@ -67,3 +68,11 @@ quiet_cmd_stubcopy = STUBCPY $@
+ $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y) \
+ && (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
+ rm -f $@; /bin/false); else /bin/false; fi
++
++#
++# ARM discards the .data section because it disallows r/w data in the
++# decompressor. So move our .data to .data.efistub, which is preserved
++# explicitly by the decompressor linker script.
++#
++STUBCOPY_FLAGS-$(CONFIG_ARM) += --rename-section .data=.data.efistub
++STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
+--- a/drivers/firmware/efi/libstub/arm-stub.c
++++ b/drivers/firmware/efi/libstub/arm-stub.c
+@@ -340,8 +340,10 @@ fail:
+ * The value chosen is the largest non-zero power of 2 suitable for this purpose
+ * both on 32-bit and 64-bit ARM CPUs, to maximize the likelihood that it can
+ * be mapped efficiently.
++ * Since 32-bit ARM could potentially execute with a 1G/3G user/kernel split,
++ * map everything below 1 GB.
+ */
+-#define EFI_RT_VIRTUAL_BASE 0x40000000
++#define EFI_RT_VIRTUAL_BASE SZ_512M
+
+ static int cmp_mem_desc(const void *l, const void *r)
+ {
+--- /dev/null
++++ b/drivers/firmware/efi/libstub/arm32-stub.c
+@@ -0,0 +1,85 @@
++/*
++ * Copyright (C) 2013 Linaro Ltd; <roy.franz@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ *
++ */
++#include <linux/efi.h>
++#include <asm/efi.h>
++
++efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
++ unsigned long *image_addr,
++ unsigned long *image_size,
++ unsigned long *reserve_addr,
++ unsigned long *reserve_size,
++ unsigned long dram_base,
++ efi_loaded_image_t *image)
++{
++ unsigned long nr_pages;
++ efi_status_t status;
++ /* Use alloc_addr to translate between types */
++ efi_physical_addr_t alloc_addr;
++
++ /*
++ * Verify that the DRAM base address is compatible with the ARM
++ * boot protocol, which determines the base of DRAM by masking
++ * off the low 27 bits of the address at which the zImage is
++ * loaded. These assumptions are made by the decompressor,
++ * before any memory map is available.
++ */
++ dram_base = round_up(dram_base, SZ_128M);
++
++ /*
++ * Reserve memory for the uncompressed kernel image. This is
++ * all that prevents any future allocations from conflicting
++ * with the kernel. Since we can't tell from the compressed
++ * image how much DRAM the kernel actually uses (due to BSS
++ * size uncertainty) we allocate the maximum possible size.
++ * Do this very early, as prints can cause memory allocations
++ * that may conflict with this.
++ */
++ alloc_addr = dram_base;
++ *reserve_size = MAX_UNCOMP_KERNEL_SIZE;
++ nr_pages = round_up(*reserve_size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
++ status = sys_table->boottime->allocate_pages(EFI_ALLOCATE_ADDRESS,
++ EFI_LOADER_DATA,
++ nr_pages, &alloc_addr);
++ if (status != EFI_SUCCESS) {
++ *reserve_size = 0;
++ pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
++ return status;
++ }
++ *reserve_addr = alloc_addr;
++
++ /*
++ * Relocate the zImage, so that it appears in the lowest 128 MB
++ * memory window.
++ */
++ *image_size = image->image_size;
++ status = efi_relocate_kernel(sys_table, image_addr, *image_size,
++ *image_size,
++ dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
++ if (status != EFI_SUCCESS) {
++ pr_efi_err(sys_table, "Failed to relocate kernel.\n");
++ efi_free(sys_table, *reserve_size, *reserve_addr);
++ *reserve_size = 0;
++ return status;
++ }
++
++ /*
++ * Check that we were able to allocate memory low enough. The
++ * kernel determines the base of DRAM from the address at which
++ * the zImage is loaded.
++ */
++ if (*image_addr + *image_size > dram_base + ZIMAGE_OFFSET_LIMIT) {
++ pr_efi_err(sys_table, "Failed to relocate kernel, no low memory available.\n");
++ efi_free(sys_table, *reserve_size, *reserve_addr);
++ *reserve_size = 0;
++ efi_free(sys_table, *image_size, *image_addr);
++ *image_size = 0;
++ return EFI_LOAD_ERROR;
++ }
++ return EFI_SUCCESS;
++}
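
The placement rules that handle_kernel_image() enforces follow from the
decompressor inferring the DRAM base by clearing the low 27 bits (128 MB) of
its own load address, so the stub must keep the relocated zImage inside one
128 MB window above a 128 MB-aligned base. A stand-alone sketch of that
arithmetic; only the SZ_32M/SZ_128M constants mirror the patch, the addresses
are made up:

/*
 * Sketch only (not kernel code) of the ARM stub placement arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

#define SZ_32M	(32ULL << 20)
#define SZ_128M	(128ULL << 20)

static uint64_t round_up_128m(uint64_t x)
{
	return (x + SZ_128M - 1) & ~(SZ_128M - 1);
}

int main(void)
{
	uint64_t dram_base  = round_up_128m(0x80000001ULL);	/* hypothetical */
	uint64_t image_addr = dram_base + SZ_32M;	/* above the kernel reserve */
	uint64_t image_size = 8ULL << 20;

	/* Decompressor's view: base = load address, low 27 bits masked off. */
	uint64_t inferred = image_addr & ~(SZ_128M - 1);

	printf("dram_base=%#llx inferred=%#llx fits=%d\n",
	       (unsigned long long)dram_base,
	       (unsigned long long)inferred,
	       image_addr + image_size <= dram_base + SZ_128M);
	return 0;
}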
diff --git a/patches.arch/qcom-0006-efi-include-asm-early_ioremap.h-not-asm-efi.h-to-get.patch b/patches.arch/qcom-0006-efi-include-asm-early_ioremap.h-not-asm-efi.h-to-get.patch
new file mode 100644
index 0000000000..0ec91b701d
--- /dev/null
+++ b/patches.arch/qcom-0006-efi-include-asm-early_ioremap.h-not-asm-efi.h-to-get.patch
@@ -0,0 +1,44 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Tue, 12 Jan 2016 14:22:46 +0100
+Subject: efi: include asm/early_ioremap.h not asm/efi.h to get early_memremap
+Git-commit: 0f7f2f0c0fcbe5e2bcba707a628ebaedfe2be4b4
+Patch-mainline: v4.5-rc1
+References: fate#320512
+
+The code in efi.c uses early_memremap(), but relies on a transitive
+include rather than including asm/early_ioremap.h directly, since
+this header did not exist on ia64.
+
+Commit f7d924894265 ("arm64/efi: refactor EFI init and runtime code
+for reuse by 32-bit ARM") attempted to work around this by including
+asm/efi.h, which transitively includes asm/early_ioremap.h on most
+architectures. However, since asm/efi.h does not exist on ia64 either,
+this is not much of an improvement.
+
+Now that we have created an asm/early_ioremap.h for ia64, we can just
+include it directly.
+
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/efi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index cffa89b..2cd37da 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -25,7 +25,7 @@
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+
+-#include <asm/efi.h>
++#include <asm/early_ioremap.h>
+
+ struct efi __read_mostly efi = {
+ .mps = EFI_INVALID_TABLE_ADDR,
+--
+2.10.0
+
diff --git a/patches.arch/qcom-0007-efi-arm-Drop-writable-mapping-of-the-UEFI-System-tab.patch b/patches.arch/qcom-0007-efi-arm-Drop-writable-mapping-of-the-UEFI-System-tab.patch
new file mode 100644
index 0000000000..f68b836b5e
--- /dev/null
+++ b/patches.arch/qcom-0007-efi-arm-Drop-writable-mapping-of-the-UEFI-System-tab.patch
@@ -0,0 +1,121 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 25 Apr 2016 21:06:34 +0100
+Subject: efi/arm*: Drop writable mapping of the UEFI System table
+Git-commit: 14c43be60166981f0b1f034ad9c59252c6f99e0d
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+Commit:
+
+ 2eec5dedf770 ("efi/arm-init: Use read-only early mappings")
+
+updated the early ARM UEFI init code to create the temporary, early
+mapping of the UEFI System table using read-only attributes, as a
+hardening measure against inadvertent modification.
+
+However, this still leaves the permanent, writable mapping of the UEFI
+System table, which is only ever referenced during invocations of UEFI
+Runtime Services, at which time the UEFI virtual mapping is available,
+which also covers the system table. (This is guaranteed by the fact that
+SetVirtualAddressMap(), which is a runtime service itself, converts
+various entries in the table to their virtual equivalents, which implies
+that the table must be covered by a RuntimeServicesData region that has
+the EFI_MEMORY_RUNTIME attribute.)
+
+So instead of creating this permanent mapping, record the virtual address
+of the system table inside the UEFI virtual mapping, and dereference that
+when accessing the table. This protects the contents of the system table
+from inadvertent (or deliberate) modification when no UEFI Runtime
+Services calls are in progress.
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1461614832-17633-3-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/arm-init.c | 2 ++
+ drivers/firmware/efi/arm-runtime.c | 26 ++++++++++++++++----------
+ 2 files changed, 18 insertions(+), 10 deletions(-)
+
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -86,6 +86,8 @@ static int __init uefi_init(void)
+ efi.systab->hdr.revision >> 16,
+ efi.systab->hdr.revision & 0xffff);
+
++ efi.runtime_version = efi.systab->hdr.revision;
++
+ /* Show what we know for posterity */
+ c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
+ sizeof(vendor) * sizeof(efi_char16_t));
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -42,10 +42,12 @@ static struct mm_struct efi_mm = {
+ static bool __init efi_virtmap_init(void)
+ {
+ efi_memory_desc_t *md;
++ bool systab_found;
+
+ efi_mm.pgd = pgd_alloc(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
++ systab_found = false;
+ for_each_efi_memory_desc(&memmap, md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+@@ -64,8 +66,20 @@ static bool __init efi_virtmap_init(void
+ &phys, ret);
+ return false;
+ }
++ /*
++ * If this entry covers the address of the UEFI system table,
++ * calculate and record its virtual address.
++ */
++ if (efi_system_table >= phys &&
++ efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
++ efi.systab = (void *)(unsigned long)(efi_system_table -
++ phys + md->virt_addr);
++ systab_found = true;
++ }
+ }
+- return true;
++ if (!systab_found)
++ pr_err("No virtual mapping found for the UEFI System Table\n");
++ return systab_found;
+ }
+
+ /*
+@@ -99,16 +113,10 @@ static int __init arm_enable_runtime_ser
+ memmap.map_end = memmap.map + mapsize;
+ efi.memmap = &memmap;
+
+- efi.systab = (__force void *)ioremap_cache(efi_system_table,
+- sizeof(efi_system_table_t));
+- if (!efi.systab) {
+- pr_err("Failed to remap EFI System Table\n");
+- return -ENOMEM;
+- }
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+ if (!efi_virtmap_init()) {
+- pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
++ pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
+ return -ENOMEM;
+ }
+
+@@ -116,8 +124,6 @@ static int __init arm_enable_runtime_ser
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+
+- efi.runtime_version = efi.systab->hdr.revision;
+-
+ return 0;
+ }
+ early_initcall(arm_enable_runtime_services);
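
The virtual address that efi_virtmap_init() records is simply the system
table's offset into the descriptor that covers it, rebased onto that
descriptor's virtual mapping. A minimal model of the lookup; the struct and
all values below are invented for illustration, not the kernel's definitions:

/*
 * Sketch only (not kernel code) of the systab virtual-address lookup.
 */
#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SIZE 4096ULL

struct efi_memory_desc {
	uint64_t phys_addr;
	uint64_t virt_addr;
	uint64_t num_pages;
};

int main(void)
{
	struct efi_memory_desc md = {
		.phys_addr = 0x9f000000, .virt_addr = 0x20000000, .num_pages = 16,
	};
	uint64_t efi_system_table = 0x9f003020;	/* hypothetical */
	uint64_t len = md.num_pages * EFI_PAGE_SIZE;

	if (efi_system_table >= md.phys_addr &&
	    efi_system_table < md.phys_addr + len) {
		uint64_t virt = efi_system_table - md.phys_addr + md.virt_addr;

		printf("systab mapped at %#llx\n", (unsigned long long)virt);
	}
	return 0;
}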
diff --git a/patches.arch/qcom-0008-efi-arm64-Drop-__init-annotation-from-handle_kernel_.patch b/patches.arch/qcom-0008-efi-arm64-Drop-__init-annotation-from-handle_kernel_.patch
new file mode 100644
index 0000000000..47f956f713
--- /dev/null
+++ b/patches.arch/qcom-0008-efi-arm64-Drop-__init-annotation-from-handle_kernel_.patch
@@ -0,0 +1,52 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Feb 2016 12:35:57 +0000
+Subject: efi/arm64: Drop __init annotation from handle_kernel_image()
+Git-commit: dae31fd2b74c35cc84128733bc210bf6b26ae408
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+After moving arm64-stub.c to libstub/, all of its sections are emitted
+as .init.xxx sections automatically, and the __init annotation of
+handle_kernel_image() causes it to end up in .init.init.text, which is
+not recognized as an __init section by the linker scripts. So drop the
+annotation.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1455712566-16727-5-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/libstub/arm64-stub.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -17,13 +17,13 @@
+
+ extern bool __nokaslr;
+
+-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
+- unsigned long *image_addr,
+- unsigned long *image_size,
+- unsigned long *reserve_addr,
+- unsigned long *reserve_size,
+- unsigned long dram_base,
+- efi_loaded_image_t *image)
++efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
++ unsigned long *image_addr,
++ unsigned long *image_size,
++ unsigned long *reserve_addr,
++ unsigned long *reserve_size,
++ unsigned long dram_base,
++ efi_loaded_image_t *image)
+ {
+ efi_status_t status;
+ unsigned long kernel_size, kernel_memsize = 0;
diff --git a/patches.arch/qcom-0009-arm64-vmlinux.lds.S-Handle-.init.rodata.xxx-and-.ini.patch b/patches.arch/qcom-0009-arm64-vmlinux.lds.S-Handle-.init.rodata.xxx-and-.ini.patch
new file mode 100644
index 0000000000..3dbc66395f
--- /dev/null
+++ b/patches.arch/qcom-0009-arm64-vmlinux.lds.S-Handle-.init.rodata.xxx-and-.ini.patch
@@ -0,0 +1,47 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Feb 2016 12:35:58 +0000
+Subject: arm64/vmlinux.lds.S: Handle .init.rodata.xxx and .init.bss sections
+Git-commit: 1ce99bf45306ba889faadced6baabebf7770c546
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+The EFI stub is typically built into the decompressor (x86, ARM) so none
+of its symbols are annotated as __init. However, on arm64, the stub is
+linked into the kernel proper, and the code is __init annotated at the
+section level by prepending all names of SHF_ALLOC sections with '.init'.
+
+This results in section names like .init.rodata.str1.8 (for string literals)
+and .init.bss (which is tiny), both of which can be moved into the .init.data
+output section.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1455712566-16727-6-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/kernel/vmlinux.lds.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
+index e3928f5..cbf4db4 100644
+--- a/arch/arm64/kernel/vmlinux.lds.S
++++ b/arch/arm64/kernel/vmlinux.lds.S
+@@ -134,6 +134,7 @@ SECTIONS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
++ *(.init.rodata.* .init.bss) /* from the EFI stub */
+ }
+ .exit.data : {
+ ARM_EXIT_KEEP(EXIT_DATA)
+--
+2.10.0
+
diff --git a/patches.arch/qcom-0010-efi-efistub-Prevent-__init-annotations-from-being-us.patch b/patches.arch/qcom-0010-efi-efistub-Prevent-__init-annotations-from-being-us.patch
new file mode 100644
index 0000000000..14f140e6df
--- /dev/null
+++ b/patches.arch/qcom-0010-efi-efistub-Prevent-__init-annotations-from-being-us.patch
@@ -0,0 +1,56 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Feb 2016 12:35:59 +0000
+Subject: efi/efistub: Prevent __init annotations from being used
+Git-commit: 07e83dbb75865b016f6493c119a30aac7c25051a
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+__init annotations should not be used in the EFI stub, since the code is
+either included in the decompressor (x86, ARM) where they have no effect,
+or the whole stub is __init annotated at the section level (arm64), by
+renaming the sections.
+
+In the second case the __init annotations will be redundant, and will
+result in section names like .init.init.text, and our linker script does
+not expect that.
+
+So un-#define __init so that its inadvertent use will force a build error.
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1455712566-16727-7-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/libstub/efistub.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
+index 6b6548f..86ff7bf 100644
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -5,6 +5,16 @@
+ /* error code which can't be mistaken for valid address */
+ #define EFI_ERROR (~0UL)
+
++/*
++ * __init annotations should not be used in the EFI stub, since the code is
++ * either included in the decompressor (x86, ARM) where they have no effect,
++ * or the whole stub is __init annotated at the section level (arm64), by
++ * renaming the sections, in which case the __init annotation will be
++ * redundant, and will result in section names like .init.init.text, and our
++ * linker script does not expect that.
++ */
++#undef __init
++
+ void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
+
+ efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
+--
+2.10.0
+
diff --git a/patches.arch/qcom-0011-efi-arm-init-Use-read-only-early-mappings.patch b/patches.arch/qcom-0011-efi-arm-init-Use-read-only-early-mappings.patch
new file mode 100644
index 0000000000..1b3109fd82
--- /dev/null
+++ b/patches.arch/qcom-0011-efi-arm-init-Use-read-only-early-mappings.patch
@@ -0,0 +1,70 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Feb 2016 12:36:00 +0000
+Subject: efi/arm-init: Use read-only early mappings
+Git-commit: 2eec5dedf770dc85c1fdf6b86873165e61bb1fff
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+The early mappings of the EFI system table contents and the UEFI memory
+map are read-only from the OS point of view. So map them read-only to
+protect them from inadvertent modification.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1455712566-16727-8-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/arm-init.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -121,8 +121,8 @@ static int __init uefi_init(void)
+ char vendor[100] = "unknown";
+ int i, retval;
+
+- efi.systab = early_memremap(efi_system_table,
+- sizeof(efi_system_table_t));
++ efi.systab = early_memremap_ro(efi_system_table,
++ sizeof(efi_system_table_t));
+ if (efi.systab == NULL) {
+ pr_warn("Unable to map EFI system table.\n");
+ return -ENOMEM;
+@@ -148,8 +148,8 @@ static int __init uefi_init(void)
+ efi.runtime_version = efi.systab->hdr.revision;
+
+ /* Show what we know for posterity */
+- c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
+- sizeof(vendor) * sizeof(efi_char16_t));
++ c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
++ sizeof(vendor) * sizeof(efi_char16_t));
+ if (c16) {
+ for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
+ vendor[i] = c16[i];
+@@ -162,8 +162,8 @@ static int __init uefi_init(void)
+ efi.systab->hdr.revision & 0xffff, vendor);
+
+ table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+- config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+- table_size);
++ config_tables = early_memremap_ro(efi_to_phys(efi.systab->tables),
++ table_size);
+ if (config_tables == NULL) {
+ pr_warn("Unable to map EFI config table array.\n");
+ retval = -ENOMEM;
+@@ -256,7 +256,7 @@ void __init efi_init_fdt(void *fdt)
+ efi_system_table = params.system_table;
+
+ memmap.phys_map = params.mmap;
+- memmap.map = early_memremap(params.mmap, params.mmap_size);
++ memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
+ if (memmap.map == NULL) {
+ /*
+ * If we are booting via UEFI, the UEFI memory map is the only
diff --git a/patches.arch/qcom-0012-efi-arm-Check-for-LPAE-support-before-booting-a-LPAE.patch b/patches.arch/qcom-0012-efi-arm-Check-for-LPAE-support-before-booting-a-LPAE.patch
new file mode 100644
index 0000000000..3f677c22ac
--- /dev/null
+++ b/patches.arch/qcom-0012-efi-arm-Check-for-LPAE-support-before-booting-a-LPAE.patch
@@ -0,0 +1,61 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Feb 2016 12:36:01 +0000
+Subject: efi/arm: Check for LPAE support before booting a LPAE kernel
+Git-commit: 2ec0f0a3a4bfab90eda8b81656f62e07abf2321f
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+A kernel built with support for LPAE cannot boot to a state where it
+can inform the user that it must fail due to missing LPAE support
+in the hardware.
+
+If we happen to be booting via UEFI, we can fail gracefully, so check
+for LPAE support in the hardware on CONFIG_ARM_LPAE builds before
+entering the kernel proper.
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Jeremy Linton <jeremy.linton@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1455712566-16727-9-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/libstub/arm32-stub.c | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
+index 495ebd6..6f42be4 100644
+--- a/drivers/firmware/efi/libstub/arm32-stub.c
++++ b/drivers/firmware/efi/libstub/arm32-stub.c
+@@ -9,6 +9,23 @@
+ #include <linux/efi.h>
+ #include <asm/efi.h>
+
++efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
++{
++ int block;
++
++ /* non-LPAE kernels can run anywhere */
++ if (!IS_ENABLED(CONFIG_ARM_LPAE))
++ return EFI_SUCCESS;
++
++ /* LPAE kernels need compatible hardware */
++ block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
++ if (block < 5) {
++ pr_efi_err(sys_table_arg, "This LPAE kernel is not supported by your CPU\n");
++ return EFI_UNSUPPORTED;
++ }
++ return EFI_SUCCESS;
++}
++
+ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
+ unsigned long *image_addr,
+ unsigned long *image_size,
+--
+2.10.0
+
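
check_platform_features() relies on cpuid_feature_extract() returning the
signed 4-bit VMSA field from ID_MMFR0 bits [3:0], where level 5 or higher
indicates LPAE. A user-space approximation of that extraction; the register
value is made up, and on real hardware the kernel reads it from CP15:

/*
 * Sketch only (not kernel code) of the LPAE capability test.
 */
#include <stdint.h>
#include <stdio.h>

/* Extract a signed 4-bit field, like the kernel helper. */
static int feature_extract(uint32_t reg, int shift)
{
	return (int32_t)(reg << (28 - shift)) >> 28;
}

int main(void)
{
	uint32_t id_mmfr0 = 0x10201105;	/* hypothetical register value */
	int vmsa = feature_extract(id_mmfr0, 0);

	if (vmsa >= 5)
		printf("LPAE supported (VMSA level %d)\n", vmsa);
	else
		printf("LPAE kernel would be refused (VMSA level %d)\n", vmsa);
	return 0;
}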
diff --git a/patches.arch/qcom-0013-efi-arm64-Check-for-h-w-support-before-booting-a-4-K.patch b/patches.arch/qcom-0013-efi-arm64-Check-for-h-w-support-before-booting-a-4-K.patch
new file mode 100644
index 0000000000..abf8f2035f
--- /dev/null
+++ b/patches.arch/qcom-0013-efi-arm64-Check-for-h-w-support-before-booting-a-4-K.patch
@@ -0,0 +1,67 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Feb 2016 12:36:02 +0000
+Subject: efi/arm64: Check for h/w support before booting a >4 KB granular
+ kernel
+Git-commit: 42b55734030c1f724d5f47aeb872e2cccd650d79
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+A kernel built with support for a page size that is not supported by the
+hardware it runs on cannot boot to a state where it can inform the user
+about the failure.
+
+If we happen to be booting via UEFI, we can fail gracefully, so check
+if the currently configured page size is supported by the hardware before
+entering the kernel proper. Note that UEFI mandates support for 4 KB pages,
+so in that case, no check is needed.
+
+Tested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Jeremy Linton <jeremy.linton@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1455712566-16727-10-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/libstub/arm64-stub.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/firmware/efi/libstub/arm64-stub.c
++++ b/drivers/firmware/efi/libstub/arm64-stub.c
+@@ -12,11 +12,31 @@
+ #include <linux/efi.h>
+ #include <asm/efi.h>
+ #include <asm/sections.h>
++#include <asm/sysreg.h>
+
+ #include "efistub.h"
+
+ extern bool __nokaslr;
+
++efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
++{
++ u64 tg;
++
++ /* UEFI mandates support for 4 KB granularity, no need to check */
++ if (IS_ENABLED(CONFIG_ARM64_4K_PAGES))
++ return EFI_SUCCESS;
++
++ tg = (read_cpuid(ID_AA64MMFR0_EL1) >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
++ if (tg != ID_AA64MMFR0_TGRAN_SUPPORTED) {
++ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
++ pr_efi_err(sys_table_arg, "This 64 KB granular kernel is not supported by your CPU\n");
++ else
++ pr_efi_err(sys_table_arg, "This 16 KB granular kernel is not supported by your CPU\n");
++ return EFI_UNSUPPORTED;
++ }
++ return EFI_SUCCESS;
++}
++
+ efi_status_t handle_kernel_image(efi_system_table_t *sys_table_arg,
+ unsigned long *image_addr,
+ unsigned long *image_size,
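
The arm64 check reads the TGRAN field of ID_AA64MMFR0_EL1 that matches the
configured granule and requires it to read back as "supported". A sketch of
the field test for the 64 KB case; the register value is invented and the
shift/constant names are assumptions modelled on the kernel's
ID_AA64MMFR0_TGRAN* definitions (the kernel uses read_cpuid()):

/*
 * Sketch only (not kernel code) of the granule support test.
 */
#include <stdint.h>
#include <stdio.h>

#define TGRAN64_SHIFT		24	/* TGran64 field, bits [27:24] */
#define TGRAN64_SUPPORTED	0x0

int main(void)
{
	uint64_t aa64mmfr0 = 0x00000000ff000000ULL;	/* hypothetical */
	unsigned int tg = (aa64mmfr0 >> TGRAN64_SHIFT) & 0xf;

	if (tg != TGRAN64_SUPPORTED)
		puts("This 64 KB granular kernel is not supported by your CPU");
	else
		puts("64 KB granule supported");
	return 0;
}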
diff --git a/patches.arch/qcom-0014-efi-arm-Perform-hardware-compatibility-check.patch b/patches.arch/qcom-0014-efi-arm-Perform-hardware-compatibility-check.patch
new file mode 100644
index 0000000000..3ebb035103
--- /dev/null
+++ b/patches.arch/qcom-0014-efi-arm-Perform-hardware-compatibility-check.patch
@@ -0,0 +1,51 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 17 Feb 2016 12:36:03 +0000
+Subject: efi/arm*: Perform hardware compatibility check
+Git-commit: b9d6769b5678dbd6cb328d20716561d35b2b1510
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+Before proceeding with relocating the kernel and parsing the command line,
+insert a call to check_platform_features() to allow an arch specific check
+to be performed whether the current kernel can execute on the current
+hardware.
+
+Tested-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Jeremy Linton <jeremy.linton@arm.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1455712566-16727-11-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/libstub/arm-stub.c | 4 ++++
+ drivers/firmware/efi/libstub/efistub.h | 2 ++
+ 2 files changed, 6 insertions(+)
+
+--- a/drivers/firmware/efi/libstub/arm-stub.c
++++ b/drivers/firmware/efi/libstub/arm-stub.c
+@@ -212,6 +212,10 @@ unsigned long efi_entry(void *handle, ef
+
+ pr_efi(sys_table, "Booting Linux Kernel...\n");
+
++ status = check_platform_features(sys_table);
++ if (status != EFI_SUCCESS)
++ goto fail;
++
+ /*
+ * Get a handle to the loaded image protocol. This is used to get
+ * information about the running image, such as size and the command
+--- a/drivers/firmware/efi/libstub/efistub.h
++++ b/drivers/firmware/efi/libstub/efistub.h
+@@ -60,4 +60,6 @@ efi_status_t efi_random_alloc(efi_system
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long random_seed);
+
++efi_status_t check_platform_features(efi_system_table_t *sys_table_arg);
++
+ #endif
diff --git a/patches.arch/qcom-0015-x86-efi-Map-RAM-into-the-identity-page-table-for-mix.patch b/patches.arch/qcom-0015-x86-efi-Map-RAM-into-the-identity-page-table-for-mix.patch
new file mode 100644
index 0000000000..758ced09e4
--- /dev/null
+++ b/patches.arch/qcom-0015-x86-efi-Map-RAM-into-the-identity-page-table-for-mix.patch
@@ -0,0 +1,72 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Fri, 27 Nov 2015 21:09:32 +0000
+Subject: x86/efi: Map RAM into the identity page table for mixed mode
+Git-commit: b61a76f8850d2979550abc42d7e09154ebb8d785
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+We are relying on the pre-existing mappings in 'trampoline_pgd'
+when accessing function arguments in the EFI mixed mode thunking
+code.
+
+Instead let's map memory explicitly so that things will continue
+to work when we move to a separate page table in the future.
+
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1448658575-17029-4-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi_64.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -142,6 +142,7 @@ void efi_sync_low_kernel_mappings(void)
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+ unsigned long text;
++ efi_memory_desc_t *md;
+ struct page *page;
+ unsigned npages;
+ pgd_t *pgd;
+@@ -174,6 +175,25 @@ int __init efi_setup_page_tables(unsigne
+ if (!IS_ENABLED(CONFIG_EFI_MIXED))
+ return 0;
+
++ /*
++ * Map all of RAM so that we can access arguments in the 1:1
++ * mapping when making EFI runtime calls.
++ */
++ for_each_efi_memory_desc(&memmap, md) {
++ if (md->type != EFI_CONVENTIONAL_MEMORY &&
++ md->type != EFI_LOADER_DATA &&
++ md->type != EFI_LOADER_CODE)
++ continue;
++
++ pfn = md->phys_addr >> PAGE_SHIFT;
++ npages = md->num_pages;
++
++ if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, 0)) {
++ pr_err("Failed to map 1:1 memory\n");
++ return 1;
++ }
++ }
++
+ page = alloc_page(GFP_KERNEL|__GFP_DMA32);
+ if (!page)
+ panic("Unable to allocate EFI runtime stack < 4GB\n");
diff --git a/patches.arch/qcom-0016-x86-efi-Hoist-page-table-switching-code-into-efi_cal.patch b/patches.arch/qcom-0016-x86-efi-Hoist-page-table-switching-code-into-efi_cal.patch
new file mode 100644
index 0000000000..86827a81d1
--- /dev/null
+++ b/patches.arch/qcom-0016-x86-efi-Hoist-page-table-switching-code-into-efi_cal.patch
@@ -0,0 +1,216 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Fri, 27 Nov 2015 21:09:33 +0000
+Subject: x86/efi: Hoist page table switching code into efi_call_virt()
+Git-commit: c9f2a9a65e4855b74d92cdad688f6ee4a1a323ff
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+This change is a prerequisite for pending patches that switch to
+a dedicated EFI page table, instead of using 'trampoline_pgd'
+which shares PGD entries with 'swapper_pg_dir'. The pending
+patches make it impossible to dereference the runtime service
+function pointer without first switching %cr3.
+
+It's true that we now have duplicated switching code in
+efi_call_virt() and efi_call_phys_{prolog,epilog}(), but we are
+accepting some code duplication in exchange for a little more
+clarity and the ease of writing the page table switching code in
+C instead of asm.
+
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Stephen Smalley <sds@tycho.nsa.gov>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1448658575-17029-5-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/include/asm/efi.h | 25 ++++++++++++++++++++
+ arch/x86/platform/efi/efi_64.c | 24 +++++++++-----------
+ arch/x86/platform/efi/efi_stub_64.S | 43 ------------------------------------
+ 3 files changed, 36 insertions(+), 56 deletions(-)
+
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -4,6 +4,7 @@
+ #include <asm/fpu/api.h>
+ #include <asm/pgtable.h>
+ #include <asm/processor-flags.h>
++#include <asm/tlb.h>
+
+ /*
+ * We map the EFI regions needed for runtime services non-contiguously,
+@@ -54,11 +55,29 @@ extern u64 asmlinkage efi_call(void *fp,
+
+ #define efi_call_phys(f, args...) efi_call((f), args)
+
++/*
++ * Scratch space used for switching the pagetable in the EFI stub
++ */
++struct efi_scratch {
++ u64 r15;
++ u64 prev_cr3;
++ pgd_t *efi_pgt;
++ bool use_pgd;
++ u64 phys_stack;
++} __packed;
++
+ #define arch_efi_call_virt_setup() \
+ ({ \
+ efi_sync_low_kernel_mappings(); \
+ preempt_disable(); \
+ __kernel_fpu_begin(); \
++ \
++ if (efi_scratch.use_pgd) { \
++ efi_scratch.prev_cr3 = read_cr3(); \
++ write_cr3((unsigned long)efi_scratch.efi_pgt); \
++ __flush_tlb_all(); \
++ } \
++ \
+ })
+
+ #define arch_efi_call_virt(f, args...) \
+@@ -66,6 +85,12 @@ extern u64 asmlinkage efi_call(void *fp,
+
+ #define arch_efi_call_virt_teardown() \
+ ({ \
++ \
++ if (efi_scratch.use_pgd) { \
++ write_cr3(efi_scratch.prev_cr3); \
++ __flush_tlb_all(); \
++ } \
++ \
+ __kernel_fpu_end(); \
+ preempt_enable(); \
+ })
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -47,16 +47,7 @@
+ */
+ static u64 efi_va = EFI_VA_START;
+
+-/*
+- * Scratch space used for switching the pagetable in the EFI stub
+- */
+-struct efi_scratch {
+- u64 r15;
+- u64 prev_cr3;
+- pgd_t *efi_pgt;
+- bool use_pgd;
+- u64 phys_stack;
+-} __packed;
++struct efi_scratch efi_scratch;
+
+ static void __init early_code_mapping_set_exec(int executable)
+ {
+@@ -81,8 +72,11 @@ pgd_t * __init efi_call_phys_prolog(void
+ int pgd;
+ int n_pgds;
+
+- if (!efi_enabled(EFI_OLD_MEMMAP))
+- return NULL;
++ if (!efi_enabled(EFI_OLD_MEMMAP)) {
++ save_pgd = (pgd_t *)read_cr3();
++ write_cr3((unsigned long)efi_scratch.efi_pgt);
++ goto out;
++ }
+
+ early_code_mapping_set_exec(1);
+
+@@ -94,6 +88,7 @@ pgd_t * __init efi_call_phys_prolog(void
+ vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
+ set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
+ }
++out:
+ __flush_tlb_all();
+
+ return save_pgd;
+@@ -107,8 +102,11 @@ void __init efi_call_phys_epilog(pgd_t *
+ int pgd_idx;
+ int nr_pgds;
+
+- if (!save_pgd)
++ if (!efi_enabled(EFI_OLD_MEMMAP)) {
++ write_cr3((unsigned long)save_pgd);
++ __flush_tlb_all();
+ return;
++ }
+
+ nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+
+--- a/arch/x86/platform/efi/efi_stub_64.S
++++ b/arch/x86/platform/efi/efi_stub_64.S
+@@ -38,41 +38,6 @@
+ mov %rsi, %cr0; \
+ mov (%rsp), %rsp
+
+- /* stolen from gcc */
+- .macro FLUSH_TLB_ALL
+- movq %r15, efi_scratch(%rip)
+- movq %r14, efi_scratch+8(%rip)
+- movq %cr4, %r15
+- movq %r15, %r14
+- andb $0x7f, %r14b
+- movq %r14, %cr4
+- movq %r15, %cr4
+- movq efi_scratch+8(%rip), %r14
+- movq efi_scratch(%rip), %r15
+- .endm
+-
+- .macro SWITCH_PGT
+- cmpb $0, efi_scratch+24(%rip)
+- je 1f
+- movq %r15, efi_scratch(%rip) # r15
+- # save previous CR3
+- movq %cr3, %r15
+- movq %r15, efi_scratch+8(%rip) # prev_cr3
+- movq efi_scratch+16(%rip), %r15 # EFI pgt
+- movq %r15, %cr3
+- 1:
+- .endm
+-
+- .macro RESTORE_PGT
+- cmpb $0, efi_scratch+24(%rip)
+- je 2f
+- movq efi_scratch+8(%rip), %r15
+- movq %r15, %cr3
+- movq efi_scratch(%rip), %r15
+- FLUSH_TLB_ALL
+- 2:
+- .endm
+-
+ ENTRY(efi_call)
+ SAVE_XMM
+ mov (%rsp), %rax
+@@ -83,16 +48,8 @@ ENTRY(efi_call)
+ mov %r8, %r9
+ mov %rcx, %r8
+ mov %rsi, %rcx
+- SWITCH_PGT
+ call *%rdi
+- RESTORE_PGT
+ addq $48, %rsp
+ RESTORE_XMM
+ ret
+ ENDPROC(efi_call)
+-
+- .data
+-ENTRY(efi_scratch)
+- .fill 3,8,0
+- .byte 0
+- .quad 0
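
The hoisted page-table switch amounts to: save %cr3, install the EFI page
table, run the service, restore. A user-space model of that setup/teardown
pair; read_cr3()/write_cr3() are faked here (in the kernel they are
privileged register accessors, and each switch is followed by
__flush_tlb_all()):

/*
 * Sketch only (not kernel code) of arch_efi_call_virt_setup/teardown.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t cr3 = 0x1000;	/* pretend kernel page table */

static uintptr_t read_cr3(void) { return cr3; }

static void write_cr3(uintptr_t pgt)
{
	cr3 = pgt;
	printf("cr3 = %#lx\n", (unsigned long)pgt);
}

static struct {
	uintptr_t prev_cr3;
	uintptr_t efi_pgt;
	bool use_pgd;
} efi_scratch = { .efi_pgt = 0x2000, .use_pgd = true };

int main(void)
{
	/* arch_efi_call_virt_setup() */
	if (efi_scratch.use_pgd) {
		efi_scratch.prev_cr3 = read_cr3();
		write_cr3(efi_scratch.efi_pgt);
	}

	puts("... runtime service runs here ...");

	/* arch_efi_call_virt_teardown() */
	if (efi_scratch.use_pgd)
		write_cr3(efi_scratch.prev_cr3);
	return 0;
}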
diff --git a/patches.arch/qcom-0017-x86-efi-Build-our-own-page-table-structures.patch b/patches.arch/qcom-0017-x86-efi-Build-our-own-page-table-structures.patch
new file mode 100644
index 0000000000..6315324fa4
--- /dev/null
+++ b/patches.arch/qcom-0017-x86-efi-Build-our-own-page-table-structures.patch
@@ -0,0 +1,313 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Fri, 27 Nov 2015 21:09:34 +0000
+Subject: x86/efi: Build our own page table structures
+Git-commit: 67a9108ed4313b85a9c53406d80dc1ae3f8c3e36
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+With commit e1a58320a38d ("x86/mm: Warn on W^X mappings") all
+users booting on 64-bit UEFI machines see the following warning,
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 7 PID: 1 at arch/x86/mm/dump_pagetables.c:225 note_page+0x5dc/0x780()
+ x86/mm: Found insecure W+X mapping at address ffff88000005f000/0xffff88000005f000
+ ...
+ x86/mm: Checked W+X mappings: FAILED, 165660 W+X pages found.
+ ...
+
+This is caused by mapping EFI regions with RWX permissions.
+There isn't much we can do to restrict the permissions for these
+regions due to the way the firmware toolchains mix code and
+data, but we can at least isolate these mappings so that they do
+not appear in the regular kernel page tables.
+
+In commit d2f7cbe7b26a ("x86/efi: Runtime services virtual
+mapping") we started using 'trampoline_pgd' to map the EFI
+regions because there was an existing identity mapping there
+which we use during the SetVirtualAddressMap() call and for
+broken firmware that accesses those addresses.
+
+But 'trampoline_pgd' shares some PGD entries with
+'swapper_pg_dir' and does not provide the isolation we require.
+Notably the virtual address for __START_KERNEL_map and
+MODULES_START are mapped by the same PGD entry so we need to be
+more careful when copying changes over in
+efi_sync_low_kernel_mappings().
+
+This patch doesn't go the full mile; we still want to share some
+PGD entries with 'swapper_pg_dir'. Having completely separate
+page tables brings its own issues such as synchronising new
+mappings after memory hotplug and module loading. Sharing also
+keeps memory usage down.
+
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Borislav Petkov <bp@suse.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Jones <davej@codemonkey.org.uk>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Stephen Smalley <sds@tycho.nsa.gov>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Toshi Kani <toshi.kani@hp.com>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1448658575-17029-6-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/include/asm/efi.h | 1
+ arch/x86/platform/efi/efi.c | 39 +++++-----------
+ arch/x86/platform/efi/efi_32.c | 5 ++
+ arch/x86/platform/efi/efi_64.c | 97 ++++++++++++++++++++++++++++++++++-------
+ 4 files changed, 102 insertions(+), 40 deletions(-)
+
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -123,6 +123,7 @@ extern void __init efi_memory_uc(u64 add
+ extern void __init efi_map_region(efi_memory_desc_t *md);
+ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+ extern void efi_sync_low_kernel_mappings(void);
++extern int __init efi_alloc_page_tables(void);
+ extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+ extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
+ extern void __init old_map_region(efi_memory_desc_t *md);
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -854,7 +854,7 @@ static void __init kexec_enter_virtual_m
+ * This function will switch the EFI runtime services to virtual mode.
+ * Essentially, we look through the EFI memmap and map every region that
+ * has the runtime attribute bit set in its memory descriptor into the
+- * ->trampoline_pgd page table using a top-down VA allocation scheme.
++ * efi_pgd page table.
+ *
+ * The old method which used to update that memory descriptor with the
+ * virtual address obtained from ioremap() is still supported when the
+@@ -864,8 +864,8 @@ static void __init kexec_enter_virtual_m
+ *
+ * The new method does a pagetable switch in a preemption-safe manner
+ * so that we're in a different address space when calling a runtime
+- * function. For function arguments passing we do copy the PGDs of the
+- * kernel page table into ->trampoline_pgd prior to each call.
++ * function. For function arguments passing we do copy the PUDs of the
++ * kernel page table into efi_pgd prior to each call.
+ *
+ * Specially for kexec boot, efi runtime maps in previous kernel should
+ * be passed in via setup_data. In that case runtime ranges will be mapped
+@@ -880,6 +880,12 @@ static void __init __efi_enter_virtual_m
+
+ efi.systab = NULL;
+
++ if (efi_alloc_page_tables()) {
++ pr_err("Failed to allocate EFI page tables\n");
++ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++ return;
++ }
++
+ efi_merge_regions();
+ new_memmap = efi_map_regions(&count, &pg_shift);
+ if (!new_memmap) {
+@@ -939,28 +945,11 @@ static void __init __efi_enter_virtual_m
+ efi_runtime_mkexec();
+
+ /*
+- * We mapped the descriptor array into the EFI pagetable above but we're
+- * not unmapping it here. Here's why:
+- *
+- * We're copying select PGDs from the kernel page table to the EFI page
+- * table and when we do so and make changes to those PGDs like unmapping
+- * stuff from them, those changes appear in the kernel page table and we
+- * go boom.
+- *
+- * From setup_real_mode():
+- *
+- * ...
+- * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
+- *
+- * In this particular case, our allocation is in PGD 0 of the EFI page
+- * table but we've copied that PGD from PGD[272] of the EFI page table:
+- *
+- * pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
+- *
+- * where the direct memory mapping in kernel space is.
+- *
+- * new_memmap's VA comes from that direct mapping and thus clearing it,
+- * it would get cleared in the kernel page table too.
++ * We mapped the descriptor array into the EFI pagetable above
++ * but we're not unmapping it here because if we're running in
++ * EFI mixed mode we need all of memory to be accessible when
++ * we pass parameters to the EFI runtime services in the
++ * thunking code.
+ *
+ * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
+ */
+--- a/arch/x86/platform/efi/efi_32.c
++++ b/arch/x86/platform/efi/efi_32.c
+@@ -38,6 +38,11 @@
+ * say 0 - 3G.
+ */
+
++int __init efi_alloc_page_tables(void)
++{
++ return 0;
++}
++
+ void efi_sync_low_kernel_mappings(void) {}
+ void __init efi_dump_pagetable(void) {}
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -40,6 +40,7 @@
+ #include <asm/fixmap.h>
+ #include <asm/realmode.h>
+ #include <asm/time.h>
++#include <asm/pgalloc.h>
+
+ /*
+ * We allocate runtime services regions bottom-up, starting from -4G, i.e.
+@@ -119,22 +120,92 @@ void __init efi_call_phys_epilog(pgd_t *
+ early_code_mapping_set_exec(0);
+ }
+
++static pgd_t *efi_pgd;
++
++/*
++ * We need our own copy of the higher levels of the page tables
++ * because we want to avoid inserting EFI region mappings (EFI_VA_END
++ * to EFI_VA_START) into the standard kernel page tables. Everything
++ * else can be shared, see efi_sync_low_kernel_mappings().
++ */
++int __init efi_alloc_page_tables(void)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ gfp_t gfp_mask;
++
++ if (efi_enabled(EFI_OLD_MEMMAP))
++ return 0;
++
++ gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
++ efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
++ if (!efi_pgd)
++ return -ENOMEM;
++
++ pgd = efi_pgd + pgd_index(EFI_VA_END);
++
++ pud = pud_alloc_one(NULL, 0);
++ if (!pud) {
++ free_page((unsigned long)efi_pgd);
++ return -ENOMEM;
++ }
++
++ pgd_populate(NULL, pgd, pud);
++
++ return 0;
++}
++
+ /*
+ * Add low kernel mappings for passing arguments to EFI functions.
+ */
+ void efi_sync_low_kernel_mappings(void)
+ {
+- unsigned num_pgds;
+- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
++ unsigned num_entries;
++ pgd_t *pgd_k, *pgd_efi;
++ pud_t *pud_k, *pud_efi;
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+- num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
++ /*
++ * We can share all PGD entries apart from the one entry that
++ * covers the EFI runtime mapping space.
++ *
++ * Make sure the EFI runtime region mappings are guaranteed to
++ * only span a single PGD entry and that the entry also maps
++ * other important kernel regions.
++ */
++ BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
++ BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
++ (EFI_VA_END & PGDIR_MASK));
++
++ pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
++ pgd_k = pgd_offset_k(PAGE_OFFSET);
++
++ num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
++ memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
+
+- memcpy(pgd + pgd_index(PAGE_OFFSET),
+- init_mm.pgd + pgd_index(PAGE_OFFSET),
+- sizeof(pgd_t) * num_pgds);
++ /*
++ * We share all the PUD entries apart from those that map the
++ * EFI regions. Copy around them.
++ */
++ BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
++ BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
++
++ pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
++ pud_efi = pud_offset(pgd_efi, 0);
++
++ pgd_k = pgd_offset_k(EFI_VA_END);
++ pud_k = pud_offset(pgd_k, 0);
++
++ num_entries = pud_index(EFI_VA_END);
++ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
++
++ pud_efi = pud_offset(pgd_efi, EFI_VA_START);
++ pud_k = pud_offset(pgd_k, EFI_VA_START);
++
++ num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
++ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+ }
+
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+@@ -148,8 +219,8 @@ int __init efi_setup_page_tables(unsigne
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return 0;
+
+- efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+- pgd = __va(efi_scratch.efi_pgt);
++ efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
++ pgd = efi_pgd;
+
+ /*
+ * It can happen that the physical address of new_memmap lands in memory
+@@ -212,15 +283,13 @@ int __init efi_setup_page_tables(unsigne
+
+ void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+-
+- kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
++ kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
+ }
+
+ static void __init __map_region(efi_memory_desc_t *md, u64 va)
+ {
+- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+ unsigned long pf = 0;
++ pgd_t *pgd = efi_pgd;
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ pf |= _PAGE_PCD;
+@@ -328,9 +397,7 @@ void __init efi_runtime_mkexec(void)
+ void __init efi_dump_pagetable(void)
+ {
+ #ifdef CONFIG_EFI_PGT_DUMP
+- pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+-
+- ptdump_walk_pgd_level(NULL, pgd);
++ ptdump_walk_pgd_level(NULL, efi_pgd);
+ #endif
+ }
+
diff --git a/patches.arch/qcom-0018-x86-efi-Setup-separate-EFI-page-tables-in-kexec-path.patch b/patches.arch/qcom-0018-x86-efi-Setup-separate-EFI-page-tables-in-kexec-path.patch
new file mode 100644
index 0000000000..7057822ffa
--- /dev/null
+++ b/patches.arch/qcom-0018-x86-efi-Setup-separate-EFI-page-tables-in-kexec-path.patch
@@ -0,0 +1,86 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Thu, 21 Jan 2016 14:11:59 +0000
+Subject: x86/efi: Setup separate EFI page tables in kexec paths
+Git-commit: 753b11ef8e92a1c1bbe97f2a5ec14bdd1ef2e6fe
+Patch-mainline: v4.6-rc1
+References: fate#320512
+
+The switch to using a new dedicated page table for EFI runtime
+calls in commit 67a9108ed431 ("x86/efi: Build our own
+page table structures") failed to take into account changes
+required for the kexec code paths, which are unfortunately
+duplicated in the EFI code.
+
+Call the allocation and setup functions in
+kexec_enter_virtual_mode() just like we do for
+__efi_enter_virtual_mode() to avoid hitting NULL-pointer
+dereferences when making EFI runtime calls.
+
+At the very least, the call to efi_setup_page_tables() should
+have existed for kexec before the following commit:
+
+ 67a9108ed431 ("x86/efi: Build our own page table structures")
+
+Things just magically worked because we were actually using
+the kernel's page tables that contained the required mappings.
+
+Reported-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Tested-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1453385519-11477-1-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 3c1f3cd..bdd9477 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -815,6 +815,7 @@ static void __init kexec_enter_virtual_mode(void)
+ {
+ #ifdef CONFIG_KEXEC_CORE
+ efi_memory_desc_t *md;
++ unsigned int num_pages;
+ void *p;
+
+ efi.systab = NULL;
+@@ -829,6 +830,12 @@ static void __init kexec_enter_virtual_mode(void)
+ return;
+ }
+
++ if (efi_alloc_page_tables()) {
++ pr_err("Failed to allocate EFI page tables\n");
++ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++ return;
++ }
++
+ /*
+ * Map efi regions which were passed via setup_data. The virt_addr is a
+ * fixed addr which was used in first kernel of a kexec boot.
+@@ -843,6 +850,14 @@ static void __init kexec_enter_virtual_mode(void)
+
+ BUG_ON(!efi.systab);
+
++ num_pages = ALIGN(memmap.nr_map * memmap.desc_size, PAGE_SIZE);
++ num_pages >>= PAGE_SHIFT;
++
++ if (efi_setup_page_tables(memmap.phys_map, num_pages)) {
++ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++ return;
++ }
++
+ efi_sync_low_kernel_mappings();
+
+ /*
diff --git a/patches.arch/qcom-0019-x86-rtc-Replace-paravirt-rtc-check-with-platform-leg.patch b/patches.arch/qcom-0019-x86-rtc-Replace-paravirt-rtc-check-with-platform-leg.patch
new file mode 100644
index 0000000000..3672f324ab
--- /dev/null
+++ b/patches.arch/qcom-0019-x86-rtc-Replace-paravirt-rtc-check-with-platform-leg.patch
@@ -0,0 +1,336 @@
+From: "Luis R. Rodriguez" <mcgrof@kernel.org>
+Date: Wed, 13 Apr 2016 17:04:34 -0700
+Subject: x86/rtc: Replace paravirt rtc check with platform legacy quirk
+Git-commit: 8d152e7a5c7537b18b4e9e0eb96f549b016636dc
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+We have 4 types of x86 platforms that disable RTC:
+
+ * Intel MID
+ * Lguest - uses paravirt
+ * Xen domU - uses paravirt
+ * x86 on legacy systems annotated with an ACPI legacy flag
+
+We can consolidate all of these into a platform-specific legacy
+quirk set early in boot through i386_start_kernel() and
+x86_64_start_reservations(). This handles the RTC quirks that we can
+detect through the hardware subarch; the ACPI check can be dealt
+with separately.
+
+For Xen things are a bit more complex, given that the @X86_SUBARCH_XEN
+x86_hardware_subarch is shared by Xen domU and dom0, both of which use
+the PV path. Since the semantics for differentiating between the two
+are Xen-specific, we provide a platform helper to override the default
+legacy features -- x86_platform.set_legacy_features(). Use of this
+helper is highly discouraged; its only purpose should be to account
+for the lack of semantics available within your given
+x86_hardware_subarch.
+
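+For illustration only, a minimal sketch of such an override (the
+platform name and callback are hypothetical; dom0 Xen in the diff
+below does the same thing for the RTC):
+
+	static void __init myplat_set_legacy_features(void)
+	{
+		/* hypothetical platform: no CMOS RTC behind port 0x70 */
+		x86_platform.legacy.rtc = 0;
+	}
+
+	/* must be set before x86_early_init_platform_quirks() runs */
+	x86_platform.set_legacy_features = myplat_set_legacy_features;
+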
+As per 0-day, this bumps the vmlinux size using i386-tinyconfig as
+follows:
+
+  TOTAL  TEXT  init.text  x86_early_init_platform_quirks()
+  +70    +62   +62        +43
+
+Only 8 bytes of that remain after boot (70 total minus the 62 bytes
+of init.text), as the main increase in size is all freed via __init.
+
+Suggested-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Luis R. Rodriguez <mcgrof@kernel.org>
+Reviewed-by: Juergen Gross <jgross@suse.com>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: andrew.cooper3@citrix.com
+Cc: andriy.shevchenko@linux.intel.com
+Cc: bigeasy@linutronix.de
+Cc: boris.ostrovsky@oracle.com
+Cc: david.vrabel@citrix.com
+Cc: ffainelli@freebox.fr
+Cc: george.dunlap@citrix.com
+Cc: glin@suse.com
+Cc: jlee@suse.com
+Cc: josh@joshtriplett.org
+Cc: julien.grall@linaro.org
+Cc: konrad.wilk@oracle.com
+Cc: kozerkov@parallels.com
+Cc: lenb@kernel.org
+Cc: lguest@lists.ozlabs.org
+Cc: linux-acpi@vger.kernel.org
+Cc: lv.zheng@intel.com
+Cc: matt@codeblueprint.co.uk
+Cc: mbizon@freebox.fr
+Cc: rjw@rjwysocki.net
+Cc: robert.moore@intel.com
+Cc: rusty@rustcorp.com.au
+Cc: tiwai@suse.de
+Cc: toshi.kani@hp.com
+Cc: xen-devel@lists.xensource.com
+Link: http://lkml.kernel.org/r/1460592286-300-5-git-send-email-mcgrof@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/Makefile | 1 +
+ arch/x86/include/asm/paravirt.h | 6 ------
+ arch/x86/include/asm/paravirt_types.h | 5 -----
+ arch/x86/include/asm/processor.h | 1 -
+ arch/x86/include/asm/x86_init.h | 21 +++++++++++++++++++++
+ arch/x86/kernel/Makefile | 6 +++++-
+ arch/x86/kernel/head32.c | 2 ++
+ arch/x86/kernel/head64.c | 1 +
+ arch/x86/kernel/platform-quirks.c | 21 +++++++++++++++++++++
+ arch/x86/kernel/rtc.c | 7 ++-----
+ arch/x86/lguest/boot.c | 1 -
+ arch/x86/xen/enlighten.c | 10 +++++++---
+ 12 files changed, 60 insertions(+), 22 deletions(-)
+ create mode 100644 arch/x86/kernel/platform-quirks.c
+
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -211,6 +211,7 @@ endif
+ head-y := arch/x86/kernel/head_$(BITS).o
+ head-y += arch/x86/kernel/head$(BITS).o
+ head-y += arch/x86/kernel/head.o
++head-y += arch/x86/kernel/platform-quirks.o
+
+ libs-y += arch/x86/lib/
+
+--- a/arch/x86/include/asm/paravirt.h
++++ b/arch/x86/include/asm/paravirt.h
+@@ -19,12 +19,6 @@ static inline int paravirt_enabled(void)
+ return pv_info.paravirt_enabled;
+ }
+
+-static inline int paravirt_has_feature(unsigned int feature)
+-{
+- WARN_ON_ONCE(!pv_info.paravirt_enabled);
+- return (pv_info.features & feature);
+-}
+-
+ static inline void load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+ {
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -70,14 +70,9 @@ struct pv_info {
+ #endif
+
+ int paravirt_enabled;
+- unsigned int features; /* valid only if paravirt_enabled is set */
+ const char *name;
+ };
+
+-#define paravirt_has(x) paravirt_has_feature(PV_SUPPORTED_##x)
+-/* Supported features */
+-#define PV_SUPPORTED_RTC (1<<0)
+-
+ struct pv_init_ops {
+ /*
+ * Patch may replace one of the defined code sequences with
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -474,7 +474,6 @@ static inline unsigned long current_top_
+ #else
+ #define __cpuid native_cpuid
+ #define paravirt_enabled() 0
+-#define paravirt_has(x) 0
+
+ static inline void load_sp0(struct tss_struct *tss,
+ struct thread_struct *thread)
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -144,6 +144,15 @@ struct x86_cpuinit_ops {
+ struct timespec;
+
+ /**
++ * struct x86_legacy_features - legacy x86 features
++ *
++ * @rtc: this device has a CMOS real-time clock present
++ */
++struct x86_legacy_features {
++ int rtc;
++};
++
++/**
+ * struct x86_platform_ops - platform specific runtime functions
+ * @calibrate_tsc: calibrate TSC
+ * @get_wallclock: get time from HW clock like RTC etc.
+@@ -154,6 +163,14 @@ struct timespec;
+ * @save_sched_clock_state: save state for sched_clock() on suspend
+ * @restore_sched_clock_state: restore state for sched_clock() on resume
+ * @apic_post_init: adjust apic if neeeded
++ * @legacy: legacy features
++ * @set_legacy_features: override legacy features. Use of this callback
++ * is highly discouraged. You should only need
++ * this if your hardware platform requires further
++ * custom fine tuning far beyong what may be
++ * possible in x86_early_init_platform_quirks() by
++ * only using the current x86_hardware_subarch
++ * semantics.
+ */
+ struct x86_platform_ops {
+ unsigned long (*calibrate_tsc)(void);
+@@ -167,6 +184,8 @@ struct x86_platform_ops {
+ void (*save_sched_clock_state)(void);
+ void (*restore_sched_clock_state)(void);
+ void (*apic_post_init)(void);
++ struct x86_legacy_features legacy;
++ void (*set_legacy_features)(void);
+ };
+
+ struct pci_dev;
+@@ -188,6 +207,8 @@ extern struct x86_cpuinit_ops x86_cpuini
+ extern struct x86_platform_ops x86_platform;
+ extern struct x86_msi_ops x86_msi;
+ extern struct x86_io_apic_ops x86_io_apic_ops;
++
++extern void x86_early_init_platform_quirks(void);
+ extern void x86_init_noop(void);
+ extern void x86_init_uint_noop(unsigned int unused);
+
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -2,7 +2,11 @@
+ # Makefile for the linux kernel.
+ #
+
+-extra-y := head_$(BITS).o head$(BITS).o head.o vmlinux.lds
++extra-y := head_$(BITS).o
++extra-y += head$(BITS).o
++extra-y += head.o
++extra-y += platform-quirks.o
++extra-y += vmlinux.lds
+
+ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
+
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -34,6 +34,8 @@ asmlinkage __visible void __init i386_st
+ cr4_init_shadow();
+ sanitize_boot_params(&boot_params);
+
++ x86_early_init_platform_quirks();
++
+ /* Call the subarch specific early setup function */
+ switch (boot_params.hdr.hardware_subarch) {
+ case X86_SUBARCH_INTEL_MID:
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -190,6 +190,7 @@ void __init x86_64_start_reservations(ch
+ if (!boot_params.hdr.version)
+ copy_bootdata(__va(real_mode_data));
+
++ x86_early_init_platform_quirks();
+ reserve_ebda_region();
+
+ start_kernel();
+--- /dev/null
++++ b/arch/x86/kernel/platform-quirks.c
+@@ -0,0 +1,21 @@
++#include <linux/kernel.h>
++#include <linux/init.h>
++
++#include <asm/setup.h>
++#include <asm/bios_ebda.h>
++
++void __init x86_early_init_platform_quirks(void)
++{
++ x86_platform.legacy.rtc = 1;
++
++ switch (boot_params.hdr.hardware_subarch) {
++ case X86_SUBARCH_XEN:
++ case X86_SUBARCH_LGUEST:
++ case X86_SUBARCH_INTEL_MID:
++ x86_platform.legacy.rtc = 0;
++ break;
++ }
++
++ if (x86_platform.set_legacy_features)
++ x86_platform.set_legacy_features();
++}
+--- a/arch/x86/kernel/rtc.c
++++ b/arch/x86/kernel/rtc.c
+@@ -14,6 +14,7 @@
+ #include <asm/time.h>
+ #include <asm/intel-mid.h>
+ #include <asm/rtc.h>
++#include <asm/setup.h>
+
+ #ifdef CONFIG_X86_32
+ /*
+@@ -188,10 +189,6 @@ static __init int add_rtc_cmos(void)
+ if (of_have_populated_dt())
+ return 0;
+
+- /* Intel MID platforms don't have ioport rtc */
+- if (intel_mid_identify_cpu())
+- return -ENODEV;
+-
+ #ifdef CONFIG_ACPI
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) {
+ /* This warning can likely go away again in a year or two. */
+@@ -200,7 +197,7 @@ static __init int add_rtc_cmos(void)
+ }
+ #endif
+
+- if (paravirt_enabled() && !paravirt_has(RTC))
++ if (!x86_platform.legacy.rtc)
+ return -ENODEV;
+
+ platform_device_register(&rtc_device);
+--- a/arch/x86/lguest/boot.c
++++ b/arch/x86/lguest/boot.c
+@@ -1414,7 +1414,6 @@ __init void lguest_init(void)
+ pv_info.kernel_rpl = 1;
+ /* Everyone except Xen runs with this set. */
+ pv_info.shared_kernel_pmd = 1;
+- pv_info.features = 0;
+
+ /*
+ * We set up all the lguest overrides for sensitive operations. These
+--- a/arch/x86/xen/enlighten.c
++++ b/arch/x86/xen/enlighten.c
+@@ -1191,7 +1191,6 @@ static const struct pv_info xen_info __i
+ #ifdef CONFIG_X86_64
+ .extra_user_64bit_cs = FLAT_USER_CS64,
+ #endif
+- .features = 0,
+ .name = "Xen",
+ };
+
+@@ -1513,6 +1512,11 @@ static void __init xen_pvh_early_guest_i
+ }
+ #endif /* CONFIG_XEN_PVH */
+
++static void __init xen_dom0_set_legacy_features(void)
++{
++ x86_platform.legacy.rtc = 1;
++}
++
+ /* First C function to be called on Xen boot */
+ asmlinkage __visible void __init xen_start_kernel(void)
+ {
+@@ -1533,8 +1537,6 @@ asmlinkage __visible void __init xen_sta
+
+ /* Install Xen paravirt ops */
+ pv_info = xen_info;
+- if (xen_initial_domain())
+- pv_info.features |= PV_SUPPORTED_RTC;
+ pv_init_ops = xen_init_ops;
+ pv_apic_ops = xen_apic_ops;
+ if (!xen_pvh_domain()) {
+@@ -1687,6 +1689,8 @@ asmlinkage __visible void __init xen_sta
+ .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
+ };
+
++ x86_platform.set_legacy_features =
++ xen_dom0_set_legacy_features;
+ xen_init_vga(info, xen_start_info->console.dom0.info_size);
+ xen_start_info->console.domU.mfn = 0;
+ xen_start_info->console.domU.evtchn = 0;
diff --git a/patches.arch/qcom-0020-x86-init-Use-a-platform-legacy-quirk-for-EBDA.patch b/patches.arch/qcom-0020-x86-init-Use-a-platform-legacy-quirk-for-EBDA.patch
new file mode 100644
index 0000000000..b34ae5d28e
--- /dev/null
+++ b/patches.arch/qcom-0020-x86-init-Use-a-platform-legacy-quirk-for-EBDA.patch
@@ -0,0 +1,115 @@
+From: "Luis R. Rodriguez" <mcgrof@kernel.org>
+Date: Wed, 13 Apr 2016 17:04:36 -0700
+Subject: x86/init: Use a platform legacy quirk for EBDA
+Git-commit: 1330e3bc544a1951d81b7f3c7d4cecf77d906f67
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+This replaces the paravirt_enabled() check with a
+proper x86 legacy platform quirk.
+
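+The conversion itself is a one-line predicate flip in
+reserve_ebda_region(), shown in full in the diff below:
+
+	/* before: skip the EBDA scan on any paravirt guest */
+	if (paravirt_enabled())
+		return;
+
+	/* after: scan only when the platform flags low RAM as safe */
+	if (!x86_platform.legacy.ebda_search)
+		return;
+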
+As per 0-day, this bumps the vmlinux size using i386-tinyconfig as
+follows:
+
+  TOTAL  TEXT  init.text  x86_early_init_platform_quirks()
+  +39    +35   +35        +25
+
+That's 4 bytes of permanent overhead (39 total minus the 35 bytes of
+init.text); the rest is all freed after init, as it's all __init text.
+
+v2: document 0-day vmlinux size impact
+
+Signed-off-by: Luis R. Rodriguez <mcgrof@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: andrew.cooper3@citrix.com
+Cc: andriy.shevchenko@linux.intel.com
+Cc: bigeasy@linutronix.de
+Cc: boris.ostrovsky@oracle.com
+Cc: david.vrabel@citrix.com
+Cc: ffainelli@freebox.fr
+Cc: george.dunlap@citrix.com
+Cc: glin@suse.com
+Cc: jgross@suse.com
+Cc: jlee@suse.com
+Cc: josh@joshtriplett.org
+Cc: julien.grall@linaro.org
+Cc: konrad.wilk@oracle.com
+Cc: kozerkov@parallels.com
+Cc: lenb@kernel.org
+Cc: lguest@lists.ozlabs.org
+Cc: linux-acpi@vger.kernel.org
+Cc: lv.zheng@intel.com
+Cc: matt@codeblueprint.co.uk
+Cc: mbizon@freebox.fr
+Cc: rjw@rjwysocki.net
+Cc: robert.moore@intel.com
+Cc: rusty@rustcorp.com.au
+Cc: tiwai@suse.de
+Cc: toshi.kani@hp.com
+Cc: xen-devel@lists.xensource.com
+Link: http://lkml.kernel.org/r/1460592286-300-7-git-send-email-mcgrof@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/include/asm/x86_init.h | 3 +++
+ arch/x86/kernel/head.c | 2 +-
+ arch/x86/kernel/platform-quirks.c | 4 ++++
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
+index 8bb8c1a..89d9d57 100644
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -145,9 +145,12 @@ struct timespec;
+ * struct x86_legacy_features - legacy x86 features
+ *
+ * @rtc: this device has a CMOS real-time clock present
++ * @ebda_search: it's safe to search for the EBDA signature in the hardware's
++ * low RAM
+ */
+ struct x86_legacy_features {
+ int rtc;
++ int ebda_search;
+ };
+
+ /**
+diff --git a/arch/x86/kernel/head.c b/arch/x86/kernel/head.c
+index 992f442..afe65df 100644
+--- a/arch/x86/kernel/head.c
++++ b/arch/x86/kernel/head.c
+@@ -38,7 +38,7 @@ void __init reserve_ebda_region(void)
+ * that the paravirt case can handle memory setup
+ * correctly, without our help.
+ */
+- if (paravirt_enabled())
++ if (!x86_platform.legacy.ebda_search)
+ return;
+
+ /* end of low (conventional) memory */
+diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
+index 021a5f9..01b1597 100644
+--- a/arch/x86/kernel/platform-quirks.c
++++ b/arch/x86/kernel/platform-quirks.c
+@@ -7,8 +7,12 @@
+ void __init x86_early_init_platform_quirks(void)
+ {
+ x86_platform.legacy.rtc = 1;
++ x86_platform.legacy.ebda_search = 0;
+
+ switch (boot_params.hdr.hardware_subarch) {
++ case X86_SUBARCH_PC:
++ x86_platform.legacy.ebda_search = 1;
++ break;
+ case X86_SUBARCH_XEN:
+ case X86_SUBARCH_LGUEST:
+ case X86_SUBARCH_INTEL_MID:
+--
+2.10.0
+
diff --git a/patches.arch/qcom-0021-x86-init-Rename-EBDA-code-file.patch b/patches.arch/qcom-0021-x86-init-Rename-EBDA-code-file.patch
new file mode 100644
index 0000000000..f1b9718bad
--- /dev/null
+++ b/patches.arch/qcom-0021-x86-init-Rename-EBDA-code-file.patch
@@ -0,0 +1,228 @@
+From: "Luis R. Rodriguez" <mcgrof@kernel.org>
+Date: Wed, 13 Apr 2016 17:04:43 -0700
+Subject: x86/init: Rename EBDA code file
+Git-commit: f2d85299b7f11f73cc0a294e396cdae114e75787
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+head.c now holds only the EBDA reservation code, so rename it to
+ebda.c; the new name makes it much clearer what the file contains.
+
+Signed-off-by: Luis R. Rodriguez <mcgrof@kernel.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: andrew.cooper3@citrix.com
+Cc: andriy.shevchenko@linux.intel.com
+Cc: bigeasy@linutronix.de
+Cc: boris.ostrovsky@oracle.com
+Cc: david.vrabel@citrix.com
+Cc: ffainelli@freebox.fr
+Cc: george.dunlap@citrix.com
+Cc: glin@suse.com
+Cc: jgross@suse.com
+Cc: jlee@suse.com
+Cc: josh@joshtriplett.org
+Cc: julien.grall@linaro.org
+Cc: konrad.wilk@oracle.com
+Cc: kozerkov@parallels.com
+Cc: lenb@kernel.org
+Cc: lguest@lists.ozlabs.org
+Cc: linux-acpi@vger.kernel.org
+Cc: lv.zheng@intel.com
+Cc: matt@codeblueprint.co.uk
+Cc: mbizon@freebox.fr
+Cc: rjw@rjwysocki.net
+Cc: robert.moore@intel.com
+Cc: rusty@rustcorp.com.au
+Cc: tiwai@suse.de
+Cc: toshi.kani@hp.com
+Cc: xen-devel@lists.xensource.com
+Link: http://lkml.kernel.org/r/1460592286-300-14-git-send-email-mcgrof@kernel.org
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/Makefile | 2 -
+ arch/x86/kernel/Makefile | 2 -
+ arch/x86/kernel/ebda.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/head.c | 70 ----------------------------------------------
+ 4 files changed, 73 insertions(+), 72 deletions(-)
+
+--- a/arch/x86/Makefile
++++ b/arch/x86/Makefile
+@@ -210,7 +210,7 @@ endif
+
+ head-y := arch/x86/kernel/head_$(BITS).o
+ head-y += arch/x86/kernel/head$(BITS).o
+-head-y += arch/x86/kernel/head.o
++head-y += arch/x86/kernel/ebda.o
+ head-y += arch/x86/kernel/platform-quirks.o
+
+ libs-y += arch/x86/lib/
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -4,7 +4,7 @@
+
+ extra-y := head_$(BITS).o
+ extra-y += head$(BITS).o
+-extra-y += head.o
++extra-y += ebda.o
+ extra-y += platform-quirks.o
+ extra-y += vmlinux.lds
+
+--- /dev/null
++++ b/arch/x86/kernel/ebda.c
+@@ -0,0 +1,71 @@
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/memblock.h>
++
++#include <asm/setup.h>
++#include <asm/bios_ebda.h>
++
++/*
++ * The BIOS places the EBDA/XBDA at the top of conventional
++ * memory, and usually decreases the reported amount of
++ * conventional memory (int 0x12) too. This also contains a
++ * workaround for Dell systems that neglect to reserve EBDA.
++ * The same workaround also avoids a problem with the AMD768MPX
++ * chipset: reserve a page before VGA to prevent PCI prefetch
++ * into it (errata #56). Usually the page is reserved anyways,
++ * unless you have no PS/2 mouse plugged in.
++ *
++ * This functions is deliberately very conservative. Losing
++ * memory in the bottom megabyte is rarely a problem, as long
++ * as we have enough memory to install the trampoline. Using
++ * memory that is in use by the BIOS or by some DMA device
++ * the BIOS didn't shut down *is* a big problem.
++ */
++
++#define BIOS_LOWMEM_KILOBYTES 0x413
++#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
++#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
++
++void __init reserve_ebda_region(void)
++{
++ unsigned int lowmem, ebda_addr;
++
++ /*
++ * To determine the position of the EBDA and the
++ * end of conventional memory, we need to look at
++ * the BIOS data area. In a paravirtual environment
++ * that area is absent. We'll just have to assume
++ * that the paravirt case can handle memory setup
++ * correctly, without our help.
++ */
++ if (!x86_platform.legacy.ebda_search)
++ return;
++
++ /* end of low (conventional) memory */
++ lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
++ lowmem <<= 10;
++
++ /* start of EBDA area */
++ ebda_addr = get_bios_ebda();
++
++ /*
++ * Note: some old Dells seem to need 4k EBDA without
++ * reporting so, so just consider the memory above 0x9f000
++ * to be off limits (bugzilla 2990).
++ */
++
++ /* If the EBDA address is below 128K, assume it is bogus */
++ if (ebda_addr < INSANE_CUTOFF)
++ ebda_addr = LOWMEM_CAP;
++
++ /* If lowmem is less than 128K, assume it is bogus */
++ if (lowmem < INSANE_CUTOFF)
++ lowmem = LOWMEM_CAP;
++
++ /* Use the lower of the lowmem and EBDA markers as the cutoff */
++ lowmem = min(lowmem, ebda_addr);
++ lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
++
++ /* reserve all memory between lowmem and the 1MB mark */
++ memblock_reserve(lowmem, 0x100000 - lowmem);
++}
+--- a/arch/x86/kernel/head.c
++++ b/arch/x86/kernel/head.c
+@@ -1,71 +1 @@
+-#include <linux/kernel.h>
+-#include <linux/init.h>
+-#include <linux/memblock.h>
+
+-#include <asm/setup.h>
+-#include <asm/bios_ebda.h>
+-
+-/*
+- * The BIOS places the EBDA/XBDA at the top of conventional
+- * memory, and usually decreases the reported amount of
+- * conventional memory (int 0x12) too. This also contains a
+- * workaround for Dell systems that neglect to reserve EBDA.
+- * The same workaround also avoids a problem with the AMD768MPX
+- * chipset: reserve a page before VGA to prevent PCI prefetch
+- * into it (errata #56). Usually the page is reserved anyways,
+- * unless you have no PS/2 mouse plugged in.
+- *
+- * This functions is deliberately very conservative. Losing
+- * memory in the bottom megabyte is rarely a problem, as long
+- * as we have enough memory to install the trampoline. Using
+- * memory that is in use by the BIOS or by some DMA device
+- * the BIOS didn't shut down *is* a big problem.
+- */
+-
+-#define BIOS_LOWMEM_KILOBYTES 0x413
+-#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
+-#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
+-
+-void __init reserve_ebda_region(void)
+-{
+- unsigned int lowmem, ebda_addr;
+-
+- /*
+- * To determine the position of the EBDA and the
+- * end of conventional memory, we need to look at
+- * the BIOS data area. In a paravirtual environment
+- * that area is absent. We'll just have to assume
+- * that the paravirt case can handle memory setup
+- * correctly, without our help.
+- */
+- if (!x86_platform.legacy.ebda_search)
+- return;
+-
+- /* end of low (conventional) memory */
+- lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
+- lowmem <<= 10;
+-
+- /* start of EBDA area */
+- ebda_addr = get_bios_ebda();
+-
+- /*
+- * Note: some old Dells seem to need 4k EBDA without
+- * reporting so, so just consider the memory above 0x9f000
+- * to be off limits (bugzilla 2990).
+- */
+-
+- /* If the EBDA address is below 128K, assume it is bogus */
+- if (ebda_addr < INSANE_CUTOFF)
+- ebda_addr = LOWMEM_CAP;
+-
+- /* If lowmem is less than 128K, assume it is bogus */
+- if (lowmem < INSANE_CUTOFF)
+- lowmem = LOWMEM_CAP;
+-
+- /* Use the lower of the lowmem and EBDA markers as the cutoff */
+- lowmem = min(lowmem, ebda_addr);
+- lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
+-
+- /* reserve all memory between lowmem and the 1MB mark */
+- memblock_reserve(lowmem, 0x100000 - lowmem);
+-}
diff --git a/patches.arch/qcom-0022-efi-Iterate-over-efi.memmap-in-for_each_efi_memory_d.patch b/patches.arch/qcom-0022-efi-Iterate-over-efi.memmap-in-for_each_efi_memory_d.patch
new file mode 100644
index 0000000000..03c6bf1c23
--- /dev/null
+++ b/patches.arch/qcom-0022-efi-Iterate-over-efi.memmap-in-for_each_efi_memory_d.patch
@@ -0,0 +1,332 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Mon, 25 Apr 2016 21:06:38 +0100
+Subject: efi: Iterate over efi.memmap in for_each_efi_memory_desc()
+Git-commit: 78ce248faa3c46e24e9bd42db3ab3650659f16dd (partial)
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+Most of the users of for_each_efi_memory_desc() are equally happy
+iterating over the EFI memory map in efi.memmap instead of 'memmap',
+since the former is usually a pointer to the latter.
+
+For those users that want to specify an EFI memory map other than
+efi.memmap, that can be done using for_each_efi_memory_desc_in_map().
+One such example is in the libstub code, where the firmware is queried
+directly for the memory map, which is then iterated over and freed.
+
+This change goes part of the way toward deleting the global 'memmap'
+variable, which is not universally available on all architectures
+(notably IA64) and is rather poorly named.
+
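+A representative conversion, with the descriptor stride handled by the
+macro instead of open-coded pointer arithmetic (the tally is purely
+illustrative):
+
+	efi_memory_desc_t *md;
+	u64 pages = 0;
+
+	for_each_efi_memory_desc(md) {
+		if (md->type == EFI_CONVENTIONAL_MEMORY)
+			pages += md->num_pages;
+	}
+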
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Mark Salter <msalter@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1461614832-17633-7-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+[mb: drop hunk efi_runtime_update_mappings]
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi.c | 43 +++++++------------------
+ arch/x86/platform/efi/efi_64.c | 6 +--
+ arch/x86/platform/efi/quirks.c | 10 ++---
+ drivers/firmware/efi/arm-init.c | 4 +-
+ drivers/firmware/efi/arm-runtime.c | 2 -
+ drivers/firmware/efi/efi.c | 6 ---
+ drivers/firmware/efi/fake_mem.c | 3 -
+ drivers/firmware/efi/libstub/efi-stub-helper.c | 6 ++-
+ include/linux/efi.h | 11 +++++-
+ 9 files changed, 38 insertions(+), 53 deletions(-)
+
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -119,11 +119,10 @@ void efi_get_time(struct timespec *now)
+
+ void __init efi_find_mirror(void)
+ {
+- void *p;
++ efi_memory_desc_t *md;
+ u64 mirror_size = 0, total_size = 0;
+
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- efi_memory_desc_t *md = p;
++ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+@@ -146,10 +145,9 @@ void __init efi_find_mirror(void)
+
+ static void __init do_add_efi_memmap(void)
+ {
+- void *p;
++ efi_memory_desc_t *md;
+
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- efi_memory_desc_t *md = p;
++ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+ int e820_type;
+@@ -226,17 +224,13 @@ void __init efi_print_memmap(void)
+ {
+ #ifdef EFI_DEBUG
+ efi_memory_desc_t *md;
+- void *p;
+- int i;
++ int i = 0;
+
+- for (p = memmap.map, i = 0;
+- p < memmap.map_end;
+- p += memmap.desc_size, i++) {
++ for_each_efi_memory_desc(md) {
+ char buf[64];
+
+- md = p;
+ pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n",
+- i, efi_md_typeattr_format(buf, sizeof(buf), md),
++ i++, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr,
+ md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+ (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
+@@ -552,12 +546,9 @@ void __init efi_set_executable(efi_memor
+ void __init runtime_code_page_mkexec(void)
+ {
+ efi_memory_desc_t *md;
+- void *p;
+
+ /* Make EFI runtime service code area executable */
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- md = p;
+-
++ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_RUNTIME_SERVICES_CODE)
+ continue;
+
+@@ -604,12 +595,10 @@ void __init old_map_region(efi_memory_de
+ /* Merge contiguous regions of the same type and attribute */
+ static void __init efi_merge_regions(void)
+ {
+- void *p;
+ efi_memory_desc_t *md, *prev_md = NULL;
+
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ for_each_efi_memory_desc(md) {
+ u64 prev_size;
+- md = p;
+
+ if (!prev_md) {
+ prev_md = md;
+@@ -652,15 +641,13 @@ static void __init save_runtime_map(void
+ {
+ #ifdef CONFIG_KEXEC_CORE
+ efi_memory_desc_t *md;
+- void *tmp, *p, *q = NULL;
++ void *tmp, *q = NULL;
+ int count = 0;
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- md = p;
+-
++ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
+ (md->type == EFI_BOOT_SERVICES_CODE) ||
+ (md->type == EFI_BOOT_SERVICES_DATA))
+@@ -816,7 +803,6 @@ static void __init kexec_enter_virtual_m
+ #ifdef CONFIG_KEXEC_CORE
+ efi_memory_desc_t *md;
+ unsigned int num_pages;
+- void *p;
+
+ efi.systab = NULL;
+
+@@ -840,8 +826,7 @@ static void __init kexec_enter_virtual_m
+ * Map efi regions which were passed via setup_data. The virt_addr is a
+ * fixed addr which was used in first kernel of a kexec boot.
+ */
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- md = p;
++ for_each_efi_memory_desc(md) {
+ efi_map_region_fixed(md); /* FIXME: add error handling */
+ get_systab_virt_addr(md);
+ }
+@@ -1006,13 +991,11 @@ void __init efi_enter_virtual_mode(void)
+ u32 efi_mem_type(unsigned long phys_addr)
+ {
+ efi_memory_desc_t *md;
+- void *p;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- md = p;
++ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -53,14 +53,12 @@ struct efi_scratch efi_scratch;
+ static void __init early_code_mapping_set_exec(int executable)
+ {
+ efi_memory_desc_t *md;
+- void *p;
+
+ if (!(__supported_pte_mask & _PAGE_NX))
+ return;
+
+ /* Make EFI service code area executable */
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- md = p;
++ for_each_efi_memory_desc(md) {
+ if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_CODE)
+ efi_set_executable(md, executable);
+@@ -250,7 +248,7 @@ int __init efi_setup_page_tables(unsigne
+ * Map all of RAM so that we can access arguments in the 1:1
+ * mapping when making EFI runtime calls.
+ */
+- for_each_efi_memory_desc(&memmap, md) {
++ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_CONVENTIONAL_MEMORY &&
+ md->type != EFI_LOADER_DATA &&
+ md->type != EFI_LOADER_CODE)
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -140,10 +140,9 @@ EXPORT_SYMBOL_GPL(efi_query_variable_sto
+ */
+ void __init efi_reserve_boot_services(void)
+ {
+- void *p;
++ efi_memory_desc_t *md;
+
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- efi_memory_desc_t *md = p;
++ for_each_efi_memory_desc(md) {
+ u64 start = md->phys_addr;
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
+
+@@ -171,10 +170,9 @@ void __init efi_reserve_boot_services(vo
+
+ void __init efi_free_boot_services(void)
+ {
+- void *p;
++ efi_memory_desc_t *md;
+
+- for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+- efi_memory_desc_t *md = p;
++ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -100,7 +100,7 @@ static phys_addr_t efi_to_phys(unsigned
+ {
+ efi_memory_desc_t *md;
+
+- for_each_efi_memory_desc(&memmap, md) {
++ for_each_efi_memory_desc_in_map(&memmap, md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+@@ -213,7 +213,7 @@ static __init void reserve_regions(void)
+ memblock_dump_all();
+ memblock_remove(0, (phys_addr_t)ULLONG_MAX);
+
+- for_each_efi_memory_desc(&memmap, md) {
++ for_each_efi_memory_desc_in_map(&memmap, md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -48,7 +48,7 @@ static bool __init efi_virtmap_init(void
+ init_new_context(NULL, &efi_mm);
+
+ systab_found = false;
+- for_each_efi_memory_desc(&memmap, md) {
++ for_each_efi_memory_desc(md) {
+ phys_addr_t phys = md->phys_addr;
+ int ret;
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -532,16 +532,12 @@ char * __init efi_md_typeattr_format(cha
+ */
+ u64 __weak efi_mem_attributes(unsigned long phys_addr)
+ {
+- struct efi_memory_map *map;
+ efi_memory_desc_t *md;
+- void *p;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
+- map = efi.memmap;
+- for (p = map->map; p < map->map_end; p += map->desc_size) {
+- md = p;
++ for_each_efi_memory_desc(md) {
+ if ((md->phys_addr <= phys_addr) &&
+ (phys_addr < (md->phys_addr +
+ (md->num_pages << EFI_PAGE_SHIFT))))
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -68,8 +68,7 @@ void __init efi_fake_memmap(void)
+ return;
+
+ /* count up the number of EFI memory descriptor */
+- for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
+- md = old;
++ for_each_efi_memory_desc(md) {
+ start = md->phys_addr;
+ end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -125,10 +125,12 @@ unsigned long get_dram_base(efi_system_t
+
+ map.map_end = map.map + map_size;
+
+- for_each_efi_memory_desc(&map, md)
+- if (md->attribute & EFI_MEMORY_WB)
++ for_each_efi_memory_desc_in_map(&map, md) {
++ if (md->attribute & EFI_MEMORY_WB) {
+ if (membase > md->phys_addr)
+ membase = md->phys_addr;
++ }
++ }
+
+ efi_call_early(free_pool, map.map);
+
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1045,11 +1045,20 @@ static inline void efi_fake_memmap(void)
+ #endif
+
+ /* Iterate through an efi_memory_map */
+-#define for_each_efi_memory_desc(m, md) \
++#define for_each_efi_memory_desc_in_map(m, md) \
+ for ((md) = (m)->map; \
+ (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
+ (md) = (void *)(md) + (m)->desc_size)
+
++/**
++ * for_each_efi_memory_desc - iterate over descriptors in efi.memmap
++ * @md: the efi_memory_desc_t * iterator
++ *
++ * Once the loop finishes @md must not be accessed.
++ */
++#define for_each_efi_memory_desc(md) \
++ for_each_efi_memory_desc_in_map(efi.memmap, md)
++
+ /*
+ * Format an EFI memory descriptor's type and attributes to a user-provided
+ * character buffer, as per snprintf(), and return the buffer.
diff --git a/patches.arch/qcom-0023-efi-Remove-global-memmap-EFI-memory-map.patch b/patches.arch/qcom-0023-efi-Remove-global-memmap-EFI-memory-map.patch
new file mode 100644
index 0000000000..2425319971
--- /dev/null
+++ b/patches.arch/qcom-0023-efi-Remove-global-memmap-EFI-memory-map.patch
@@ -0,0 +1,474 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Mon, 25 Apr 2016 21:06:39 +0100
+Subject: efi: Remove global 'memmap' EFI memory map
+Git-commit: 884f4f66ffd6ffe632f3a8be4e6d10a858afdc37
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+Abolish the poorly named EFI memory map, 'memmap'. It is shadowed by a
+bunch of local definitions in various files, and having two ways to
+access the EFI memory map ('efi.memmap' vs. 'memmap') is rather
+confusing.
+
+Furthermore, IA64 doesn't even provide this global object, which has
+caused issues when trying to write generic EFI memmap code.
+
+Replace all occurrences with efi.memmap, and convert the remaining
+iterator code to use for_each_efi_memory_desc().
+
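+Call sites that need the map structure itself now take its address, as
+in this pattern from the diff below:
+
+	struct efi_memory_map *map = &efi.memmap;
+	unsigned long size = map->nr_map * map->desc_size;
+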
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Luck, Tony <tony.luck@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1461614832-17633-8-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi.c | 84 ++++++++++++++++++++-----------------
+ drivers/firmware/efi/arm-init.c | 20 +++-----
+ drivers/firmware/efi/arm-runtime.c | 12 ++---
+ drivers/firmware/efi/efi.c | 2
+ drivers/firmware/efi/fake_mem.c | 40 ++++++++---------
+ include/linux/efi.h | 5 --
+ 6 files changed, 85 insertions(+), 78 deletions(-)
+
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -56,8 +56,6 @@
+
+ #define EFI_DEBUG
+
+-struct efi_memory_map memmap;
+-
+ static struct efi efi_phys __initdata;
+ static efi_system_table_t efi_systab __initdata;
+
+@@ -207,15 +205,13 @@ int __init efi_memblock_x86_reserve_rang
+ #else
+ pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
+ #endif
+- memmap.phys_map = pmap;
+- memmap.nr_map = e->efi_memmap_size /
++ efi.memmap.phys_map = pmap;
++ efi.memmap.nr_map = e->efi_memmap_size /
+ e->efi_memdesc_size;
+- memmap.desc_size = e->efi_memdesc_size;
+- memmap.desc_version = e->efi_memdesc_version;
+-
+- memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
++ efi.memmap.desc_size = e->efi_memdesc_size;
++ efi.memmap.desc_version = e->efi_memdesc_version;
+
+- efi.memmap = &memmap;
++ memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
+
+ return 0;
+ }
+@@ -240,10 +236,14 @@ void __init efi_print_memmap(void)
+
+ void __init efi_unmap_memmap(void)
+ {
++ unsigned long size;
++
+ clear_bit(EFI_MEMMAP, &efi.flags);
+- if (memmap.map) {
+- early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size);
+- memmap.map = NULL;
++
++ size = efi.memmap.nr_map * efi.memmap.desc_size;
++ if (efi.memmap.map) {
++ early_memunmap(efi.memmap.map, size);
++ efi.memmap.map = NULL;
+ }
+ }
+
+@@ -434,17 +434,22 @@ static int __init efi_runtime_init(void)
+
+ static int __init efi_memmap_init(void)
+ {
++ unsigned long addr, size;
++
+ if (efi_enabled(EFI_PARAVIRT))
+ return 0;
+
+ /* Map the EFI memory map */
+- memmap.map = early_memremap((unsigned long)memmap.phys_map,
+- memmap.nr_map * memmap.desc_size);
+- if (memmap.map == NULL) {
++ size = efi.memmap.nr_map * efi.memmap.desc_size;
++ addr = (unsigned long)efi.memmap.phys_map;
++
++ efi.memmap.map = early_memremap(addr, size);
++ if (efi.memmap.map == NULL) {
+ pr_err("Could not map the memory map!\n");
+ return -ENOMEM;
+ }
+- memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
++
++ efi.memmap.map_end = efi.memmap.map + size;
+
+ if (add_efi_memmap)
+ do_add_efi_memmap();
+@@ -640,6 +645,7 @@ static void __init get_systab_virt_addr(
+ static void __init save_runtime_map(void)
+ {
+ #ifdef CONFIG_KEXEC_CORE
++ unsigned long desc_size;
+ efi_memory_desc_t *md;
+ void *tmp, *q = NULL;
+ int count = 0;
+@@ -647,21 +653,23 @@ static void __init save_runtime_map(void
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
++ desc_size = efi.memmap.desc_size;
++
+ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
+ (md->type == EFI_BOOT_SERVICES_CODE) ||
+ (md->type == EFI_BOOT_SERVICES_DATA))
+ continue;
+- tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
++ tmp = krealloc(q, (count + 1) * desc_size, GFP_KERNEL);
+ if (!tmp)
+ goto out;
+ q = tmp;
+
+- memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
++ memcpy(q + count * desc_size, md, desc_size);
+ count++;
+ }
+
+- efi_runtime_map_setup(q, count, memmap.desc_size);
++ efi_runtime_map_setup(q, count, desc_size);
+ return;
+
+ out:
+@@ -701,10 +709,10 @@ static inline void *efi_map_next_entry_r
+ {
+ /* Initial call */
+ if (!entry)
+- return memmap.map_end - memmap.desc_size;
++ return efi.memmap.map_end - efi.memmap.desc_size;
+
+- entry -= memmap.desc_size;
+- if (entry < memmap.map)
++ entry -= efi.memmap.desc_size;
++ if (entry < efi.memmap.map)
+ return NULL;
+
+ return entry;
+@@ -746,10 +754,10 @@ static void *efi_map_next_entry(void *en
+
+ /* Initial call */
+ if (!entry)
+- return memmap.map;
++ return efi.memmap.map;
+
+- entry += memmap.desc_size;
+- if (entry >= memmap.map_end)
++ entry += efi.memmap.desc_size;
++ if (entry >= efi.memmap.map_end)
+ return NULL;
+
+ return entry;
+@@ -763,8 +771,11 @@ static void * __init efi_map_regions(int
+ {
+ void *p, *new_memmap = NULL;
+ unsigned long left = 0;
++ unsigned long desc_size;
+ efi_memory_desc_t *md;
+
++ desc_size = efi.memmap.desc_size;
++
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
+ md = p;
+@@ -779,7 +790,7 @@ static void * __init efi_map_regions(int
+ efi_map_region(md);
+ get_systab_virt_addr(md);
+
+- if (left < memmap.desc_size) {
++ if (left < desc_size) {
+ new_memmap = realloc_pages(new_memmap, *pg_shift);
+ if (!new_memmap)
+ return NULL;
+@@ -788,10 +799,9 @@ static void * __init efi_map_regions(int
+ (*pg_shift)++;
+ }
+
+- memcpy(new_memmap + (*count * memmap.desc_size), md,
+- memmap.desc_size);
++ memcpy(new_memmap + (*count * desc_size), md, desc_size);
+
+- left -= memmap.desc_size;
++ left -= desc_size;
+ (*count)++;
+ }
+
+@@ -835,10 +845,10 @@ static void __init kexec_enter_virtual_m
+
+ BUG_ON(!efi.systab);
+
+- num_pages = ALIGN(memmap.nr_map * memmap.desc_size, PAGE_SIZE);
++ num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
+ num_pages >>= PAGE_SHIFT;
+
+- if (efi_setup_page_tables(memmap.phys_map, num_pages)) {
++ if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+@@ -923,16 +933,16 @@ static void __init __efi_enter_virtual_m
+
+ if (efi_is_native()) {
+ status = phys_efi_set_virtual_address_map(
+- memmap.desc_size * count,
+- memmap.desc_size,
+- memmap.desc_version,
++ efi.memmap.desc_size * count,
++ efi.memmap.desc_size,
++ efi.memmap.desc_version,
+ (efi_memory_desc_t *)__pa(new_memmap));
+ } else {
+ status = efi_thunk_set_virtual_address_map(
+ efi_phys.set_virtual_address_map,
+- memmap.desc_size * count,
+- memmap.desc_size,
+- memmap.desc_version,
++ efi.memmap.desc_size * count,
++ efi.memmap.desc_size,
++ efi.memmap.desc_version,
+ (efi_memory_desc_t *)__pa(new_memmap));
+ }
+
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -21,8 +21,6 @@
+
+ #include <asm/efi.h>
+
+-struct efi_memory_map memmap;
+-
+ u64 efi_system_table;
+
+ static int __init is_normal_ram(efi_memory_desc_t *md)
+@@ -41,7 +39,7 @@ static phys_addr_t efi_to_phys(unsigned
+ {
+ efi_memory_desc_t *md;
+
+- for_each_efi_memory_desc_in_map(&memmap, md) {
++ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+ if (md->virt_addr == 0)
+@@ -154,7 +152,7 @@ static __init void reserve_regions(void)
+ memblock_dump_all();
+ memblock_remove(0, (phys_addr_t)ULLONG_MAX);
+
+- for_each_efi_memory_desc_in_map(&memmap, md) {
++ for_each_efi_memory_desc(md) {
+ paddr = md->phys_addr;
+ npages = md->num_pages;
+
+@@ -195,9 +193,9 @@ void __init efi_init(void)
+
+ efi_system_table = params.system_table;
+
+- memmap.phys_map = params.mmap;
+- memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
+- if (memmap.map == NULL) {
++ efi.memmap.phys_map = params.mmap;
++ efi.memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
++ if (efi.memmap.map == NULL) {
+ /*
+ * If we are booting via UEFI, the UEFI memory map is the only
+ * description of memory we have, so there is little point in
+@@ -205,15 +203,15 @@ void __init efi_init(void)
+ */
+ panic("Unable to map EFI memory map.\n");
+ }
+- memmap.map_end = memmap.map + params.mmap_size;
+- memmap.desc_size = params.desc_size;
+- memmap.desc_version = params.desc_ver;
++ efi.memmap.map_end = efi.memmap.map + params.mmap_size;
++ efi.memmap.desc_size = params.desc_size;
++ efi.memmap.desc_version = params.desc_ver;
+
+ if (uefi_init() < 0)
+ return;
+
+ reserve_regions();
+- early_memunmap(memmap.map, params.mmap_size);
++ early_memunmap(efi.memmap.map, params.mmap_size);
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -103,15 +103,15 @@ static int __init arm_enable_runtime_ser
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+- mapsize = memmap.map_end - memmap.map;
+- memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
+- mapsize);
+- if (!memmap.map) {
++ mapsize = efi.memmap.map_end - efi.memmap.map;
++
++ efi.memmap.map = (__force void *)ioremap_cache(efi.memmap.phys_map,
++ mapsize);
++ if (!efi.memmap.map) {
+ pr_err("Failed to remap EFI memory map\n");
+ return -ENOMEM;
+ }
+- memmap.map_end = memmap.map + mapsize;
+- efi.memmap = &memmap;
++ efi.memmap.map_end = efi.memmap.map + mapsize;
+
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -256,7 +256,7 @@ subsys_initcall(efisubsys_init);
+ */
+ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+ {
+- struct efi_memory_map *map = efi.memmap;
++ struct efi_memory_map *map = &efi.memmap;
+ phys_addr_t p, e;
+
+ if (!efi_enabled(EFI_MEMMAP)) {
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -57,7 +57,7 @@ static int __init cmp_fake_mem(const voi
+ void __init efi_fake_memmap(void)
+ {
+ u64 start, end, m_start, m_end, m_attr;
+- int new_nr_map = memmap.nr_map;
++ int new_nr_map = efi.memmap.nr_map;
+ efi_memory_desc_t *md;
+ phys_addr_t new_memmap_phy;
+ void *new_memmap;
+@@ -94,25 +94,25 @@ void __init efi_fake_memmap(void)
+ }
+
+ /* allocate memory for new EFI memmap */
+- new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
++ new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
+ PAGE_SIZE);
+ if (!new_memmap_phy)
+ return;
+
+ /* create new EFI memmap */
+ new_memmap = early_memremap(new_memmap_phy,
+- memmap.desc_size * new_nr_map);
++ efi.memmap.desc_size * new_nr_map);
+ if (!new_memmap) {
+- memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
++ memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
+ return;
+ }
+
+- for (old = memmap.map, new = new_memmap;
+- old < memmap.map_end;
+- old += memmap.desc_size, new += memmap.desc_size) {
++ for (old = efi.memmap.map, new = new_memmap;
++ old < efi.memmap.map_end;
++ old += efi.memmap.desc_size, new += efi.memmap.desc_size) {
+
+ /* copy original EFI memory descriptor */
+- memcpy(new, old, memmap.desc_size);
++ memcpy(new, old, efi.memmap.desc_size);
+ md = new;
+ start = md->phys_addr;
+ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+@@ -133,8 +133,8 @@ void __init efi_fake_memmap(void)
+ md->num_pages = (m_end - md->phys_addr + 1) >>
+ EFI_PAGE_SHIFT;
+ /* latter part */
+- new += memmap.desc_size;
+- memcpy(new, old, memmap.desc_size);
++ new += efi.memmap.desc_size;
++ memcpy(new, old, efi.memmap.desc_size);
+ md = new;
+ md->phys_addr = m_end + 1;
+ md->num_pages = (end - md->phys_addr + 1) >>
+@@ -146,16 +146,16 @@ void __init efi_fake_memmap(void)
+ md->num_pages = (m_start - md->phys_addr) >>
+ EFI_PAGE_SHIFT;
+ /* middle part */
+- new += memmap.desc_size;
+- memcpy(new, old, memmap.desc_size);
++ new += efi.memmap.desc_size;
++ memcpy(new, old, efi.memmap.desc_size);
+ md = new;
+ md->attribute |= m_attr;
+ md->phys_addr = m_start;
+ md->num_pages = (m_end - m_start + 1) >>
+ EFI_PAGE_SHIFT;
+ /* last part */
+- new += memmap.desc_size;
+- memcpy(new, old, memmap.desc_size);
++ new += efi.memmap.desc_size;
++ memcpy(new, old, efi.memmap.desc_size);
+ md = new;
+ md->phys_addr = m_end + 1;
+ md->num_pages = (end - m_end) >>
+@@ -168,8 +168,8 @@ void __init efi_fake_memmap(void)
+ md->num_pages = (m_start - md->phys_addr) >>
+ EFI_PAGE_SHIFT;
+ /* latter part */
+- new += memmap.desc_size;
+- memcpy(new, old, memmap.desc_size);
++ new += efi.memmap.desc_size;
++ memcpy(new, old, efi.memmap.desc_size);
+ md = new;
+ md->phys_addr = m_start;
+ md->num_pages = (end - md->phys_addr + 1) >>
+@@ -181,10 +181,10 @@ void __init efi_fake_memmap(void)
+
+ /* swap into new EFI memmap */
+ efi_unmap_memmap();
+- memmap.map = new_memmap;
+- memmap.phys_map = new_memmap_phy;
+- memmap.nr_map = new_nr_map;
+- memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
++ efi.memmap.map = new_memmap;
++ efi.memmap.phys_map = new_memmap_phy;
++ efi.memmap.nr_map = new_nr_map;
++ efi.memmap.map_end = efi.memmap.map + efi.memmap.nr_map * efi.memmap.desc_size;
+ set_bit(EFI_MEMMAP, &efi.flags);
+
+ /* print new EFI memmap */
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -940,7 +940,7 @@ extern struct efi {
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+ efi_set_virtual_address_map_t *set_virtual_address_map;
+- struct efi_memory_map *memmap;
++ struct efi_memory_map memmap;
+ unsigned long flags;
+ } efi;
+
+@@ -1031,7 +1031,6 @@ extern void efi_initialize_iomem_resourc
+ extern void efi_get_time(struct timespec *now);
+ extern void efi_reserve_boot_services(void);
+ extern int efi_get_fdt_params(struct efi_fdt_params *params);
+-extern struct efi_memory_map memmap;
+ extern struct kobject *efi_kobj;
+
+ extern int efi_reboot_quirk_mode;
+@@ -1056,7 +1055,7 @@ static inline void efi_fake_memmap(void)
+ * Once the loop finishes @md must not be accessed.
+ */
+ #define for_each_efi_memory_desc(md) \
+- for_each_efi_memory_desc_in_map(efi.memmap, md)
++ for_each_efi_memory_desc_in_map(&efi.memmap, md)
+
+ /*
+ * Format an EFI memory descriptor's type and attributes to a user-provided
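+
+[ Editor's illustrative sketch, not part of this patch: with the memory
+  map now embedded in 'struct efi' by value, a caller walks it via the
+  for_each_efi_memory_desc() macro above, which expands to
+  for_each_efi_memory_desc_in_map(&efi.memmap, md). The helper name
+  below is hypothetical. ]
+
+	static u64 __init count_runtime_pages(void)
+	{
+		efi_memory_desc_t *md;
+		u64 pages = 0;
+
+		/* Walks efi.memmap.map..map_end in desc_size strides. */
+		for_each_efi_memory_desc(md) {
+			if (md->attribute & EFI_MEMORY_RUNTIME)
+				pages += md->num_pages;
+		}
+		return pages;
+	}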
diff --git a/patches.arch/qcom-0024-efi-arm-Use-memremap-to-create-the-persistent-memmap.patch b/patches.arch/qcom-0024-efi-arm-Use-memremap-to-create-the-persistent-memmap.patch
new file mode 100644
index 0000000000..0f73f5412f
--- /dev/null
+++ b/patches.arch/qcom-0024-efi-arm-Use-memremap-to-create-the-persistent-memmap.patch
@@ -0,0 +1,44 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 25 Apr 2016 21:06:41 +0100
+Subject: efi/arm*: Use memremap() to create the persistent memmap mapping
+
+Git-commit: 24d45d1dc275b818093fe1d0055a230ce5e8c4c7
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+Instead of using ioremap_cache(), which is slightly inappropriate for
+mapping firmware tables, and is not even allowed on ARM for mapping
+regions that are covered by a struct page, use memremap(), which was
+invented for this purpose, and will also reuse the existing kernel
+direct mapping if the requested region is covered by it.
+
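+[ Editor's illustrative sketch, not part of this patch: the mapping
+  pattern the change adopts; the helper name is hypothetical. ]
+
+	#include <linux/io.h>
+
+	/* Map a firmware table cacheably. memremap() reuses the kernel
+	 * direct mapping when the region is already covered by it;
+	 * pair with memunmap() when done.
+	 */
+	static void *map_firmware_table(phys_addr_t phys, size_t size)
+	{
+		return memremap(phys, size, MEMREMAP_WB);
+	}
+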
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1461614832-17633-10-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/arm-runtime.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
+index 55a9ea041068..19283deac375 100644
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -105,8 +105,7 @@ static int __init arm_enable_runtime_services(void)
+
+ mapsize = efi.memmap.map_end - efi.memmap.map;
+
+- efi.memmap.map = (__force void *)ioremap_cache(efi.memmap.phys_map,
+- mapsize);
++ efi.memmap.map = memremap(efi.memmap.phys_map, mapsize, MEMREMAP_WB);
+ if (!efi.memmap.map) {
+ pr_err("Failed to remap EFI memory map\n");
+ return -ENOMEM;
+--
+2.11.0
+
diff --git a/patches.arch/qcom-0025-efi-Add-support-for-the-EFI_MEMORY_ATTRIBUTES_TABLE-.patch b/patches.arch/qcom-0025-efi-Add-support-for-the-EFI_MEMORY_ATTRIBUTES_TABLE-.patch
new file mode 100644
index 0000000000..1f0cef2f57
--- /dev/null
+++ b/patches.arch/qcom-0025-efi-Add-support-for-the-EFI_MEMORY_ATTRIBUTES_TABLE-.patch
@@ -0,0 +1,85 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 25 Apr 2016 21:06:44 +0100
+Subject: efi: Add support for the EFI_MEMORY_ATTRIBUTES_TABLE config table
+Git-commit: a604af075a3226adaff84b7026876f0c6dfe9f52
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+This declares the GUID and struct typedef for the new memory attributes
+table which contains the permissions that can be used to apply stricter
+permissions to UEFI Runtime Services memory regions.
+
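+[ Editor's illustrative sketch, not part of this patch: once the
+  config-table scan below matches EFI_MEMORY_ATTRIBUTES_TABLE_GUID,
+  a consumer could peek at the header; the function is hypothetical. ]
+
+	static void __init dump_memattr_header(void)
+	{
+		efi_memory_attributes_table_t *tbl;
+
+		if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+			return;	/* firmware did not publish the table */
+
+		tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+		if (!tbl)
+			return;
+		pr_info("MEMATTR v%u: %u entries, desc_size %u\n",
+			tbl->version, tbl->num_entries, tbl->desc_size);
+		early_memunmap(tbl, sizeof(*tbl));
+	}
+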
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1461614832-17633-13-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/efi.c | 2 ++
+ include/linux/efi.h | 13 +++++++++++++
+ 2 files changed, 15 insertions(+)
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -41,6 +41,7 @@ struct efi __read_mostly efi = {
+ .config_table = EFI_INVALID_TABLE_ADDR,
+ .esrt = EFI_INVALID_TABLE_ADDR,
+ .properties_table = EFI_INVALID_TABLE_ADDR,
++ .mem_attr_table = EFI_INVALID_TABLE_ADDR,
+ };
+ EXPORT_SYMBOL(efi);
+
+@@ -336,6 +337,7 @@ static __initdata efi_config_table_type_
+ {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
+ {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
+ {EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
++ {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
+ {NULL_GUID, NULL, NULL},
+ };
+
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -664,6 +664,10 @@ void efi_native_runtime_setup(void);
+ EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
+ 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+
++#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
++ EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
++ 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
++
+ typedef struct {
+ efi_guid_t guid;
+ u64 table;
+@@ -906,6 +910,14 @@ typedef struct {
+ /* efi_signature_data_t signatures[][] */
+ } efi_signature_list_t;
+
++typedef struct {
++ u32 version;
++ u32 num_entries;
++ u32 desc_size;
++ u32 reserved;
++ efi_memory_desc_t entry[0];
++} efi_memory_attributes_table_t;
++
+ /*
+ * All runtime access to EFI goes through this structure:
+ */
+@@ -927,6 +939,7 @@ extern struct efi {
+ unsigned long config_table; /* config tables */
+ unsigned long esrt; /* ESRT table */
+ unsigned long properties_table; /* properties table */
++ unsigned long mem_attr_table; /* memory attributes table */
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
diff --git a/patches.arch/qcom-0026-efi-Implement-generic-support-for-the-Memory-Attribu.patch b/patches.arch/qcom-0026-efi-Implement-generic-support-for-the-Memory-Attribu.patch
new file mode 100644
index 0000000000..059c3706c9
--- /dev/null
+++ b/patches.arch/qcom-0026-efi-Implement-generic-support-for-the-Memory-Attribu.patch
@@ -0,0 +1,260 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 25 Apr 2016 21:06:45 +0100
+Subject: efi: Implement generic support for the Memory Attributes table
+Git-commit: 10f0d2f57705350bbbe5f28e9292ae3905823c3c
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+This implements shared support for discovering the presence of the
+Memory Attributes table, and for parsing and validating its contents.
+
+The table is validated against the construction rules in the UEFI spec.
+Since this is a new table, it makes sense to complain if we encounter
+a table that does not follow those rules.
+
+The parsing and validation routine takes a callback that can be specified
+per architecture, that gets passed each unique validated region, with the
+virtual address retrieved from the ordinary memory map.
+
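+[ Editor's illustrative sketch, not part of this patch: the shape of
+  the per-arch callback consumed by efi_memattr_apply_permissions();
+  the function name is hypothetical. ]
+
+	/* Called once per validated region; md->virt_addr has already
+	 * been filled in from the ordinary memory map.
+	 */
+	static int example_set_permissions(struct mm_struct *mm,
+					   efi_memory_desc_t *md)
+	{
+		/* A real arch would tighten its EFI page-table entries
+		 * for md->num_pages EFI pages starting at md->virt_addr.
+		 */
+		return 0;
+	}
+
+	/* After the EFI page tables are populated:
+	 *	efi_memattr_apply_permissions(&efi_mm, example_set_permissions);
+	 */
+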
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+[ Trim pr_*() strings to 80 cols and use EFI consistently. ]
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1461614832-17633-14-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/Makefile | 2
+ drivers/firmware/efi/memattr.c | 182 +++++++++++++++++++++++++++++++++++++++++
+ include/linux/efi.h | 13 ++
+ 3 files changed, 196 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/firmware/efi/memattr.c
+
+--- a/drivers/firmware/efi/Makefile
++++ b/drivers/firmware/efi/Makefile
+@@ -9,7 +9,7 @@
+ #
+ KASAN_SANITIZE_runtime-wrappers.o := n
+
+-obj-$(CONFIG_EFI) += efi.o vars.o reboot.o
++obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+ obj-$(CONFIG_EFI) += capsule.o
+ obj-$(CONFIG_EFI_VARS) += efivars.o
+ obj-$(CONFIG_EFI_ESRT) += esrt.o
+--- /dev/null
++++ b/drivers/firmware/efi/memattr.c
+@@ -0,0 +1,182 @@
++/*
++ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#define pr_fmt(fmt) "efi: memattr: " fmt
++
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/memblock.h>
++
++#include <asm/early_ioremap.h>
++
++static int __initdata tbl_size;
++
++/*
++ * Reserve the memory associated with the Memory Attributes configuration
++ * table, if it exists.
++ */
++int __init efi_memattr_init(void)
++{
++ efi_memory_attributes_table_t *tbl;
++
++ if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
++ return 0;
++
++ tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
++ if (!tbl) {
++ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
++ efi.mem_attr_table);
++ return -ENOMEM;
++ }
++
++ if (tbl->version > 1) {
++ pr_warn("Unexpected EFI Memory Attributes table version %d\n",
++ tbl->version);
++ goto unmap;
++ }
++
++ tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
++ memblock_reserve(efi.mem_attr_table, tbl_size);
++
++unmap:
++ early_memunmap(tbl, sizeof(*tbl));
++ return 0;
++}
++
++/*
++ * Returns a copy @out of the UEFI memory descriptor @in if it is covered
++ * entirely by a UEFI memory map entry with matching attributes. The virtual
++ * address of @out is set according to the matching entry that was found.
++ */
++static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
++{
++ u64 in_paddr = in->phys_addr;
++ u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
++ efi_memory_desc_t *md;
++
++ *out = *in;
++
++ if (in->type != EFI_RUNTIME_SERVICES_CODE &&
++ in->type != EFI_RUNTIME_SERVICES_DATA) {
++ pr_warn("Entry type should be RuntimeServiceCode/Data\n");
++ return false;
++ }
++
++ if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
++ pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
++ return false;
++ }
++
++ if (PAGE_SIZE > EFI_PAGE_SIZE &&
++ (!PAGE_ALIGNED(in->phys_addr) ||
++ !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
++ /*
++ * Since arm64 may execute with page sizes of up to 64 KB, the
++ * UEFI spec mandates that RuntimeServices memory regions must
++ * be 64 KB aligned. We need to validate this here since we will
++ * not be able to tighten permissions on such regions without
++ * affecting adjacent regions.
++ */
++ pr_warn("Entry address region misaligned\n");
++ return false;
++ }
++
++ for_each_efi_memory_desc(md) {
++ u64 md_paddr = md->phys_addr;
++ u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
++
++ if (!(md->attribute & EFI_MEMORY_RUNTIME))
++ continue;
++ if (md->virt_addr == 0) {
++ /* no virtual mapping has been installed by the stub */
++ break;
++ }
++
++ if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
++ continue;
++
++ /*
++ * This entry covers the start of @in, check whether
++ * it covers the end as well.
++ */
++ if (md_paddr + md_size < in_paddr + in_size) {
++ pr_warn("Entry covers multiple EFI memory map regions\n");
++ return false;
++ }
++
++ if (md->type != in->type) {
++ pr_warn("Entry type deviates from EFI memory map region type\n");
++ return false;
++ }
++
++ out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
++
++ return true;
++ }
++
++ pr_warn("No matching entry found in the EFI memory map\n");
++ return false;
++}
++
++/*
++ * To be called after the EFI page tables have been populated. If a memory
++ * attributes table is available, its contents will be used to update the
++ * mappings with tightened permissions as described by the table.
++ * This requires the UEFI memory map to have already been populated with
++ * virtual addresses.
++ */
++int __init efi_memattr_apply_permissions(struct mm_struct *mm,
++ efi_memattr_perm_setter fn)
++{
++ efi_memory_attributes_table_t *tbl;
++ int i, ret;
++
++ if (tbl_size <= sizeof(*tbl))
++ return 0;
++
++ /*
++	 * We need the EFI memory map to be set up so we can use it to
++	 * look up the virtual addresses of all entries in the EFI
++ * Memory Attributes table. If it isn't available, this
++ * function should not be called.
++ */
++ if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
++ return 0;
++
++ tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
++ if (!tbl) {
++ pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
++ efi.mem_attr_table);
++ return -ENOMEM;
++ }
++
++ if (efi_enabled(EFI_DBG))
++ pr_info("Processing EFI Memory Attributes table:\n");
++
++ for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
++ efi_memory_desc_t md;
++ unsigned long size;
++ bool valid;
++ char buf[64];
++
++ valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
++ &md);
++ size = md.num_pages << EFI_PAGE_SHIFT;
++ if (efi_enabled(EFI_DBG) || !valid)
++ pr_info("%s 0x%012llx-0x%012llx %s\n",
++ valid ? "" : "!", md.phys_addr,
++ md.phys_addr + size - 1,
++ efi_md_typeattr_format(buf, sizeof(buf), &md));
++
++ if (valid)
++ ret = fn(mm, &md);
++ }
++ memunmap(tbl);
++ return ret;
++}
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1043,6 +1043,19 @@ extern void __init efi_fake_memmap(void)
+ static inline void efi_fake_memmap(void) { }
+ #endif
+
++/*
++ * efi_memattr_perm_setter - arch specific callback function passed into
++ * efi_memattr_apply_permissions() that updates the
++ * mapping permissions described by the second
++ * argument in the page tables referred to by the
++ * first argument.
++ */
++typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
++
++extern int efi_memattr_init(void);
++extern int efi_memattr_apply_permissions(struct mm_struct *mm,
++ efi_memattr_perm_setter fn);
++
+ /* Iterate through an efi_memory_map */
+ #define for_each_efi_memory_desc_in_map(m, md) \
+ for ((md) = (m)->map; \
diff --git a/patches.arch/qcom-0027-efi-arm-Take-the-Memory-Attributes-table-into-accoun.patch b/patches.arch/qcom-0027-efi-arm-Take-the-Memory-Attributes-table-into-accoun.patch
new file mode 100644
index 0000000000..857768dcdb
--- /dev/null
+++ b/patches.arch/qcom-0027-efi-arm-Take-the-Memory-Attributes-table-into-accoun.patch
@@ -0,0 +1,74 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 25 Apr 2016 21:06:46 +0100
+Subject: efi/arm*: Take the Memory Attributes table into account
+Git-commit: 789957ef72f976cb325e9057225fc4e9c4513060
+Patch-mainline: v4.7-rc1
+References: fate#320512
+
+Call into the generic memory attributes table support code at the
+appropriate times during the init sequence so that the UEFI Runtime
+Services regions are mapped according to the strict permissions the
+table specifies.
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@intel.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1461614832-17633-15-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/arm64/include/asm/efi.h | 2 ++
+ drivers/firmware/efi/arm-init.c | 1 +
+ drivers/firmware/efi/arm-runtime.c | 10 ++++++++--
+ 3 files changed, 11 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/include/asm/efi.h
++++ b/arch/arm64/include/asm/efi.h
+@@ -16,6 +16,8 @@ extern void efi_init_fdt(void *fdt);
+
+ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+
++#define efi_set_mapping_permissions efi_create_mapping
++
+ #define arch_efi_call_virt_setup() \
+ ({ \
+ kernel_neon_begin(); \
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -271,6 +271,7 @@ void __init efi_init_fdt(void *fdt)
+ return;
+
+ reserve_regions();
++ efi_memattr_init();
+ early_memunmap(efi.memmap.map, params.mmap_size);
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -79,9 +79,15 @@ static bool __init efi_virtmap_init(void
+ systab_found = true;
+ }
+ }
+- if (!systab_found)
++ if (!systab_found) {
+ pr_err("No virtual mapping found for the UEFI System Table\n");
+- return systab_found;
++ return false;
++ }
++
++ if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
++ return false;
++
++ return true;
+ }
+
+ /*
diff --git a/patches.arch/qcom-0028-x86-efi-Fix-boot-crash-by-always-mapping-boot-servic.patch b/patches.arch/qcom-0028-x86-efi-Fix-boot-crash-by-always-mapping-boot-servic.patch
new file mode 100644
index 0000000000..6728c992fb
--- /dev/null
+++ b/patches.arch/qcom-0028-x86-efi-Fix-boot-crash-by-always-mapping-boot-servic.patch
@@ -0,0 +1,199 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Fri, 11 Mar 2016 11:19:23 +0000
+Subject: x86/efi: Fix boot crash by always mapping boot service regions into
+ new EFI page tables
+Git-commit: 452308de61056a539352a9306c46716d7af8a1f1
+Patch-mainline: v4.5
+References: fate#320512
+
+Some machines have EFI regions in page zero (physical address
+0x00000000) and historically that region has been added to the e820
+map via trim_bios_range(), and ultimately mapped into the kernel page
+tables. It was not mapped via efi_map_regions() as one would expect.
+
+Alexis reports that with the new separate EFI page tables some boot
+services regions, such as page zero, are not mapped. This triggers an
+oops during the SetVirtualAddressMap() runtime call.
+
+For the EFI boot services quirk on x86 we need to memblock_reserve()
+boot services regions until after SetVirtualAddressMap(). Doing that
+while respecting the ownership of regions that may have already been
+reserved by the kernel was the motivation behind this commit:
+
+ 7d68dc3f1003 ("x86, efi: Do not reserve boot services regions within reserved areas")
+
+That patch was merged at a time when the EFI runtime virtual mappings
+were inserted into the kernel page tables as described above, and the
+trick of setting ->numpages (and hence the region size) to zero to
+track regions that should not be freed in efi_free_boot_services()
+meant that we never mapped those regions in efi_map_regions(). Instead
+we were relying solely on the existing kernel mappings.
+
+Now that we have separate page tables we need to make sure the EFI
+boot services regions are mapped correctly, even if someone else has
+already called memblock_reserve(). Instead of stashing a tag in
+->numpages, set the EFI_MEMORY_RUNTIME bit of ->attribute. Since it
+generally makes no sense to mark a boot services region as required at
+runtime, it's pretty much guaranteed the firmware will not have
+already set this bit.
+
+For the record, the specific circumstances under which Alexis
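+[ Editor's illustrative sketch, not part of this patch: the
+  tag-and-test scheme described above; helper names are hypothetical. ]
+
+	/* Reserve-time: we don't own the region, so tag it. */
+	static void tag_region_not_ours(efi_memory_desc_t *md)
+	{
+		md->attribute |= EFI_MEMORY_RUNTIME;
+	}
+
+	/* Free-time: anything carrying the tag must be skipped. */
+	static bool region_may_be_freed(const efi_memory_desc_t *md)
+	{
+		return !(md->attribute & EFI_MEMORY_RUNTIME);
+	}
+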
+triggered this bug was that an EFI runtime driver on his machine was
+responding to the EVT_SIGNAL_VIRTUAL_ADDRESS_CHANGE event during
+SetVirtualAddressMap().
+
+The event handler for this driver looks like this,
+
+ sub rsp,0x28
+ lea rdx,[rip+0x2445] # 0xaa948720
+ mov ecx,0x4
+ call func_aa9447c0 ; call to ConvertPointer(4, & 0xaa948720)
+ mov r11,QWORD PTR [rip+0x2434] # 0xaa948720
+ xor eax,eax
+ mov BYTE PTR [r11+0x1],0x1
+ add rsp,0x28
+ ret
+
+Which is pretty typical code for an EVT_SIGNAL_VIRTUAL_ADDRESS_CHANGE
+handler. The "mov r11, QWORD PTR [rip+0x2434]" was the faulting
+instruction because ConvertPointer() was being called to convert the
+address 0x0000000000000000, which when converted is left unchanged and
+remains 0x0000000000000000.
+
+The output of the oops trace gave the impression of a standard NULL
+pointer dereference bug, but because we're accessing physical
+addresses during ConvertPointer(), it wasn't. EFI boot services code
+is stored at that address on Alexis' machine.
+
+Reported-by: Alexis Murzeau <amurzeau@gmail.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Maarten Lankhorst <maarten.lankhorst@canonical.com>
+Cc: Matthew Garrett <mjg59@srcf.ucam.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Raphael Hertzog <hertzog@debian.org>
+Cc: Roger Shimizu <rogershimizu@gmail.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1457695163-29632-2-git-send-email-matt@codeblueprint.co.uk
+Link: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=815125
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/quirks.c | 79 ++++++++++++++++++++++++++++++++---------
+ 1 file changed, 62 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -130,6 +130,27 @@ efi_status_t efi_query_variable_store(u3
+ EXPORT_SYMBOL_GPL(efi_query_variable_store);
+
+ /*
++ * Helper function for efi_reserve_boot_services() to figure out if we
++ * can free regions in efi_free_boot_services().
++ *
++ * Use this function to ensure we do not free regions owned by somebody
++ * else. We must only reserve (and then free) regions:
++ *
++ * - Not within any part of the kernel
++ * - Not the BIOS reserved area (E820_RESERVED, E820_NVS, etc)
++ */
++static bool can_free_region(u64 start, u64 size)
++{
++ if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end))
++ return false;
++
++ if (!e820_all_mapped(start, start+size, E820_RAM))
++ return false;
++
++ return true;
++}
++
++/*
+ * The UEFI specification makes it clear that the operating system is free to do
+ * whatever it wants with boot services code after ExitBootServices() has been
+ * called. Ignoring this recommendation a significant bunch of EFI implementations
+@@ -145,26 +166,50 @@ void __init efi_reserve_boot_services(vo
+ for_each_efi_memory_desc(md) {
+ u64 start = md->phys_addr;
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
++ bool already_reserved;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+- /* Only reserve where possible:
+- * - Not within any already allocated areas
+- * - Not over any memory area (really needed, if above?)
+- * - Not within any part of the kernel
+- * - Not the bios reserved area
+- */
+- if ((start + size > __pa_symbol(_text)
+- && start <= __pa_symbol(_end)) ||
+- !e820_all_mapped(start, start+size, E820_RAM) ||
+- memblock_is_region_reserved(start, size)) {
+- /* Could not reserve, skip it */
+- md->num_pages = 0;
+- memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n",
+- start, start+size-1);
+- } else
++
++ already_reserved = memblock_is_region_reserved(start, size);
++
++ /*
++ * Because the following memblock_reserve() is paired
++ * with free_bootmem_late() for this region in
++ * efi_free_boot_services(), we must be extremely
++ * careful not to reserve, and subsequently free,
++ * critical regions of memory (like the kernel image) or
++ * those regions that somebody else has already
++ * reserved.
++ *
++ * A good example of a critical region that must not be
++ * freed is page zero (first 4Kb of memory), which may
++ * contain boot services code/data but is marked
++ * E820_RESERVED by trim_bios_range().
++ */
++ if (!already_reserved) {
+ memblock_reserve(start, size);
++
++ /*
++ * If we are the first to reserve the region, no
++ * one else cares about it. We own it and can
++ * free it later.
++ */
++ if (can_free_region(start, size))
++ continue;
++ }
++
++ /*
++ * We don't own the region. We must not free it.
++ *
++ * Setting this bit for a boot services region really
++ * doesn't make sense as far as the firmware is
++ * concerned, but it does provide us with a way to tag
++ * those regions that must not be paired with
++ * free_bootmem_late().
++ */
++ md->attribute |= EFI_MEMORY_RUNTIME;
+ }
+ }
+
+@@ -180,8 +225,8 @@ void __init efi_free_boot_services(void)
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+- /* Could not reserve boot area */
+- if (!size)
++ /* Do not free, someone else owns it: */
++ if (md->attribute & EFI_MEMORY_RUNTIME)
+ continue;
+
+ free_bootmem_late(start, size);
diff --git a/patches.arch/qcom-0029-x86-boot-Reorganize-and-clean-up-the-BIOS-area-reser.patch b/patches.arch/qcom-0029-x86-boot-Reorganize-and-clean-up-the-BIOS-area-reser.patch
new file mode 100644
index 0000000000..0f818d0808
--- /dev/null
+++ b/patches.arch/qcom-0029-x86-boot-Reorganize-and-clean-up-the-BIOS-area-reser.patch
@@ -0,0 +1,293 @@
+From: Ingo Molnar <mingo@kernel.org>
+Date: Thu, 21 Jul 2016 09:53:52 +0200
+Subject: x86/boot: Reorganize and clean up the BIOS area reservation code
+Git-commit: edce21216a8887bf06ba85ee49a00695e44c4341
+Patch-mainline: v4.8-rc1
+References: fate#320512
+
+So the reserve_ebda_region() code has accumulated a number of
+problems over the years that make it really difficult to read
+and understand:
+
+- The calculation of 'lowmem' and 'ebda_addr' is an unnecessarily
+ interleaved mess of first lowmem, then ebda_addr, then lowmem tweaks...
+
+- 'lowmem' here means 'super low mem' - i.e. 16-bit addressable memory. In other
+ parts of the x86 code 'lowmem' means 32-bit addressable memory... This makes it
+ super confusing to read.
+
+- It does not help at all that we have various memory range markers, half of which
+ are 'start of range', half of which are 'end of range' - but this crucial
+ property is not obvious in the naming at all ... gave me a headache trying to
+ understand all this.
+
+- Also, the 'ebda_addr' name sucks: it highlights that it's an address (which is
+ obvious, all values here are addresses!), while it does not highlight that it's
+ the _start_ of the EBDA region ...
+
+- 'BIOS_LOWMEM_KILOBYTES' says a lot of things, except that this is the only value
+ that is a pointer to a value, not a memory range address!
+
+- The function name itself is a misnomer: it says 'reserve_ebda_region()' while
+ its main purpose is to reserve all the firmware ROM typically between 640K and
+ 1MB, while the 'EBDA' part is only a small part of that ...
+
+- Likewise, the paravirt quirk flag name 'ebda_search' is misleading as well: this
+ too should be about whether to reserve firmware areas in the paravirt case.
+
+- In fact thinking about this as 'end of RAM' is confusing: what this function
+ *really* wants to reserve is firmware data and code areas! Once the thinking is
+ inverted from a mixed 'ram' and 'reserved firmware area' notion to a pure
+ 'reserved area' notion everything becomes a lot clearer.
+
+To improve all this rewrite the whole code (without changing the logic):
+
+- Firstly invert the naming from 'lowmem end' to 'BIOS reserved area start'
+ and propagate this concept through all the variable names and constants.
+
+ BIOS_RAM_SIZE_KB_PTR // was: BIOS_LOWMEM_KILOBYTES
+
+ BIOS_START_MIN // was: INSANE_CUTOFF
+
+ ebda_start // was: ebda_addr
+ bios_start // was: lowmem
+
+ BIOS_START_MAX // was: LOWMEM_CAP
+
+- Then clean up the name of the function itself by renaming it
+ to reserve_bios_regions() and renaming the ::ebda_search paravirt
+ flag to ::reserve_bios_regions.
+
+- Fix up all the comments (fix typos), harmonize and simplify their
+ formulation and remove comments that become unnecessary due to
+ the much better naming all around.
+
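+[ Editor's illustrative sketch, not part of this patch: the clamping
+  logic after the rewrite, condensed into one hypothetical helper. ]
+
+	static unsigned int clamp_bios_start(unsigned int bios_start,
+					     unsigned int ebda_start)
+	{
+		/* Below 128K, both markers are considered bogus. */
+		if (ebda_start < BIOS_START_MIN)
+			ebda_start = BIOS_START_MAX;
+		if (bios_start < BIOS_START_MIN)
+			bios_start = BIOS_START_MAX;
+
+		/* Start at the lower marker, but never above 640K. */
+		bios_start = min(bios_start, ebda_start);
+		return min(bios_start, BIOS_START_MAX);
+	}
+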
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/include/asm/bios_ebda.h | 2
+ arch/x86/include/asm/x86_init.h | 3
+ arch/x86/kernel/ebda.c | 128 ++++++++++++++++++++++++--------------
+ arch/x86/kernel/head32.c | 2
+ arch/x86/kernel/head64.c | 2
+ arch/x86/kernel/platform-quirks.c | 4 -
+ 6 files changed, 90 insertions(+), 51 deletions(-)
+
+--- a/arch/x86/include/asm/bios_ebda.h
++++ b/arch/x86/include/asm/bios_ebda.h
+@@ -38,7 +38,7 @@ static inline unsigned int get_bios_ebda
+ return length;
+ }
+
+-void reserve_ebda_region(void);
++void reserve_bios_regions(void);
+
+ #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
+ /*
+--- a/arch/x86/include/asm/x86_init.h
++++ b/arch/x86/include/asm/x86_init.h
+@@ -147,12 +147,12 @@ struct timespec;
+ * struct x86_legacy_features - legacy x86 features
+ *
+ * @rtc: this device has a CMOS real-time clock present
+- * @ebda_search: it's safe to search for the EBDA signature in the hardware's
++ * @reserve_bios_regions: it's safe to search for the EBDA signature in the hardware's
+ * low RAM
+ */
+ struct x86_legacy_features {
+ int rtc;
+- int ebda_search;
++ int reserve_bios_regions;
+ };
+
+ /**
+--- a/arch/x86/kernel/ebda.c
++++ b/arch/x86/kernel/ebda.c
+@@ -6,66 +6,104 @@
+ #include <asm/bios_ebda.h>
+
+ /*
++ * This function reserves all conventional PC system BIOS related
++ * firmware memory areas (some of which are data, some of which
++ * are code), that must not be used by the kernel as available
++ * RAM.
++ *
+ * The BIOS places the EBDA/XBDA at the top of conventional
+ * memory, and usually decreases the reported amount of
+- * conventional memory (int 0x12) too. This also contains a
+- * workaround for Dell systems that neglect to reserve EBDA.
+- * The same workaround also avoids a problem with the AMD768MPX
+- * chipset: reserve a page before VGA to prevent PCI prefetch
+- * into it (errata #56). Usually the page is reserved anyways,
+- * unless you have no PS/2 mouse plugged in.
+- *
+- * This functions is deliberately very conservative. Losing
+- * memory in the bottom megabyte is rarely a problem, as long
+- * as we have enough memory to install the trampoline. Using
+- * memory that is in use by the BIOS or by some DMA device
+- * the BIOS didn't shut down *is* a big problem.
++ * conventional memory (int 0x12) too.
++ *
++ * This means that as a first approximation on most systems we can
++ * guess the reserved BIOS area by looking at the low BIOS RAM size
++ * value and assume that everything above that value (up to 1MB) is
++ * reserved.
++ *
++ * But life in firmware country is not that simple:
++ *
++ * - This code also contains a quirk for Dell systems that neglect
++ * to reserve the EBDA area in the 'RAM size' value ...
++ *
++ * - The same quirk also avoids a problem with the AMD768MPX
++ * chipset: reserve a page before VGA to prevent PCI prefetch
++ * into it (errata #56). (Usually the page is reserved anyways,
++ * unless you have no PS/2 mouse plugged in.)
++ *
++ * - Plus paravirt systems don't have a reliable value in the
++ * 'BIOS RAM size' pointer we can rely on, so we must quirk
++ * them too.
++ *
++ * Due to those various problems this function is deliberately
++ * very conservative and tries to err on the side of reserving
++ * too much, to not risk reserving too little.
++ *
++ * Losing a small amount of memory in the bottom megabyte is
++ * rarely a problem, as long as we have enough memory to install
++ * the SMP bootup trampoline which *must* be in this area.
++ *
++ * Using memory that is in use by the BIOS or by some DMA device
++ * the BIOS didn't shut down *is* a big problem to the kernel,
++ * obviously.
+ */
+
+-#define BIOS_LOWMEM_KILOBYTES 0x413
+-#define LOWMEM_CAP 0x9f000U /* Absolute maximum */
+-#define INSANE_CUTOFF 0x20000U /* Less than this = insane */
++#define BIOS_RAM_SIZE_KB_PTR 0x413
+
+-void __init reserve_ebda_region(void)
++#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */
++#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */
++
++void __init reserve_bios_regions(void)
+ {
+- unsigned int lowmem, ebda_addr;
++ unsigned int bios_start, ebda_start;
+
+ /*
+- * To determine the position of the EBDA and the
+- * end of conventional memory, we need to look at
+- * the BIOS data area. In a paravirtual environment
+- * that area is absent. We'll just have to assume
+- * that the paravirt case can handle memory setup
+- * correctly, without our help.
++ * NOTE: In a paravirtual environment the BIOS reserved
++ * area is absent. We'll just have to assume that the
++ * paravirt case can handle memory setup correctly,
++ * without our help.
+ */
+- if (!x86_platform.legacy.ebda_search)
++ if (!x86_platform.legacy.reserve_bios_regions)
+ return;
+
+- /* end of low (conventional) memory */
+- lowmem = *(unsigned short *)__va(BIOS_LOWMEM_KILOBYTES);
+- lowmem <<= 10;
++ /* Get the start address of the EBDA page: */
++ ebda_start = get_bios_ebda();
++
++ /*
++ * Quirk: some old Dells seem to have a 4k EBDA without
++ * reporting so in their BIOS RAM size value, so just
++ * consider the memory above 640K to be off limits
++ * (bugzilla 2990).
++ *
++ * We detect this case by filtering for nonsensical EBDA
++ * addresses below 128K, where we can assume that they
++ * are bogus and bump it up to a fixed 640K value:
++ */
++ if (ebda_start < BIOS_START_MIN)
++ ebda_start = BIOS_START_MAX;
+
+- /* start of EBDA area */
+- ebda_addr = get_bios_ebda();
++ /*
++ * BIOS RAM size is encoded in kilobytes, convert it
++ * to bytes to get a first guess at where the BIOS
++ * firmware area starts:
++ */
++ bios_start = *(unsigned short *)__va(BIOS_RAM_SIZE_KB_PTR);
++ bios_start <<= 10;
+
+ /*
+- * Note: some old Dells seem to need 4k EBDA without
+- * reporting so, so just consider the memory above 0x9f000
+- * to be off limits (bugzilla 2990).
++ * If bios_start is less than 128K, assume it is bogus
++ * and bump it up to 640K:
+ */
++ if (bios_start < BIOS_START_MIN)
++ bios_start = BIOS_START_MAX;
+
+- /* If the EBDA address is below 128K, assume it is bogus */
+- if (ebda_addr < INSANE_CUTOFF)
+- ebda_addr = LOWMEM_CAP;
+-
+- /* If lowmem is less than 128K, assume it is bogus */
+- if (lowmem < INSANE_CUTOFF)
+- lowmem = LOWMEM_CAP;
+-
+- /* Use the lower of the lowmem and EBDA markers as the cutoff */
+- lowmem = min(lowmem, ebda_addr);
+- lowmem = min(lowmem, LOWMEM_CAP); /* Absolute cap */
++ /*
++ * Use the lower of the bios_start and ebda_start
++ * as the starting point, but don't allow it to
++ * go beyond 640K:
++ */
++ bios_start = min(bios_start, ebda_start);
++ bios_start = min(bios_start, BIOS_START_MAX);
+
+- /* reserve all memory between lowmem and the 1MB mark */
+- memblock_reserve(lowmem, 0x100000 - lowmem);
++ /* Reserve all memory between bios_start and the 1MB mark: */
++ memblock_reserve(bios_start, 0x100000 - bios_start);
+ }
+--- a/arch/x86/kernel/head32.c
++++ b/arch/x86/kernel/head32.c
+@@ -26,7 +26,7 @@ static void __init i386_default_early_se
+ x86_init.resources.reserve_resources = i386_reserve_resources;
+ x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc;
+
+- reserve_ebda_region();
++ reserve_bios_regions();
+ }
+
+ asmlinkage __visible void __init i386_start_kernel(void)
+--- a/arch/x86/kernel/head64.c
++++ b/arch/x86/kernel/head64.c
+@@ -191,7 +191,7 @@ void __init x86_64_start_reservations(ch
+ copy_bootdata(__va(real_mode_data));
+
+ x86_early_init_platform_quirks();
+- reserve_ebda_region();
++ reserve_bios_regions();
+
+ start_kernel();
+ }
+--- a/arch/x86/kernel/platform-quirks.c
++++ b/arch/x86/kernel/platform-quirks.c
+@@ -7,11 +7,11 @@
+ void __init x86_early_init_platform_quirks(void)
+ {
+ x86_platform.legacy.rtc = 1;
+- x86_platform.legacy.ebda_search = 0;
++ x86_platform.legacy.reserve_bios_regions = 0;
+
+ switch (boot_params.hdr.hardware_subarch) {
+ case X86_SUBARCH_PC:
+- x86_platform.legacy.ebda_search = 1;
++ x86_platform.legacy.reserve_bios_regions = 1;
+ break;
+ case X86_SUBARCH_XEN:
+ case X86_SUBARCH_LGUEST:
diff --git a/patches.arch/qcom-0030-efi-capsule-Allocate-whole-capsule-into-virtual-memo.patch b/patches.arch/qcom-0030-efi-capsule-Allocate-whole-capsule-into-virtual-memo.patch
new file mode 100644
index 0000000000..7439a53084
--- /dev/null
+++ b/patches.arch/qcom-0030-efi-capsule-Allocate-whole-capsule-into-virtual-memo.patch
@@ -0,0 +1,87 @@
+From: Austin Christ <austinwc@codeaurora.org>
+Date: Thu, 11 Aug 2016 11:42:00 +0100
+Subject: efi/capsule: Allocate whole capsule into virtual memory
+
+Git-commit: 6862e6ad95e984991a6ceec592cf67831658f928
+Patch-mainline: v4.8-rc2
+References: fate#320512
+
+According to UEFI 2.6 section 7.5.3, the capsule should be in contiguous
+virtual memory and firmware may consume the capsule immediately. To
+correctly implement this functionality, the kernel driver needs to vmap
+the entire capsule at the time it is made available to firmware.
+
+The virtual allocation of the capsule update has been changed from kmap(),
+which was only mapping the first page of the update, to vmap(), which maps
+the entire data payload.
+
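+[ Editor's illustrative sketch, not part of this patch: mapping a page
+  array as one contiguous virtual range; the helper is hypothetical. ]
+
+	/* One contiguous kernel-virtual view of all payload pages;
+	 * pair with vunmap() once the firmware call returns.
+	 */
+	static void *map_capsule(struct page **pages, int nr_pages)
+	{
+		return vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+	}
+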
+Signed-off-by: Austin Christ <austinwc@codeaurora.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Matt Fleming <matt@codeblueprint.co.uk>
+Reviewed-by: Lee, Chun-Yi <jlee@suse.com>
+Cc: <stable@vger.kernel.org> # v4.7
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Bryan O'Donoghue <pure.logic@nexus-software.ie>
+Cc: Denys Vlasenko <dvlasenk@redhat.com>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: Kweh Hock Leong <hock.leong.kweh@intel.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Link: http://lkml.kernel.org/r/1470912120-22831-3-git-send-email-matt@codeblueprint.co.uk
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/capsule-loader.c | 8 +++++---
+ drivers/firmware/efi/capsule.c | 6 +++---
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/firmware/efi/capsule-loader.c
++++ b/drivers/firmware/efi/capsule-loader.c
+@@ -16,6 +16,7 @@
+ #include <linux/slab.h>
+ #include <linux/mutex.h>
+ #include <linux/efi.h>
++#include <linux/vmalloc.h>
+
+ #define NO_FURTHER_WRITE_ACTION -1
+
+@@ -108,14 +109,15 @@ static ssize_t efi_capsule_submit_update
+ int ret;
+ void *cap_hdr_temp;
+
+- cap_hdr_temp = kmap(cap_info->pages[0]);
++ cap_hdr_temp = vmap(cap_info->pages, cap_info->index,
++ VM_MAP, PAGE_KERNEL);
+ if (!cap_hdr_temp) {
+- pr_debug("%s: kmap() failed\n", __func__);
++ pr_debug("%s: vmap() failed\n", __func__);
+ return -EFAULT;
+ }
+
+ ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
+- kunmap(cap_info->pages[0]);
++ vunmap(cap_hdr_temp);
+ if (ret) {
+ pr_err("%s: efi_capsule_update() failed\n", __func__);
+ return ret;
+--- a/drivers/firmware/efi/capsule.c
++++ b/drivers/firmware/efi/capsule.c
+@@ -197,9 +197,9 @@ efi_capsule_update_locked(efi_capsule_he
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+- * @capsule must be a virtual mapping of the first page in @pages
+- * (@pages[0]) in the kernel address space. That is, a
+- * capsule_header_t that describes the entire contents of the capsule
++ * @capsule must be a virtual mapping of the complete capsule update in the
++ * kernel address space, as the capsule can be consumed immediately.
++ * A capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function will validate that the firmware supports
diff --git a/patches.arch/qcom-0031-efi-libstub-Allocate-headspace-in-efi_get_memory_map.patch b/patches.arch/qcom-0031-efi-libstub-Allocate-headspace-in-efi_get_memory_map.patch
new file mode 100644
index 0000000000..8b3cedb1bc
--- /dev/null
+++ b/patches.arch/qcom-0031-efi-libstub-Allocate-headspace-in-efi_get_memory_map.patch
@@ -0,0 +1,351 @@
+From: Jeffrey Hugo <jhugo@codeaurora.org>
+Date: Mon, 29 Aug 2016 14:38:51 -0600
+Subject: efi/libstub: Allocate headspace in efi_get_memory_map()
+
+Git-commit: dadb57abc37499f565b23933dbf49b435c3ba8af
+Patch-mainline: v4.8-rc7
+References: fate#320512
+
+efi_get_memory_map() allocates a buffer to store the memory map that it
+retrieves. This buffer may need to be reused by the client after
+ExitBootServices() is called, at which point allocations are no longer
+permitted. To support this use case, provide the allocated buffer size back
+to the client, and allocate some additional headroom to account for any
+reasonable growth in the map that is likely to happen between the call to
+efi_get_memory_map() and the client reusing the buffer.
+
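+[ Editor's illustrative sketch, not part of this patch: the new calling
+  convention, with all outputs gathered in struct efi_boot_memmap; the
+  wrapper function is hypothetical. ]
+
+	static efi_status_t example_get_map(efi_system_table_t *sys_table_arg)
+	{
+		unsigned long map_size, desc_size, buff_size, key;
+		efi_memory_desc_t *memory_map;
+		u32 desc_ver;
+		struct efi_boot_memmap map = {
+			.map		= &memory_map,
+			.map_size	= &map_size,
+			.desc_size	= &desc_size,
+			.desc_ver	= &desc_ver,
+			.key_ptr	= &key,
+			.buff_size	= &buff_size,	/* new: real buffer size */
+		};
+
+		return efi_get_memory_map(sys_table_arg, &map);
+	}
+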
+Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/boot/compressed/eboot.c | 18 +++-
+ drivers/firmware/efi/libstub/efi-stub-helper.c | 96 +++++++++++++++++--------
+ drivers/firmware/efi/libstub/fdt.c | 17 +++-
+ include/linux/efi.h | 15 ++-
+ 4 files changed, 100 insertions(+), 46 deletions(-)
+
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -1047,7 +1047,7 @@ static efi_status_t exit_boot(struct boo
+ void *handle, bool is64)
+ {
+ struct efi_info *efi = &boot_params->efi_info;
+- unsigned long map_sz, key, desc_size;
++ unsigned long map_sz, key, desc_size, buff_size;
+ efi_memory_desc_t *mem_map;
+ struct setup_data *e820ext;
+ const char *signature;
+@@ -1058,14 +1058,20 @@ static efi_status_t exit_boot(struct boo
+ bool called_exit = false;
+ u8 nr_entries;
+ int i;
++ struct efi_boot_memmap map;
+
+- nr_desc = 0;
+- e820ext = NULL;
+- e820ext_size = 0;
++ nr_desc = 0;
++ e820ext = NULL;
++ e820ext_size = 0;
++ map.map = &mem_map;
++ map.map_size = &map_sz;
++ map.desc_size = &desc_size;
++ map.desc_ver = &desc_version;
++ map.key_ptr = &key;
++ map.buff_size = &buff_size;
+
+ get_map:
+- status = efi_get_memory_map(sys_table, &mem_map, &map_sz, &desc_size,
+- &desc_version, &key);
++ status = efi_get_memory_map(sys_table, &map);
+
+ if (status != EFI_SUCCESS)
+ return status;
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -41,6 +41,8 @@ static unsigned long __chunk_size = EFI_
+ #define EFI_ALLOC_ALIGN EFI_PAGE_SIZE
+ #endif
+
++#define EFI_MMAP_NR_SLACK_SLOTS 8
++
+ struct file_info {
+ efi_file_handle_t *handle;
+ u64 size;
+@@ -63,49 +65,62 @@ void efi_printk(efi_system_table_t *sys_
+ }
+ }
+
++static inline bool mmap_has_headroom(unsigned long buff_size,
++ unsigned long map_size,
++ unsigned long desc_size)
++{
++ unsigned long slack = buff_size - map_size;
++
++ return slack / desc_size >= EFI_MMAP_NR_SLACK_SLOTS;
++}
++
+ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+- efi_memory_desc_t **map,
+- unsigned long *map_size,
+- unsigned long *desc_size,
+- u32 *desc_ver,
+- unsigned long *key_ptr)
++ struct efi_boot_memmap *map)
+ {
+ efi_memory_desc_t *m = NULL;
+ efi_status_t status;
+ unsigned long key;
+ u32 desc_version;
+
+- *map_size = sizeof(*m) * 32;
++ *map->desc_size = sizeof(*m);
++ *map->map_size = *map->desc_size * 32;
++ *map->buff_size = *map->map_size;
+ again:
+- /*
+- * Add an additional efi_memory_desc_t because we're doing an
+- * allocation which may be in a new descriptor region.
+- */
+- *map_size += sizeof(*m);
+ status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+- *map_size, (void **)&m);
++ *map->map_size, (void **)&m);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+- *desc_size = 0;
++ *map->desc_size = 0;
+ key = 0;
+- status = efi_call_early(get_memory_map, map_size, m,
+- &key, desc_size, &desc_version);
+- if (status == EFI_BUFFER_TOO_SMALL) {
++ status = efi_call_early(get_memory_map, map->map_size, m,
++ &key, map->desc_size, &desc_version);
++ if (status == EFI_BUFFER_TOO_SMALL ||
++ !mmap_has_headroom(*map->buff_size, *map->map_size,
++ *map->desc_size)) {
+ efi_call_early(free_pool, m);
++ /*
++		 * Make sure there are a few entries of headroom so that the
++		 * buffer can be reused for a new map after allocations are
++		 * no longer permitted. It's unlikely that the map will grow to
++		 * exceed this headroom once we are ready to trigger
++		 * ExitBootServices().
++ */
++ *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS;
++ *map->buff_size = *map->map_size;
+ goto again;
+ }
+
+ if (status != EFI_SUCCESS)
+ efi_call_early(free_pool, m);
+
+- if (key_ptr && status == EFI_SUCCESS)
+- *key_ptr = key;
+- if (desc_ver && status == EFI_SUCCESS)
+- *desc_ver = desc_version;
++ if (map->key_ptr && status == EFI_SUCCESS)
++ *map->key_ptr = key;
++ if (map->desc_ver && status == EFI_SUCCESS)
++ *map->desc_ver = desc_version;
+
+ fail:
+- *map = m;
++ *map->map = m;
+ return status;
+ }
+
+@@ -113,13 +128,20 @@ fail:
+ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
+ {
+ efi_status_t status;
+- unsigned long map_size;
++ unsigned long map_size, buff_size;
+ unsigned long membase = EFI_ERROR;
+ struct efi_memory_map map;
+ efi_memory_desc_t *md;
++ struct efi_boot_memmap boot_map;
+
+- status = efi_get_memory_map(sys_table_arg, (efi_memory_desc_t **)&map.map,
+- &map_size, &map.desc_size, NULL, NULL);
++ boot_map.map = (efi_memory_desc_t **)&map.map;
++ boot_map.map_size = &map_size;
++ boot_map.desc_size = &map.desc_size;
++ boot_map.desc_ver = NULL;
++ boot_map.key_ptr = NULL;
++ boot_map.buff_size = &buff_size;
++
++ status = efi_get_memory_map(sys_table_arg, &boot_map);
+ if (status != EFI_SUCCESS)
+ return membase;
+
+@@ -144,15 +166,22 @@ efi_status_t efi_high_alloc(efi_system_t
+ unsigned long size, unsigned long align,
+ unsigned long *addr, unsigned long max)
+ {
+- unsigned long map_size, desc_size;
++ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ u64 max_addr = 0;
+ int i;
++ struct efi_boot_memmap boot_map;
++
++ boot_map.map = &map;
++ boot_map.map_size = &map_size;
++ boot_map.desc_size = &desc_size;
++ boot_map.desc_ver = NULL;
++ boot_map.key_ptr = NULL;
++ boot_map.buff_size = &buff_size;
+
+- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+- NULL, NULL);
++ status = efi_get_memory_map(sys_table_arg, &boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+@@ -230,14 +259,21 @@ efi_status_t efi_low_alloc(efi_system_ta
+ unsigned long size, unsigned long align,
+ unsigned long *addr)
+ {
+- unsigned long map_size, desc_size;
++ unsigned long map_size, desc_size, buff_size;
+ efi_memory_desc_t *map;
+ efi_status_t status;
+ unsigned long nr_pages;
+ int i;
++ struct efi_boot_memmap boot_map;
++
++ boot_map.map = &map;
++ boot_map.map_size = &map_size;
++ boot_map.desc_size = &desc_size;
++ boot_map.desc_ver = NULL;
++ boot_map.key_ptr = NULL;
++ boot_map.buff_size = &buff_size;
+
+- status = efi_get_memory_map(sys_table_arg, &map, &map_size, &desc_size,
+- NULL, NULL);
++ status = efi_get_memory_map(sys_table_arg, &boot_map);
+ if (status != EFI_SUCCESS)
+ goto fail;
+
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -156,13 +156,21 @@ efi_status_t allocate_new_fdt_and_exit_b
+ unsigned long fdt_addr,
+ unsigned long fdt_size)
+ {
+- unsigned long map_size, desc_size;
++ unsigned long map_size, desc_size, buff_size;
+ u32 desc_ver;
+ unsigned long mmap_key;
+ efi_memory_desc_t *memory_map, *runtime_map;
+ unsigned long new_fdt_size;
+ efi_status_t status;
+ int runtime_entry_count = 0;
++ struct efi_boot_memmap map;
++
++ map.map = &runtime_map;
++ map.map_size = &map_size;
++ map.desc_size = &desc_size;
++ map.desc_ver = &desc_ver;
++ map.key_ptr = &mmap_key;
++ map.buff_size = &buff_size;
+
+ /*
+ * Get a copy of the current memory map that we will use to prepare
+@@ -170,8 +178,7 @@ efi_status_t allocate_new_fdt_and_exit_b
+ * subsequent allocations adding entries, since they could not affect
+ * the number of EFI_MEMORY_RUNTIME regions.
+ */
+- status = efi_get_memory_map(sys_table, &runtime_map, &map_size,
+- &desc_size, &desc_ver, &mmap_key);
++ status = efi_get_memory_map(sys_table, &map);
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table, "Unable to retrieve UEFI memory map.\n");
+ return status;
+@@ -180,6 +187,7 @@ efi_status_t allocate_new_fdt_and_exit_b
+ pr_efi(sys_table,
+ "Exiting boot services and installing virtual address map...\n");
+
++ map.map = &memory_map;
+ /*
+ * Estimate size of new FDT, and allocate memory for it. We
+ * will allocate a bigger buffer if this ends up being too
+@@ -204,8 +212,7 @@ efi_status_t allocate_new_fdt_and_exit_b
+ * we can get the memory map key needed for
+ * exit_boot_services().
+ */
+- status = efi_get_memory_map(sys_table, &memory_map, &map_size,
+- &desc_size, &desc_ver, &mmap_key);
++ status = efi_get_memory_map(sys_table, &map);
+ if (status != EFI_SUCCESS)
+ goto fail_free_new_fdt;
+
+diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c
+index 53f6d3f..0c9f58c 100644
+--- a/drivers/firmware/efi/libstub/random.c
++++ b/drivers/firmware/efi/libstub/random.c
+@@ -73,12 +73,20 @@ efi_status_t efi_random_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long random_seed)
+ {
+ unsigned long map_size, desc_size, total_slots = 0, target_slot;
++ unsigned long buff_size;
+ efi_status_t status;
+ efi_memory_desc_t *memory_map;
+ int map_offset;
++ struct efi_boot_memmap map;
+
+- status = efi_get_memory_map(sys_table_arg, &memory_map, &map_size,
+- &desc_size, NULL, NULL);
++ map.map = &memory_map;
++ map.map_size = &map_size;
++ map.desc_size = &desc_size;
++ map.desc_ver = NULL;
++ map.key_ptr = NULL;
++ map.buff_size = &buff_size;
++
++ status = efi_get_memory_map(sys_table_arg, &map);
+ if (status != EFI_SUCCESS)
+ return status;
+
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 23cd3ce..943fee5 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -146,6 +146,15 @@ typedef struct {
+ u32 imagesize;
+ } efi_capsule_header_t;
+
++struct efi_boot_memmap {
++ efi_memory_desc_t **map;
++ unsigned long *map_size;
++ unsigned long *desc_size;
++ u32 *desc_ver;
++ unsigned long *key_ptr;
++ unsigned long *buff_size;
++};
++
+ /*
+ * EFI capsule flags
+ */
+@@ -1482,11 +1491,7 @@ char *efi_convert_cmdline(efi_system_tab
+ efi_loaded_image_t *image, int *cmd_line_len);
+
+ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
+- efi_memory_desc_t **map,
+- unsigned long *map_size,
+- unsigned long *desc_size,
+- u32 *desc_ver,
+- unsigned long *key_ptr);
++ struct efi_boot_memmap *map);
+
+ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
+ unsigned long size, unsigned long align,
diff --git a/patches.arch/qcom-0032-efi-libstub-Introduce-ExitBootServices-helper.patch b/patches.arch/qcom-0032-efi-libstub-Introduce-ExitBootServices-helper.patch
new file mode 100644
index 0000000000..f9e70c2aec
--- /dev/null
+++ b/patches.arch/qcom-0032-efi-libstub-Introduce-ExitBootServices-helper.patch
@@ -0,0 +1,135 @@
+From: Jeffrey Hugo <jhugo@codeaurora.org>
+Date: Mon, 29 Aug 2016 14:38:52 -0600
+Subject: efi/libstub: Introduce ExitBootServices helper
+
+Git-commit: fc07716ba803483be91bc4b2344f9c84985e6f07
+Patch-mainline: v4.8-rc7
+References: fate#320512
+
+The spec allows ExitBootServices to fail with EFI_INVALID_PARAMETER if a
+race condition has occurred where the EFI has updated the memory map after
+the stub grabbed a reference to the map. The spec defines a retry
+procedure with specific requirements to handle this scenario.
+
+This scenario was previously observed on x86 - commit d3768d885c6c ("x86,
+efi: retry ExitBootServices() on failure") but the current fix is not spec
+compliant and the scenario is now observed on the Qualcomm Technologies
+QDF2432 via the FDT stub which does not handle the error and thus causes
+boot failures. The user will notice the boot failure as the kernel is not
+executed and the system may drop back to a UEFI shell, but will be
+unresponsive to input and the system will require a power cycle to recover.
+
+Add a helper to the stub library that correctly adheres to the spec in the
+case of EFI_INVALID_PARAMETER from ExitBootServices and can be universally
+used across all stub implementations.
+
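+[ Editor's illustrative sketch, not part of this patch: a client of the
+  new helper. The callback may run twice when the spec's retry path is
+  taken; all names are hypothetical. ]
+
+	struct example_priv {
+		int entries;			/* client state */
+	};
+
+	static efi_status_t example_priv_func(efi_system_table_t *sys_table_arg,
+					      struct efi_boot_memmap *map,
+					      void *priv)
+	{
+		struct example_priv *p = priv;
+
+		p->entries = *map->map_size / *map->desc_size;
+		return EFI_SUCCESS;
+	}
+
+	/* status = efi_exit_boot_services(sys_table, handle, &map, &p,
+	 *				   example_priv_func);
+	 */
+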
+Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/libstub/efi-stub-helper.c | 73 +++++++++++++++++++++++++
+ include/linux/efi.h | 11 +++
+ 2 files changed, 84 insertions(+)
+
+--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
++++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
+@@ -735,3 +735,76 @@ char *efi_convert_cmdline(efi_system_tab
+ *cmd_line_len = options_bytes;
+ return (char *)cmdline_addr;
+ }
++
++/*
++ * Handle calling ExitBootServices according to the requirements set out by the
++ * spec. Obtains the current memory map, and returns that info after calling
++ * ExitBootServices. The client must specify a function to perform any
++ * processing of the memory map data prior to ExitBootServices. A
++ * client-specific structure may be passed to the function via priv. The client
++ * function may be called multiple times.
++ */
++efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table_arg,
++ void *handle,
++ struct efi_boot_memmap *map,
++ void *priv,
++ efi_exit_boot_map_processing priv_func)
++{
++ efi_status_t status;
++
++ status = efi_get_memory_map(sys_table_arg, map);
++
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ status = priv_func(sys_table_arg, map, priv);
++ if (status != EFI_SUCCESS)
++ goto free_map;
++
++ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
++
++ if (status == EFI_INVALID_PARAMETER) {
++ /*
++ * The memory map changed between efi_get_memory_map() and
++ * exit_boot_services(). Per the UEFI Spec v2.6, Section 6.4:
++ * EFI_BOOT_SERVICES.ExitBootServices we need to get the
++ * updated map, and try again. The spec implies one retry
++		 * should be sufficient, which is confirmed against the EDK2
++ * implementation. Per the spec, we can only invoke
++ * get_memory_map() and exit_boot_services() - we cannot alloc
++ * so efi_get_memory_map() cannot be used, and we must reuse
++ * the buffer. For all practical purposes, the headroom in the
++ * buffer should account for any changes in the map so the call
++ * to get_memory_map() is expected to succeed here.
++ */
++ *map->map_size = *map->buff_size;
++ status = efi_call_early(get_memory_map,
++ map->map_size,
++ *map->map,
++ map->key_ptr,
++ map->desc_size,
++ map->desc_ver);
++
++ /* exit_boot_services() was called, thus cannot free */
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ status = priv_func(sys_table_arg, map, priv);
++ /* exit_boot_services() was called, thus cannot free */
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ status = efi_call_early(exit_boot_services, handle, *map->key_ptr);
++ }
++
++ /* exit_boot_services() was called, thus cannot free */
++ if (status != EFI_SUCCESS)
++ goto fail;
++
++ return EFI_SUCCESS;
++
++free_map:
++ efi_call_early(free_pool, *map->map);
++fail:
++ return status;
++}
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1522,4 +1522,15 @@ efi_status_t efi_setup_gop(efi_system_ta
+ unsigned long size);
+
+ bool efi_runtime_disabled(void);
++
++typedef efi_status_t (*efi_exit_boot_map_processing)(
++ efi_system_table_t *sys_table_arg,
++ struct efi_boot_memmap *map,
++ void *priv);
++
++efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
++ void *handle,
++ struct efi_boot_memmap *map,
++ void *priv,
++ efi_exit_boot_map_processing priv_func);
+ #endif /* _LINUX_EFI_H */
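
For reference, a minimal user-space sketch of the retry protocol the helper
above implements: fetch the map key, attempt the exit, and on
EFI_INVALID_PARAMETER re-read the map into the existing buffer and retry
exactly once. The mock firmware calls and all names here are illustrative,
not the kernel API; only the control flow mirrors efi_exit_boot_services().

    #include <stdbool.h>
    #include <stdio.h>

    enum status { SUCCESS, INVALID_PARAMETER };

    static unsigned long current_key = 1;   /* firmware-side map version */
    static bool changed_once;

    /* mock GetMemoryMap(): returns the key for the current map */
    static unsigned long mock_get_memory_map(void)
    {
            return current_key;
    }

    /* mock ExitBootServices(): fails once with a stale key, as the race allows */
    static enum status mock_exit_boot_services(unsigned long key)
    {
            if (!changed_once) {
                    current_key++;          /* a firmware event changes the map */
                    changed_once = true;
            }
            return key == current_key ? SUCCESS : INVALID_PARAMETER;
    }

    static enum status exit_boot(void)
    {
            unsigned long key = mock_get_memory_map();
            enum status status = mock_exit_boot_services(key);

            if (status == INVALID_PARAMETER) {
                    /* re-read the map into the same buffer and retry once */
                    key = mock_get_memory_map();
                    status = mock_exit_boot_services(key);
            }
            return status;
    }

    int main(void)
    {
            printf("exit_boot: %s\n", exit_boot() == SUCCESS ? "ok" : "failed");
            return 0;
    }
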
diff --git a/patches.arch/qcom-0033-efi-libstub-Use-efi_exit_boot_services-in-FDT.patch b/patches.arch/qcom-0033-efi-libstub-Use-efi_exit_boot_services-in-FDT.patch
new file mode 100644
index 0000000000..c19c0861f6
--- /dev/null
+++ b/patches.arch/qcom-0033-efi-libstub-Use-efi_exit_boot_services-in-FDT.patch
@@ -0,0 +1,88 @@
+From: Jeffrey Hugo <jhugo@codeaurora.org>
+Date: Mon, 29 Aug 2016 14:38:53 -0600
+Subject: efi/libstub: Use efi_exit_boot_services() in FDT
+
+Git-commit: ed9cc156c42ff0c0bf9b1d09df48a12bf0873473
+Patch-mainline: v4.8-rc7
+References: fate#320512
+
+The FDT code directly calls ExitBootServices. This is inadvisable as the
+UEFI spec details a complex set of errors, race conditions, and API
+interactions that the caller of ExitBootServices must get correct. The
+FDT code does not handle EFI_INVALID_PARAMETER as required by the spec,
+which causes intermittent boot failures on the Qualcomm Technologies
+QDF2432. Call the efi_exit_boot_services() helper instead, which handles
+the EFI_INVALID_PARAMETER scenario properly.
+
+Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/libstub/fdt.c | 37 +++++++++++++++++++++++++++----------
+ 1 file changed, 27 insertions(+), 10 deletions(-)
+
+--- a/drivers/firmware/efi/libstub/fdt.c
++++ b/drivers/firmware/efi/libstub/fdt.c
+@@ -152,6 +152,27 @@ fdt_set_fail:
+ #define EFI_FDT_ALIGN EFI_PAGE_SIZE
+ #endif
+
++struct exit_boot_struct {
++ efi_memory_desc_t *runtime_map;
++ int *runtime_entry_count;
++};
++
++static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
++ struct efi_boot_memmap *map,
++ void *priv)
++{
++ struct exit_boot_struct *p = priv;
++ /*
++ * Update the memory map with virtual addresses. The function will also
++ * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
++ * entries so that we can pass it straight to SetVirtualAddressMap()
++ */
++ efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
++ p->runtime_map, p->runtime_entry_count);
++
++ return EFI_SUCCESS;
++}
++
+ /*
+ * Allocate memory for a new FDT, then add EFI, commandline, and
+ * initrd related fields to the FDT. This routine increases the
+@@ -183,6 +204,7 @@ efi_status_t allocate_new_fdt_and_exit_b
+ efi_status_t status;
+ int runtime_entry_count = 0;
+ struct efi_boot_memmap map;
++ struct exit_boot_struct priv;
+
+ map.map = &runtime_map;
+ map.map_size = &map_size;
+@@ -257,16 +279,11 @@ efi_status_t allocate_new_fdt_and_exit_b
+ }
+ }
+
+- /*
+- * Update the memory map with virtual addresses. The function will also
+- * populate @runtime_map with copies of just the EFI_MEMORY_RUNTIME
+- * entries so that we can pass it straight into SetVirtualAddressMap()
+- */
+- efi_get_virtmap(memory_map, map_size, desc_size, runtime_map,
+- &runtime_entry_count);
+-
+- /* Now we are ready to exit_boot_services.*/
+- status = sys_table->boottime->exit_boot_services(handle, mmap_key);
++ sys_table->boottime->free_pool(memory_map);
++ priv.runtime_map = runtime_map;
++ priv.runtime_entry_count = &runtime_entry_count;
++ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
++ exit_boot_func);
+
+ if (status == EFI_SUCCESS) {
+ efi_set_virtual_address_map_t *svam;
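
The conversion above is an instance of the priv/callback pattern: the client
packs its state into a private struct and the generic helper hands it back to
a client hook at the right point, possibly more than once. A stand-alone
sketch of the pattern (all names are illustrative, not the stub API):

    #include <stdio.h>

    /* illustrative stand-in for the FDT code's exit_boot_struct */
    struct exit_boot_ctx {
            int runtime_entry_count;
    };

    typedef int (*map_processing_fn)(void *priv);

    /* generic helper: invokes the client hook at the right point(s) */
    static int generic_exit_helper(map_processing_fn fn, void *priv)
    {
            /* ...obtain the memory map here... */
            return fn(priv);        /* may legitimately run more than once */
    }

    /* client hook: does the FDT-specific work (placeholder here) */
    static int exit_boot_func(void *priv)
    {
            struct exit_boot_ctx *p = priv;

            p->runtime_entry_count++;       /* e.g. collect runtime entries */
            return 0;
    }

    int main(void)
    {
            struct exit_boot_ctx ctx = { 0 };

            generic_exit_helper(exit_boot_func, &ctx);
            printf("runtime entries collected: %d\n", ctx.runtime_entry_count);
            return 0;
    }
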
diff --git a/patches.arch/qcom-0034-x86-efi-Use-efi_exit_boot_services.patch b/patches.arch/qcom-0034-x86-efi-Use-efi_exit_boot_services.patch
new file mode 100644
index 0000000000..234810c268
--- /dev/null
+++ b/patches.arch/qcom-0034-x86-efi-Use-efi_exit_boot_services.patch
@@ -0,0 +1,192 @@
+From: Jeffrey Hugo <jhugo@codeaurora.org>
+Date: Mon, 29 Aug 2016 14:38:54 -0600
+Subject: x86/efi: Use efi_exit_boot_services()
+
+Git-commit: d64934019f6cc39202e2f78063709f61ca5cb364
+Patch-mainline: v4.8-rc7
+References: fate#320512
+
+The eboot code directly calls ExitBootServices. This is inadvisable as the
+UEFI spec details a complex set of errors, race conditions, and API
+interactions that the caller of ExitBootServices must get correct. The
+eboot code attempts allocations after calling ExitBootServices, which is
+not permitted per the spec. Call the efi_exit_boot_services() helper
+instead, which handles the allocation scenario properly.
+
+Signed-off-by: Jeffrey Hugo <jhugo@codeaurora.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/boot/compressed/eboot.c | 134 +++++++++++++++++++--------------------
+ 1 file changed, 66 insertions(+), 68 deletions(-)
+
+--- a/arch/x86/boot/compressed/eboot.c
++++ b/arch/x86/boot/compressed/eboot.c
+@@ -1043,85 +1043,87 @@ static efi_status_t alloc_e820ext(u32 nr
+ return status;
+ }
+
++struct exit_boot_struct {
++ struct boot_params *boot_params;
++ struct efi_info *efi;
++ struct setup_data *e820ext;
++ __u32 e820ext_size;
++ bool is64;
++};
++
++static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
++ struct efi_boot_memmap *map,
++ void *priv)
++{
++ static bool first = true;
++ const char *signature;
++ __u32 nr_desc;
++ efi_status_t status;
++ struct exit_boot_struct *p = priv;
++
++ if (first) {
++ nr_desc = *map->buff_size / *map->desc_size;
++ if (nr_desc > ARRAY_SIZE(p->boot_params->e820_map)) {
++ u32 nr_e820ext = nr_desc -
++ ARRAY_SIZE(p->boot_params->e820_map);
++
++ status = alloc_e820ext(nr_e820ext, &p->e820ext,
++ &p->e820ext_size);
++ if (status != EFI_SUCCESS)
++ return status;
++ }
++ first = false;
++ }
++
++ signature = p->is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
++ memcpy(&p->efi->efi_loader_signature, signature, sizeof(__u32));
++
++ p->efi->efi_systab = (unsigned long)sys_table_arg;
++ p->efi->efi_memdesc_size = *map->desc_size;
++ p->efi->efi_memdesc_version = *map->desc_ver;
++ p->efi->efi_memmap = (unsigned long)*map->map;
++ p->efi->efi_memmap_size = *map->map_size;
++
++#ifdef CONFIG_X86_64
++ p->efi->efi_systab_hi = (unsigned long)sys_table_arg >> 32;
++ p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32;
++#endif
++
++ return EFI_SUCCESS;
++}
++
+ static efi_status_t exit_boot(struct boot_params *boot_params,
+ void *handle, bool is64)
+ {
+- struct efi_info *efi = &boot_params->efi_info;
+ unsigned long map_sz, key, desc_size, buff_size;
+ efi_memory_desc_t *mem_map;
+ struct setup_data *e820ext;
+- const char *signature;
+ __u32 e820ext_size;
+- __u32 nr_desc, prev_nr_desc;
+ efi_status_t status;
+ __u32 desc_version;
+- bool called_exit = false;
+- u8 nr_entries;
+- int i;
+ struct efi_boot_memmap map;
++ struct exit_boot_struct priv;
+
+- nr_desc = 0;
+- e820ext = NULL;
+- e820ext_size = 0;
+- map.map = &mem_map;
+- map.map_size = &map_sz;
+- map.desc_size = &desc_size;
+- map.desc_ver = &desc_version;
+- map.key_ptr = &key;
+- map.buff_size = &buff_size;
+-
+-get_map:
+- status = efi_get_memory_map(sys_table, &map);
++ map.map = &mem_map;
++ map.map_size = &map_sz;
++ map.desc_size = &desc_size;
++ map.desc_ver = &desc_version;
++ map.key_ptr = &key;
++ map.buff_size = &buff_size;
++ priv.boot_params = boot_params;
++ priv.efi = &boot_params->efi_info;
++ priv.e820ext = NULL;
++ priv.e820ext_size = 0;
++ priv.is64 = is64;
+
++ /* Might as well exit boot services now */
++ status = efi_exit_boot_services(sys_table, handle, &map, &priv,
++ exit_boot_func);
+ if (status != EFI_SUCCESS)
+ return status;
+
+- prev_nr_desc = nr_desc;
+- nr_desc = map_sz / desc_size;
+- if (nr_desc > prev_nr_desc &&
+- nr_desc > ARRAY_SIZE(boot_params->e820_map)) {
+- u32 nr_e820ext = nr_desc - ARRAY_SIZE(boot_params->e820_map);
+-
+- status = alloc_e820ext(nr_e820ext, &e820ext, &e820ext_size);
+- if (status != EFI_SUCCESS)
+- goto free_mem_map;
+-
+- efi_call_early(free_pool, mem_map);
+- goto get_map; /* Allocated memory, get map again */
+- }
+-
+- signature = is64 ? EFI64_LOADER_SIGNATURE : EFI32_LOADER_SIGNATURE;
+- memcpy(&efi->efi_loader_signature, signature, sizeof(__u32));
+-
+- efi->efi_systab = (unsigned long)sys_table;
+- efi->efi_memdesc_size = desc_size;
+- efi->efi_memdesc_version = desc_version;
+- efi->efi_memmap = (unsigned long)mem_map;
+- efi->efi_memmap_size = map_sz;
+-
+-#ifdef CONFIG_X86_64
+- efi->efi_systab_hi = (unsigned long)sys_table >> 32;
+- efi->efi_memmap_hi = (unsigned long)mem_map >> 32;
+-#endif
+-
+- /* Might as well exit boot services now */
+- status = efi_call_early(exit_boot_services, handle, key);
+- if (status != EFI_SUCCESS) {
+- /*
+- * ExitBootServices() will fail if any of the event
+- * handlers change the memory map. In which case, we
+- * must be prepared to retry, but only once so that
+- * we're guaranteed to exit on repeated failures instead
+- * of spinning forever.
+- */
+- if (called_exit)
+- goto free_mem_map;
+-
+- called_exit = true;
+- efi_call_early(free_pool, mem_map);
+- goto get_map;
+- }
+-
++ e820ext = priv.e820ext;
++ e820ext_size = priv.e820ext_size;
+ /* Historic? */
+ boot_params->alt_mem_k = 32 * 1024;
+
+@@ -1130,10 +1132,6 @@ get_map:
+ return status;
+
+ return EFI_SUCCESS;
+-
+-free_mem_map:
+- efi_call_early(free_pool, mem_map);
+- return status;
+ }
+
+ #ifdef CONFIG_HIBERNATE_VERIFICATION
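
Because the helper may invoke the callback both on the initial attempt and on
the retry, the x86 callback above guards its one-time e820ext allocation with
a function-local static. A simplified stand-alone model of that idiom (the
allocation is just a counter here; names are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * The hook may run twice (initial attempt plus one retry after
     * EFI_INVALID_PARAMETER), but the extended e820 buffer must be
     * allocated at most once; a local static remembers the first run.
     */
    static int exit_boot_hook(int *allocations)
    {
            static bool first = true;

            if (first) {
                    (*allocations)++;       /* alloc_e820ext() would go here */
                    first = false;
            }
            /* ...refresh boot_params->efi_info on every invocation... */
            return 0;
    }

    int main(void)
    {
            int allocations = 0;

            exit_boot_hook(&allocations);   /* first attempt */
            exit_boot_hook(&allocations);   /* retry path */
            printf("allocations: %d\n", allocations);       /* prints 1 */
            return 0;
    }
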
diff --git a/patches.arch/qcom-0035-x86-efi-Test-for-EFI_MEMMAP-functionality-when-itera.patch b/patches.arch/qcom-0035-x86-efi-Test-for-EFI_MEMMAP-functionality-when-itera.patch
new file mode 100644
index 0000000000..cb21db98a3
--- /dev/null
+++ b/patches.arch/qcom-0035-x86-efi-Test-for-EFI_MEMMAP-functionality-when-itera.patch
@@ -0,0 +1,70 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Tue, 21 Jun 2016 23:11:38 +0100
+Subject: x86/efi: Test for EFI_MEMMAP functionality when iterating EFI memmap
+
+Git-commit: 4971531af319f8bdd9a81a87eecfb6b19f2f8c8e
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+Both efi_find_mirror() and efi_fake_memmap() really want to know
+whether the EFI memory map is available, not just whether the machine
+was booted using EFI. efi_fake_memmap() even has a check for
+EFI_MEMMAP at the start of the function.
+
+Since we've already got other code that has this dependency, merge
+everything under one if() conditional, and remove the now superfluous
+check from efi_fake_memmap().
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Xishi Qiu <qiuxishi@huawei.com>
+Cc: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/kernel/setup.c | 16 ++++++++--------
+ drivers/firmware/efi/fake_mem.c | 2 +-
+ 2 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1106,17 +1106,17 @@ void __init setup_arch(char **cmdline_p)
+ memblock_set_current_limit(ISA_END_ADDRESS);
+ memblock_x86_fill();
+
+- if (efi_enabled(EFI_BOOT)) {
++ reserve_bios_regions();
++
++ if (efi_enabled(EFI_MEMMAP)) {
+ efi_fake_memmap();
+ efi_find_mirror();
+- }
+-
+- /*
+- * The EFI specification says that boot service code won't be called
+- * after ExitBootServices(). This is, in fact, a lie.
+- */
+- if (efi_enabled(EFI_MEMMAP))
++ /*
++ * The EFI specification says that boot service code won't be
++ * called after ExitBootServices(). This is, in fact, a lie.
++ */
+ efi_reserve_boot_services();
++ }
+
+ /* preallocate 4k for mptable mpc */
+ early_reserve_e820_mpc_new();
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -64,7 +64,7 @@ void __init efi_fake_memmap(void)
+ void *old, *new;
+ int i;
+
+- if (!nr_fake_mem || !efi_enabled(EFI_MEMMAP))
++ if (!nr_fake_mem)
+ return;
+
+ /* count up the number of EFI memory descriptor */
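
The change above boils down to gating map-walking code on the right feature
bit rather than on the mere fact of an EFI boot. A toy model of the flag test
(the bits and helper are stand-ins, not the kernel's efi.flags machinery):

    #include <stdio.h>

    /* stand-ins for the efi.flags bits discussed above */
    enum { EFI_BOOT_BIT = 0, EFI_MEMMAP_BIT = 1 };

    static unsigned long flags;

    static int flag_enabled(int bit)
    {
            return !!(flags & (1UL << bit));
    }

    int main(void)
    {
            flags |= 1UL << EFI_BOOT_BIT;   /* booted via EFI, map not mapped */

            /* gate map-walking work on EFI_MEMMAP, not on EFI_BOOT */
            if (flag_enabled(EFI_MEMMAP_BIT))
                    puts("walk the EFI memory map");
            else
                    puts("no memory map; skip fake_mem/mirror handling");
            return 0;
    }
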
diff --git a/patches.arch/qcom-0036-efi-Refactor-efi_memmap_init_early-into-arch-neutral.patch b/patches.arch/qcom-0036-efi-Refactor-efi_memmap_init_early-into-arch-neutral.patch
new file mode 100644
index 0000000000..d7f46f71e3
--- /dev/null
+++ b/patches.arch/qcom-0036-efi-Refactor-efi_memmap_init_early-into-arch-neutral.patch
@@ -0,0 +1,383 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Fri, 26 Feb 2016 21:22:05 +0000
+Subject: efi: Refactor efi_memmap_init_early() into arch-neutral code
+Git-commit: 9479c7cebfb568f8b8b424be7f1cac120e9eea95
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+Every EFI architecture apart from ia64 needs to setup the EFI memory
+map at efi.memmap, and the code for doing that is essentially the same
+across all implementations. Therefore, it makes sense to factor this
+out into the common code under drivers/firmware/efi/.
+
+The only slight variation is the data structure out of which we pull
+the initial memory map information, such as physical address, memory
+descriptor size and version, etc. We can address this by passing a
+generic data structure (struct efi_memory_map_data) as the argument to
+efi_memmap_init_early() which contains the minimum info required for
+initialising the memory map.
+
+In the process, this patch also fixes a few undesirable implementation
+differences:
+
+ - ARM and arm64 were failing to clear the EFI_MEMMAP bit when
+ unmapping the early EFI memory map. EFI_MEMMAP indicates whether
+ the EFI memory map is mapped (not the regions contained within) and
+ can be traversed. It's more correct to set the bit as soon as we
+ memremap() the passed in EFI memmap.
+
+ - Rename efi_unmap_memmap() to efi_memmap_unmap() to adhere to the
+ regular naming scheme.
+
+This patch also uses a read-write mapping for the memory map instead
+of the read-only mapping currently used on ARM and arm64. x86 needs
+the ability to update the memory map in-place when assigning virtual
+addresses to regions (efi_map_region()) and tagging regions when
+reserving boot services (efi_reserve_boot_services()).
+
+There's no way for the generic fake_mem code to know which mapping to
+use without introducing some arch-specific constant/hook, so just use
+read-write since read-only is of dubious value for the EFI memory map.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/include/asm/efi.h | 1
+ arch/x86/platform/efi/efi.c | 66 +++++++++----------------------------
+ arch/x86/platform/efi/quirks.c | 4 +-
+ drivers/firmware/efi/arm-init.c | 17 ++++-----
+ drivers/firmware/efi/arm-runtime.c | 2 -
+ drivers/firmware/efi/efi.c | 46 +++++++++++++++++++++++++
+ drivers/firmware/efi/fake_mem.c | 15 +++++---
+ include/linux/efi.h | 16 ++++++++
+ 8 files changed, 99 insertions(+), 68 deletions(-)
+
+--- a/arch/x86/include/asm/efi.h
++++ b/arch/x86/include/asm/efi.h
+@@ -120,7 +120,6 @@ extern int __init efi_memblock_x86_reser
+ extern pgd_t * __init efi_call_phys_prolog(void);
+ extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
+ extern void __init efi_print_memmap(void);
+-extern void __init efi_unmap_memmap(void);
+ extern void __init efi_memory_uc(u64 addr, unsigned long size);
+ extern void __init efi_map_region(efi_memory_desc_t *md);
+ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -190,7 +190,9 @@ static void __init do_add_efi_memmap(voi
+ int __init efi_memblock_x86_reserve_range(void)
+ {
+ struct efi_info *e = &boot_params.efi_info;
++ struct efi_memory_map_data data;
+ phys_addr_t pmap;
++ int rv;
+
+ if (efi_enabled(EFI_PARAVIRT))
+ return 0;
+@@ -205,11 +207,17 @@ int __init efi_memblock_x86_reserve_rang
+ #else
+ pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
+ #endif
+- efi.memmap.phys_map = pmap;
+- efi.memmap.nr_map = e->efi_memmap_size /
+- e->efi_memdesc_size;
+- efi.memmap.desc_size = e->efi_memdesc_size;
+- efi.memmap.desc_version = e->efi_memdesc_version;
++ data.phys_map = pmap;
++ data.size = e->efi_memmap_size;
++ data.desc_size = e->efi_memdesc_size;
++ data.desc_version = e->efi_memdesc_version;
++
++ rv = efi_memmap_init_early(&data);
++ if (rv)
++ return rv;
++
++ if (add_efi_memmap)
++ do_add_efi_memmap();
+
+ memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
+
+@@ -234,19 +242,6 @@ void __init efi_print_memmap(void)
+ #endif /* EFI_DEBUG */
+ }
+
+-void __init efi_unmap_memmap(void)
+-{
+- unsigned long size;
+-
+- clear_bit(EFI_MEMMAP, &efi.flags);
+-
+- size = efi.memmap.nr_map * efi.memmap.desc_size;
+- if (efi.memmap.map) {
+- early_memunmap(efi.memmap.map, size);
+- efi.memmap.map = NULL;
+- }
+-}
+-
+ static int __init efi_systab_init(void *phys)
+ {
+ if (efi_enabled(EFI_64BIT)) {
+@@ -432,33 +427,6 @@ static int __init efi_runtime_init(void)
+ return 0;
+ }
+
+-static int __init efi_memmap_init(void)
+-{
+- unsigned long addr, size;
+-
+- if (efi_enabled(EFI_PARAVIRT))
+- return 0;
+-
+- /* Map the EFI memory map */
+- size = efi.memmap.nr_map * efi.memmap.desc_size;
+- addr = (unsigned long)efi.memmap.phys_map;
+-
+- efi.memmap.map = early_memremap(addr, size);
+- if (efi.memmap.map == NULL) {
+- pr_err("Could not map the memory map!\n");
+- return -ENOMEM;
+- }
+-
+- efi.memmap.map_end = efi.memmap.map + size;
+-
+- if (add_efi_memmap)
+- do_add_efi_memmap();
+-
+- set_bit(EFI_MEMMAP, &efi.flags);
+-
+- return 0;
+-}
+-
+ void __init efi_init(void)
+ {
+ efi_char16_t *c16;
+@@ -516,11 +484,11 @@ void __init efi_init(void)
+ if (!efi_runtime_supported())
+ pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+ else {
+- if (efi_runtime_disabled() || efi_runtime_init())
++ if (efi_runtime_disabled() || efi_runtime_init()) {
++ efi_memmap_unmap();
+ return;
++ }
+ }
+- if (efi_memmap_init())
+- return;
+
+ if (efi_enabled(EFI_DBG))
+ efi_print_memmap();
+@@ -821,7 +789,7 @@ static void __init kexec_enter_virtual_m
+ * non-native EFI
+ */
+ if (!efi_is_native()) {
+- efi_unmap_memmap();
++ efi_memmap_unmap();
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -232,7 +232,7 @@ void __init efi_free_boot_services(void)
+ free_bootmem_late(start, size);
+ }
+
+- efi_unmap_memmap();
++ efi_memmap_unmap();
+ }
+
+ /*
+@@ -300,7 +300,7 @@ void __init efi_apply_memmap_quirks(void
+ */
+ if (!efi_runtime_supported()) {
+ pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+- efi_unmap_memmap();
++ efi_memmap_unmap();
+ }
+
+ /*
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -179,12 +179,11 @@ static __init void reserve_regions(void)
+ if (efi_enabled(EFI_DBG))
+ pr_cont("\n");
+ }
+-
+- set_bit(EFI_MEMMAP, &efi.flags);
+ }
+
+ void __init efi_init(void)
+ {
++ struct efi_memory_map_data data;
+ struct efi_fdt_params params;
+
+ /* Grab UEFI information placed in FDT by stub */
+@@ -193,9 +192,12 @@ void __init efi_init(void)
+
+ efi_system_table = params.system_table;
+
+- efi.memmap.phys_map = params.mmap;
+- efi.memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
+- if (efi.memmap.map == NULL) {
++ data.desc_version = params.desc_ver;
++ data.desc_size = params.desc_size;
++ data.size = params.mmap_size;
++ data.phys_map = params.mmap;
++
++ if (efi_memmap_init_early(&data) < 0) {
+ /*
+ * If we are booting via UEFI, the UEFI memory map is the only
+ * description of memory we have, so there is little point in
+@@ -203,16 +205,13 @@ void __init efi_init(void)
+ */
+ panic("Unable to map EFI memory map.\n");
+ }
+- efi.memmap.map_end = efi.memmap.map + params.mmap_size;
+- efi.memmap.desc_size = params.desc_size;
+- efi.memmap.desc_version = params.desc_ver;
+
+ if (uefi_init() < 0)
+ return;
+
+ reserve_regions();
+ efi_memattr_init();
+- early_memunmap(efi.memmap.map, params.mmap_size);
++ efi_memmap_unmap();
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
+ (params.mmap & ~PAGE_MASK)));
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -109,7 +109,7 @@ static int __init arm_enable_runtime_ser
+
+ pr_info("Remapping and enabling EFI services.\n");
+
+- mapsize = efi.memmap.map_end - efi.memmap.map;
++ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+ efi.memmap.map = memremap(efi.memmap.phys_map, mapsize, MEMREMAP_WB);
+ if (!efi.memmap.map) {
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -447,6 +447,52 @@ int __init efi_config_init(efi_config_ta
+ return ret;
+ }
+
++/**
++ * efi_memmap_init_early - Map the EFI memory map data structure
++ * @data: EFI memory map data
++ *
++ * Use early_memremap() to map the passed in EFI memory map and assign
++ * it to efi.memmap.
++ */
++int __init efi_memmap_init_early(struct efi_memory_map_data *data)
++{
++ struct efi_memory_map map;
++
++ if (efi_enabled(EFI_PARAVIRT))
++ return 0;
++
++ map.phys_map = data->phys_map;
++
++ map.map = early_memremap(data->phys_map, data->size);
++ if (!map.map) {
++ pr_err("Could not map the memory map!\n");
++ return -ENOMEM;
++ }
++
++ map.nr_map = data->size / data->desc_size;
++ map.map_end = map.map + data->size;
++
++ map.desc_version = data->desc_version;
++ map.desc_size = data->desc_size;
++
++ set_bit(EFI_MEMMAP, &efi.flags);
++
++ efi.memmap = map;
++
++ return 0;
++}
++
++void __init efi_memmap_unmap(void)
++{
++ unsigned long size;
++
++ size = efi.memmap.desc_size * efi.memmap.nr_map;
++
++ early_memunmap(efi.memmap.map, size);
++ efi.memmap.map = NULL;
++ clear_bit(EFI_MEMMAP, &efi.flags);
++}
++
+ #ifdef CONFIG_EFI_VARS_MODULE
+ static int __init efi_load_efivars(void)
+ {
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -57,6 +57,7 @@ static int __init cmp_fake_mem(const voi
+ void __init efi_fake_memmap(void)
+ {
+ u64 start, end, m_start, m_end, m_attr;
++ struct efi_memory_map_data data;
+ int new_nr_map = efi.memmap.nr_map;
+ efi_memory_desc_t *md;
+ phys_addr_t new_memmap_phy;
+@@ -180,12 +181,14 @@ void __init efi_fake_memmap(void)
+ }
+
+ /* swap into new EFI memmap */
+- efi_unmap_memmap();
+- efi.memmap.map = new_memmap;
+- efi.memmap.phys_map = new_memmap_phy;
+- efi.memmap.nr_map = new_nr_map;
+- efi.memmap.map_end = efi.memmap.map + efi.memmap.nr_map * efi.memmap.desc_size;
+- set_bit(EFI_MEMMAP, &efi.flags);
++ early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
++ efi_memmap_unmap();
++
++ data.phys_map = new_memmap_phy;
++ data.size = efi.memmap.desc_size * new_nr_map;
++ data.desc_version = efi.memmap.desc_version;
++ data.desc_size = efi.memmap.desc_size;
++ efi_memmap_init_early(&data);
+
+ /* print new EFI memmap */
+ efi_print_memmap();
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -760,6 +760,18 @@ typedef struct {
+ unsigned long tables;
+ } efi_system_table_t;
+
++/*
++ * Architecture independent structure for describing a memory map for the
++ * benefit of efi_memmap_init_early(), saving us the need to pass four
++ * parameters.
++ */
++struct efi_memory_map_data {
++ phys_addr_t phys_map;
++ unsigned long size;
++ unsigned long desc_version;
++ unsigned long desc_size;
++};
++
+ struct efi_memory_map {
+ phys_addr_t phys_map;
+ void *map;
+@@ -1033,6 +1045,10 @@ static inline efi_status_t efi_query_var
+ }
+ #endif
+ extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
++
++extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
++extern void __init efi_memmap_unmap(void);
++
+ extern int efi_config_init(efi_config_table_type_t *arch_tables);
+ #ifdef CONFIG_EFI_ESRT
+ extern void __init efi_esrt_init(void);
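
The refactoring above replaces four positional parameters with a single
descriptor struct, so each architecture only fills in the fields it gets from
its own boot data. A stand-alone sketch of that calling convention (struct
name, field values and the init function are illustrative):

    #include <stdio.h>

    /* illustrative mirror of struct efi_memory_map_data */
    struct memmap_data {
            unsigned long phys_map;         /* physical address of the map */
            unsigned long size;             /* total size in bytes */
            unsigned long desc_version;
            unsigned long desc_size;        /* bytes per descriptor */
    };

    static int memmap_init(const struct memmap_data *data)
    {
            if (!data->size || !data->desc_size)
                    return -1;
            printf("map %lu bytes at %#lx: %lu descriptors\n",
                   data->size, data->phys_map, data->size / data->desc_size);
            return 0;
    }

    int main(void)
    {
            /* each architecture fills this from its own boot data source */
            struct memmap_data data = {
                    .phys_map = 0x80000000UL,
                    .size = 4800,
                    .desc_version = 1,
                    .desc_size = 48,
            };

            return memmap_init(&data) ? 1 : 0;
    }
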
diff --git a/patches.arch/qcom-0037-x86-efi-Consolidate-region-mapping-logic.patch b/patches.arch/qcom-0037-x86-efi-Consolidate-region-mapping-logic.patch
new file mode 100644
index 0000000000..ddf2fbaa1a
--- /dev/null
+++ b/patches.arch/qcom-0037-x86-efi-Consolidate-region-mapping-logic.patch
@@ -0,0 +1,130 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Mon, 20 Jun 2016 14:36:51 +0100
+Subject: x86/efi: Consolidate region mapping logic
+
+Git-commit: ab72a27da4c6c19b0e3d6d7556fdd4afb581c8ac
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+EFI regions are currently mapped in two separate places. The bulk of
+the work is done in efi_map_regions() but when CONFIG_EFI_MIXED is
+enabled the additional regions that are required when operating in
+mixed mode are mapping in efi_setup_page_tables().
+
+Pull everything into efi_map_regions() and refactor the test for
+which regions should be mapped into a should_map_region() function.
+Generously sprinkle comments to clarify the different cases.
+
+Acked-by: Borislav Petkov <bp@suse.de>
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi.c | 50 +++++++++++++++++++++++++++++++++++------
+ arch/x86/platform/efi/efi_64.c | 20 ----------------
+ 2 files changed, 43 insertions(+), 27 deletions(-)
+
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -763,6 +763,46 @@ static void *efi_map_next_entry(void *en
+ return entry;
+ }
+
++static bool should_map_region(efi_memory_desc_t *md)
++{
++ /*
++ * Runtime regions always require runtime mappings (obviously).
++ */
++ if (md->attribute & EFI_MEMORY_RUNTIME)
++ return true;
++
++ /*
++ * 32-bit EFI doesn't suffer from the bug that requires us to
++ * reserve boot services regions, and mixed mode support
++ * doesn't exist for 32-bit kernels.
++ */
++ if (IS_ENABLED(CONFIG_X86_32))
++ return false;
++
++ /*
++ * Map all of RAM so that we can access arguments in the 1:1
++ * mapping when making EFI runtime calls.
++ */
++ if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) {
++ if (md->type == EFI_CONVENTIONAL_MEMORY ||
++ md->type == EFI_LOADER_DATA ||
++ md->type == EFI_LOADER_CODE)
++ return true;
++ }
++
++ /*
++ * Map boot services regions as a workaround for buggy
++ * firmware that accesses them even when they shouldn't.
++ *
++ * See efi_{reserve,free}_boot_services().
++ */
++ if (md->type == EFI_BOOT_SERVICES_CODE ||
++ md->type == EFI_BOOT_SERVICES_DATA)
++ return true;
++
++ return false;
++}
++
+ /*
+ * Map the efi memory ranges of the runtime services and update new_mmap with
+ * virtual addresses.
+@@ -780,13 +820,9 @@ static void * __init efi_map_regions(int
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
+ md = p;
+- if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
+-#ifdef CONFIG_X86_64
+- if (md->type != EFI_BOOT_SERVICES_CODE &&
+- md->type != EFI_BOOT_SERVICES_DATA)
+-#endif
+- continue;
+- }
++
++ if (!should_map_region(md))
++ continue;
+
+ efi_map_region(md);
+ get_systab_virt_addr(md);
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -211,7 +211,6 @@ void efi_sync_low_kernel_mappings(void)
+ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ {
+ unsigned long text;
+- efi_memory_desc_t *md;
+ struct page *page;
+ unsigned npages;
+ pgd_t *pgd;
+@@ -244,25 +243,6 @@ int __init efi_setup_page_tables(unsigne
+ if (!IS_ENABLED(CONFIG_EFI_MIXED))
+ return 0;
+
+- /*
+- * Map all of RAM so that we can access arguments in the 1:1
+- * mapping when making EFI runtime calls.
+- */
+- for_each_efi_memory_desc(md) {
+- if (md->type != EFI_CONVENTIONAL_MEMORY &&
+- md->type != EFI_LOADER_DATA &&
+- md->type != EFI_LOADER_CODE)
+- continue;
+-
+- pfn = md->phys_addr >> PAGE_SHIFT;
+- npages = md->num_pages;
+-
+- if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, 0)) {
+- pr_err("Failed to map 1:1 memory\n");
+- return 1;
+- }
+- }
+-
+ page = alloc_page(GFP_KERNEL|__GFP_DMA32);
+ if (!page)
+ panic("Unable to allocate EFI runtime stack < 4GB\n");
diff --git a/patches.arch/qcom-0038-efi-Add-efi_memmap_init_late-for-permanent-EFI-memma.patch b/patches.arch/qcom-0038-efi-Add-efi_memmap_init_late-for-permanent-EFI-memma.patch
new file mode 100644
index 0000000000..d1c6539ac1
--- /dev/null
+++ b/patches.arch/qcom-0038-efi-Add-efi_memmap_init_late-for-permanent-EFI-memma.patch
@@ -0,0 +1,400 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Sat, 27 Feb 2016 15:52:50 +0000
+Subject: efi: Add efi_memmap_init_late() for permanent EFI memmap
+
+Git-commit: dca0f971ea6fcf2f1bb78f7995adf80da9f4767f
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+Drivers need a way to access the EFI memory map at runtime. ARM and
+arm64 currently provide this by remapping the EFI memory map into the
+vmalloc space before setting up the EFI virtual mappings.
+
+x86 does not provide this functionality which has resulted in the code
+in efi_mem_desc_lookup() where it will manually map individual EFI
+memmap entries if the memmap has already been torn down on x86,
+
+ /*
+ * If a driver calls this after efi_free_boot_services,
+ * ->map will be NULL, and the target may also not be mapped.
+ * So just always get our own virtual map on the CPU.
+ *
+ */
+ md = early_memremap(p, sizeof (*md));
+
+There isn't a good reason for not providing a permanent EFI memory map
+for runtime queries, especially since the EFI regions are not mapped
+into the standard kernel page tables.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi.c | 46 ++++++++----
+ arch/x86/platform/efi/quirks.c | 2
+ drivers/firmware/efi/arm-runtime.c | 4 -
+ drivers/firmware/efi/efi.c | 135 ++++++++++++++++++++++++++-----------
+ include/linux/efi.h | 2
+ 5 files changed, 130 insertions(+), 59 deletions(-)
+
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -845,6 +845,19 @@ static void __init kexec_enter_virtual_m
+ get_systab_virt_addr(md);
+ }
+
++ /*
++ * Unregister the early EFI memmap from efi_init() and install
++ * the new EFI memory map.
++ */
++ efi_memmap_unmap();
++
++ if (efi_memmap_init_late(efi.memmap.phys_map,
++ efi.memmap.desc_size * efi.memmap.nr_map)) {
++ pr_err("Failed to remap late EFI memory map\n");
++ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++ return;
++ }
++
+ save_runtime_map();
+
+ BUG_ON(!efi.systab);
+@@ -906,6 +919,7 @@ static void __init __efi_enter_virtual_m
+ int count = 0, pg_shift = 0;
+ void *new_memmap = NULL;
+ efi_status_t status;
++ phys_addr_t pa;
+
+ efi.systab = NULL;
+
+@@ -923,11 +937,26 @@ static void __init __efi_enter_virtual_m
+ return;
+ }
+
++ pa = __pa(new_memmap);
++
++ /*
++ * Unregister the early EFI memmap from efi_init() and install
++ * the new EFI memory map that we are about to pass to the
++ * firmware via SetVirtualAddressMap().
++ */
++ efi_memmap_unmap();
++
++ if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
++ pr_err("Failed to remap late EFI memory map\n");
++ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
++ return;
++ }
++
+ save_runtime_map();
+
+ BUG_ON(!efi.systab);
+
+- if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
++ if (efi_setup_page_tables(pa, 1 << pg_shift)) {
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+@@ -940,14 +969,14 @@ static void __init __efi_enter_virtual_m
+ efi.memmap.desc_size * count,
+ efi.memmap.desc_size,
+ efi.memmap.desc_version,
+- (efi_memory_desc_t *)__pa(new_memmap));
++ (efi_memory_desc_t *)pa);
+ } else {
+ status = efi_thunk_set_virtual_address_map(
+ efi_phys.set_virtual_address_map,
+ efi.memmap.desc_size * count,
+ efi.memmap.desc_size,
+ efi.memmap.desc_version,
+- (efi_memory_desc_t *)__pa(new_memmap));
++ (efi_memory_desc_t *)pa);
+ }
+
+ if (status != EFI_SUCCESS) {
+@@ -973,17 +1002,6 @@ static void __init __efi_enter_virtual_m
+
+ efi_runtime_mkexec();
+
+- /*
+- * We mapped the descriptor array into the EFI pagetable above
+- * but we're not unmapping it here because if we're running in
+- * EFI mixed mode we need all of memory to be accessible when
+- * we pass parameters to the EFI runtime services in the
+- * thunking code.
+- *
+- * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
+- */
+- free_pages((unsigned long)new_memmap, pg_shift);
+-
+ /* clean DUMMY object */
+ efi_delete_dummy_variable();
+ }
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -186,8 +186,6 @@ void __init efi_free_boot_services(void)
+
+ free_bootmem_late(start, size);
+ }
+-
+- efi_memmap_unmap();
+ }
+
+ /*
+--- a/drivers/firmware/efi/arm-runtime.c
++++ b/drivers/firmware/efi/arm-runtime.c
+@@ -107,12 +107,10 @@ static int __init arm_enable_runtime_ser
+
+ mapsize = efi.memmap.desc_size * efi.memmap.nr_map;
+
+- efi.memmap.map = memremap(efi.memmap.phys_map, mapsize, MEMREMAP_WB);
+- if (!efi.memmap.map) {
++ if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) {
+ pr_err("Failed to remap EFI memory map\n");
+ return -ENOMEM;
+ }
+- efi.memmap.map_end = efi.memmap.map + mapsize;
+
+ set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -248,56 +248,31 @@ subsys_initcall(efisubsys_init);
+
+ /*
+ * Find the efi memory descriptor for a given physical address. Given a
+- * physicall address, determine if it exists within an EFI Memory Map entry,
++ * physical address, determine if it exists within an EFI Memory Map entry,
+ * and if so, populate the supplied memory descriptor with the appropriate
+ * data.
+ */
+ int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+ {
+- struct efi_memory_map *map = &efi.memmap;
+- phys_addr_t p, e;
++ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP)) {
+ pr_err_once("EFI_MEMMAP is not enabled.\n");
+ return -EINVAL;
+ }
+
+- if (!map) {
+- pr_err_once("efi.memmap is not set.\n");
+- return -EINVAL;
+- }
+ if (!out_md) {
+ pr_err_once("out_md is null.\n");
+ return -EINVAL;
+ }
+- if (WARN_ON_ONCE(!map->phys_map))
+- return -EINVAL;
+- if (WARN_ON_ONCE(map->nr_map == 0) || WARN_ON_ONCE(map->desc_size == 0))
+- return -EINVAL;
+
+- e = map->phys_map + map->nr_map * map->desc_size;
+- for (p = map->phys_map; p < e; p += map->desc_size) {
+- efi_memory_desc_t *md;
++ for_each_efi_memory_desc(md) {
+ u64 size;
+ u64 end;
+
+- /*
+- * If a driver calls this after efi_free_boot_services,
+- * ->map will be NULL, and the target may also not be mapped.
+- * So just always get our own virtual map on the CPU.
+- *
+- */
+- md = early_memremap(p, sizeof (*md));
+- if (!md) {
+- pr_err_once("early_memremap(%pa, %zu) failed.\n",
+- &p, sizeof (*md));
+- return -ENOMEM;
+- }
+-
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+ md->type != EFI_BOOT_SERVICES_DATA &&
+ md->type != EFI_RUNTIME_SERVICES_DATA) {
+- early_memunmap(md, sizeof (*md));
+ continue;
+ }
+
+@@ -305,11 +280,8 @@ int __init efi_mem_desc_lookup(u64 phys_
+ end = md->phys_addr + size;
+ if (phys_addr >= md->phys_addr && phys_addr < end) {
+ memcpy(out_md, md, sizeof(*out_md));
+- early_memunmap(md, sizeof (*md));
+ return 0;
+ }
+-
+- early_memunmap(md, sizeof (*md));
+ }
+ pr_err_once("requested map not found.\n");
+ return -ENOENT;
+@@ -476,32 +448,49 @@ int __init efi_config_init(efi_config_ta
+ }
+
+ /**
+- * efi_memmap_init_early - Map the EFI memory map data structure
++ * __efi_memmap_init - Common code for mapping the EFI memory map
+ * @data: EFI memory map data
++ * @late: Use early or late mapping function?
+ *
+- * Use early_memremap() to map the passed in EFI memory map and assign
+- * it to efi.memmap.
++ * This function takes care of figuring out which function to use to
++ * map the EFI memory map in efi.memmap based on how far into the boot
++ * we are.
++ *
++ * During bootup @late should be %false since we only have access to
++ * the early_memremap*() functions as the vmalloc space isn't set up.
++ * Once the kernel is fully booted we can fall back to the more robust
++ * memremap*() API.
++ *
++ * Returns zero on success, a negative error code on failure.
+ */
+-int __init efi_memmap_init_early(struct efi_memory_map_data *data)
++static int __init
++__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+ {
+ struct efi_memory_map map;
++ phys_addr_t phys_map;
+
+ if (efi_enabled(EFI_PARAVIRT))
+ return 0;
+
+- map.phys_map = data->phys_map;
++ phys_map = data->phys_map;
++
++ if (late)
++ map.map = memremap(phys_map, data->size, MEMREMAP_WB);
++ else
++ map.map = early_memremap(phys_map, data->size);
+
+- map.map = early_memremap(data->phys_map, data->size);
+ if (!map.map) {
+ pr_err("Could not map the memory map!\n");
+ return -ENOMEM;
+ }
+
++ map.phys_map = data->phys_map;
+ map.nr_map = data->size / data->desc_size;
+ map.map_end = map.map + data->size;
+
+ map.desc_version = data->desc_version;
+ map.desc_size = data->desc_size;
++ map.late = late;
+
+ set_bit(EFI_MEMMAP, &efi.flags);
+
+@@ -510,17 +499,83 @@ int __init efi_memmap_init_early(struct
+ return 0;
+ }
+
++/**
++ * efi_memmap_init_early - Map the EFI memory map data structure
++ * @data: EFI memory map data
++ *
++ * Use early_memremap() to map the passed in EFI memory map and assign
++ * it to efi.memmap.
++ */
++int __init efi_memmap_init_early(struct efi_memory_map_data *data)
++{
++ /* Cannot go backwards */
++ WARN_ON(efi.memmap.late);
++
++ return __efi_memmap_init(data, false);
++}
++
+ void __init efi_memmap_unmap(void)
+ {
+- unsigned long size;
++ if (!efi.memmap.late) {
++ unsigned long size;
+
+- size = efi.memmap.desc_size * efi.memmap.nr_map;
++ size = efi.memmap.desc_size * efi.memmap.nr_map;
++ early_memunmap(efi.memmap.map, size);
++ } else {
++ memunmap(efi.memmap.map);
++ }
+
+- early_memunmap(efi.memmap.map, size);
+ efi.memmap.map = NULL;
+ clear_bit(EFI_MEMMAP, &efi.flags);
+ }
+
++/**
++ * efi_memmap_init_late - Map efi.memmap with memremap()
++ * @phys_addr: Physical address of the new EFI memory map
++ * @size: Size in bytes of the new EFI memory map
++ *
++ * Set up a mapping of the EFI memory map using memremap(). This
++ * function should only be called once the vmalloc space has been
++ * set up and is therefore not suitable for calling during early EFI
++ * initialisation, e.g. in efi_init(). Additionally, it expects
++ * efi_memmap_init_early() to have already been called.
++ *
++ * The reason there are two EFI memmap initialisation functions
++ * (efi_memmap_init_early() and this late version) is that the
++ * early EFI memmap should be explicitly unmapped once EFI
++ * initialisation is complete as the fixmap space used to map the EFI
++ * memmap (via early_memremap()) is a scarce resource.
++ *
++ * This late mapping is intended to persist for the duration of
++ * runtime so that things like efi_mem_desc_lookup() and
++ * efi_mem_attributes() always work.
++ *
++ * Returns zero on success, a negative error code on failure.
++ */
++int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
++{
++ struct efi_memory_map_data data = {
++ .phys_map = addr,
++ .size = size,
++ };
++
++ /* Did we forget to unmap the early EFI memmap? */
++ WARN_ON(efi.memmap.map);
++
++ /* Were we already called? */
++ WARN_ON(efi.memmap.late);
++
++ /*
++ * It makes no sense to allow callers to register different
++ * values for the following fields. Copy them out of the
++ * existing early EFI memmap.
++ */
++ data.desc_version = efi.memmap.desc_version;
++ data.desc_size = efi.memmap.desc_size;
++
++ return __efi_memmap_init(&data, true);
++}
++
+ #ifdef CONFIG_EFI_VARS_MODULE
+ static int __init efi_load_efivars(void)
+ {
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -775,6 +775,7 @@ struct efi_memory_map {
+ int nr_map;
+ unsigned long desc_version;
+ unsigned long desc_size;
++ bool late;
+ };
+
+ struct efi_fdt_params {
+@@ -1035,6 +1036,7 @@ static inline efi_status_t efi_query_var
+ extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+
+ extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
++extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+ extern void __init efi_memmap_unmap(void);
+
+ extern int efi_config_init(efi_config_table_type_t *arch_tables);
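
The early/late split above hinges on recording which mapping API was used so
that teardown can pick the matching counterpart. A user-space sketch of that
bookkeeping, with malloc()/free() standing in for the remap/unmap primitives
(all names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* stand-ins: the real code chooses early_memremap() vs memremap() */
    static void *early_map(unsigned long sz)  { return malloc(sz); }
    static void *late_map(unsigned long sz)   { return malloc(sz); }
    static void early_unmap(void *p)          { free(p); }
    static void late_unmap(void *p)           { free(p); }

    static struct {
            void *map;
            bool late;
    } memmap;

    static int memmap_init(unsigned long size, bool late)
    {
            void *map = late ? late_map(size) : early_map(size);

            if (!map)
                    return -1;
            memmap.map = map;
            memmap.late = late;     /* unmap must use the matching teardown */
            return 0;
    }

    static void memmap_unmap(void)
    {
            if (memmap.late)
                    late_unmap(memmap.map);
            else
                    early_unmap(memmap.map);
            memmap.map = NULL;
    }

    int main(void)
    {
            memmap_init(4096, false);   /* early boot: fixmap-backed mapping */
            memmap_unmap();             /* torn down before going late */
            memmap_init(4096, true);    /* after vmalloc is up: permanent map */
            printf("late mapping: %d\n", memmap.late);
            memmap_unmap();
            return 0;
    }
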
diff --git a/patches.arch/qcom-0039-efi-fake_mem-Refactor-main-two-code-chunks-into-func.patch b/patches.arch/qcom-0039-efi-fake_mem-Refactor-main-two-code-chunks-into-func.patch
new file mode 100644
index 0000000000..e30aa091de
--- /dev/null
+++ b/patches.arch/qcom-0039-efi-fake_mem-Refactor-main-two-code-chunks-into-func.patch
@@ -0,0 +1,292 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Mon, 29 Feb 2016 16:58:18 +0000
+Subject: efi/fake_mem: Refactor main two code chunks into functions
+
+Git-commit: c8c1a4c5e4ead0d2dcf0f0bcb8bdbdcf877fb3bb
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+There is a whole load of generic EFI memory map code inside of the
+fake_mem driver which is better suited to being grouped with the rest
+of the generic EFI code for manipulating EFI memory maps.
+
+In preparation for that, this patch refactors the core code, so that
+it's possible to move entire functions later.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/fake_mem.c | 229 +++++++++++++++++++++++-----------------
+ 1 file changed, 134 insertions(+), 95 deletions(-)
+
+diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
+index 939eec47139f..446c669431c0 100644
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -54,43 +54,151 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
+ return 0;
+ }
+
++/**
++ * efi_fake_memmap_split_count - Count number of additional EFI memmap entries
++ * @md: EFI memory descriptor to split
++ * @range: Address range (start, end) to split around
++ *
++ * Returns the number of additional EFI memmap entries required to
++ * accomodate @range.
++ */
++static int efi_fake_memmap_split_count(efi_memory_desc_t *md, struct range *range)
++{
++ u64 m_start, m_end;
++ u64 start, end;
++ int count = 0;
++
++ start = md->phys_addr;
++ end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
++
++ /* modifying range */
++ m_start = range->start;
++ m_end = range->end;
++
++ if (m_start <= start) {
++ /* split into 2 parts */
++ if (start < m_end && m_end < end)
++ count++;
++ }
++
++ if (start < m_start && m_start < end) {
++ /* split into 3 parts */
++ if (m_end < end)
++ count += 2;
++ /* split into 2 parts */
++ if (end <= m_end)
++ count++;
++ }
++
++ return count;
++}
++
++/**
++ * efi_fake_memmap_insert - Insert a fake memory region in an EFI memmap
++ * @old_memmap: The existing EFI memory map structure
++ * @buf: Address of buffer to store new map
++ * @mem: Fake memory map entry to insert
++ *
++ * It is suggested that you call efi_fake_memmap_split_count() first
++ * to see how large @buf needs to be.
++ */
++static void efi_fake_memmap_insert(struct efi_memory_map *old_memmap,
++ void *buf, struct fake_mem *mem)
++{
++ u64 m_start, m_end, m_attr;
++ efi_memory_desc_t *md;
++ u64 start, end;
++ void *old, *new;
++
++ /* modifying range */
++ m_start = mem->range.start;
++ m_end = mem->range.end;
++ m_attr = mem->attribute;
++
++ for (old = old_memmap->map, new = buf;
++ old < old_memmap->map_end;
++ old += old_memmap->desc_size, new += old_memmap->desc_size) {
++
++ /* copy original EFI memory descriptor */
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ start = md->phys_addr;
++ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
++
++ if (m_start <= start && end <= m_end)
++ md->attribute |= m_attr;
++
++ if (m_start <= start &&
++ (start < m_end && m_end < end)) {
++ /* first part */
++ md->attribute |= m_attr;
++ md->num_pages = (m_end - md->phys_addr + 1) >>
++ EFI_PAGE_SHIFT;
++ /* latter part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->phys_addr = m_end + 1;
++ md->num_pages = (end - md->phys_addr + 1) >>
++ EFI_PAGE_SHIFT;
++ }
++
++ if ((start < m_start && m_start < end) && m_end < end) {
++ /* first part */
++ md->num_pages = (m_start - md->phys_addr) >>
++ EFI_PAGE_SHIFT;
++ /* middle part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->attribute |= m_attr;
++ md->phys_addr = m_start;
++ md->num_pages = (m_end - m_start + 1) >>
++ EFI_PAGE_SHIFT;
++ /* last part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->phys_addr = m_end + 1;
++ md->num_pages = (end - m_end) >>
++ EFI_PAGE_SHIFT;
++ }
++
++ if ((start < m_start && m_start < end) &&
++ (end <= m_end)) {
++ /* first part */
++ md->num_pages = (m_start - md->phys_addr) >>
++ EFI_PAGE_SHIFT;
++ /* latter part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->phys_addr = m_start;
++ md->num_pages = (end - md->phys_addr + 1) >>
++ EFI_PAGE_SHIFT;
++ md->attribute |= m_attr;
++ }
++ }
++}
++
+ void __init efi_fake_memmap(void)
+ {
+- u64 start, end, m_start, m_end, m_attr;
+ struct efi_memory_map_data data;
+ int new_nr_map = efi.memmap.nr_map;
+ efi_memory_desc_t *md;
+ phys_addr_t new_memmap_phy;
+ void *new_memmap;
+- void *old, *new;
+ int i;
+
+ if (!nr_fake_mem)
+ return;
+
+ /* count up the number of EFI memory descriptor */
+- for_each_efi_memory_desc(md) {
+- start = md->phys_addr;
+- end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+-
+- for (i = 0; i < nr_fake_mem; i++) {
+- /* modifying range */
+- m_start = fake_mems[i].range.start;
+- m_end = fake_mems[i].range.end;
+-
+- if (m_start <= start) {
+- /* split into 2 parts */
+- if (start < m_end && m_end < end)
+- new_nr_map++;
+- }
+- if (start < m_start && m_start < end) {
+- /* split into 3 parts */
+- if (m_end < end)
+- new_nr_map += 2;
+- /* split into 2 parts */
+- if (end <= m_end)
+- new_nr_map++;
+- }
++ for (i = 0; i < nr_fake_mem; i++) {
++ for_each_efi_memory_desc(md) {
++ struct range *r = &fake_mems[i].range;
++
++ new_nr_map += efi_fake_memmap_split_count(md, r);
+ }
+ }
+
+@@ -108,77 +216,8 @@ void __init efi_fake_memmap(void)
+ return;
+ }
+
+- for (old = efi.memmap.map, new = new_memmap;
+- old < efi.memmap.map_end;
+- old += efi.memmap.desc_size, new += efi.memmap.desc_size) {
+-
+- /* copy original EFI memory descriptor */
+- memcpy(new, old, efi.memmap.desc_size);
+- md = new;
+- start = md->phys_addr;
+- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+-
+- for (i = 0; i < nr_fake_mem; i++) {
+- /* modifying range */
+- m_start = fake_mems[i].range.start;
+- m_end = fake_mems[i].range.end;
+- m_attr = fake_mems[i].attribute;
+-
+- if (m_start <= start && end <= m_end)
+- md->attribute |= m_attr;
+-
+- if (m_start <= start &&
+- (start < m_end && m_end < end)) {
+- /* first part */
+- md->attribute |= m_attr;
+- md->num_pages = (m_end - md->phys_addr + 1) >>
+- EFI_PAGE_SHIFT;
+- /* latter part */
+- new += efi.memmap.desc_size;
+- memcpy(new, old, efi.memmap.desc_size);
+- md = new;
+- md->phys_addr = m_end + 1;
+- md->num_pages = (end - md->phys_addr + 1) >>
+- EFI_PAGE_SHIFT;
+- }
+-
+- if ((start < m_start && m_start < end) && m_end < end) {
+- /* first part */
+- md->num_pages = (m_start - md->phys_addr) >>
+- EFI_PAGE_SHIFT;
+- /* middle part */
+- new += efi.memmap.desc_size;
+- memcpy(new, old, efi.memmap.desc_size);
+- md = new;
+- md->attribute |= m_attr;
+- md->phys_addr = m_start;
+- md->num_pages = (m_end - m_start + 1) >>
+- EFI_PAGE_SHIFT;
+- /* last part */
+- new += efi.memmap.desc_size;
+- memcpy(new, old, efi.memmap.desc_size);
+- md = new;
+- md->phys_addr = m_end + 1;
+- md->num_pages = (end - m_end) >>
+- EFI_PAGE_SHIFT;
+- }
+-
+- if ((start < m_start && m_start < end) &&
+- (end <= m_end)) {
+- /* first part */
+- md->num_pages = (m_start - md->phys_addr) >>
+- EFI_PAGE_SHIFT;
+- /* latter part */
+- new += efi.memmap.desc_size;
+- memcpy(new, old, efi.memmap.desc_size);
+- md = new;
+- md->phys_addr = m_start;
+- md->num_pages = (end - md->phys_addr + 1) >>
+- EFI_PAGE_SHIFT;
+- md->attribute |= m_attr;
+- }
+- }
+- }
++ for (i = 0; i < nr_fake_mem; i++)
++ efi_fake_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
+
+ /* swap into new EFI memmap */
+ early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+--
+2.11.0
+
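
The split-counting helper introduced above is a small interval computation:
overlaying range m on a descriptor covering [start, end] yields zero, one, or
two extra map entries. A stand-alone copy of that logic with two quick checks
(the hex values are arbitrary test inputs):

    #include <stdio.h>

    struct range { unsigned long long start, end; };    /* inclusive bounds */

    /*
     * 0 = no split, 1 = descriptor becomes two pieces,
     * 2 = three pieces (m strictly inside the descriptor).
     */
    static int split_count(unsigned long long start, unsigned long long end,
                           const struct range *m)
    {
            int count = 0;

            if (m->start <= start && start < m->end && m->end < end)
                    count++;                /* overlaps the front: 2 parts */
            if (start < m->start && m->start < end) {
                    if (m->end < end)
                            count += 2;     /* strictly inside: 3 parts */
                    else
                            count++;        /* overlaps the tail: 2 parts */
            }
            return count;
    }

    int main(void)
    {
            struct range mid  = { 0x2000, 0x2fff };
            struct range tail = { 0x3000, 0x5fff };

            printf("%d\n", split_count(0x1000, 0x3fff, &mid));   /* 2 */
            printf("%d\n", split_count(0x1000, 0x3fff, &tail));  /* 1 */
            return 0;
    }
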
diff --git a/patches.arch/qcom-0040-efi-Split-out-EFI-memory-map-functions-into-new-file.patch b/patches.arch/qcom-0040-efi-Split-out-EFI-memory-map-functions-into-new-file.patch
new file mode 100644
index 0000000000..8df9da7e00
--- /dev/null
+++ b/patches.arch/qcom-0040-efi-Split-out-EFI-memory-map-functions-into-new-file.patch
@@ -0,0 +1,670 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Mon, 29 Feb 2016 20:30:39 +0000
+Subject: efi: Split out EFI memory map functions into new file
+
+Git-commit: 60863c0d1a96b740048cc7d94a2d00d6f89ba3d8
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+Also move the functions from the EFI fake mem driver since future
+patches will require access to the memmap insertion code even if
+CONFIG_EFI_FAKE_MEM isn't enabled.
+
+This will be useful when we need to build custom EFI memory maps to
+allow drivers to mark regions as reserved.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/Makefile | 2
+ drivers/firmware/efi/efi.c | 129 -------------------
+ drivers/firmware/efi/fake_mem.c | 143 ---------------------
+ drivers/firmware/efi/memmap.c | 267 ++++++++++++++++++++++++++++++++++++++++
+ include/linux/efi.h | 10 +
+ 5 files changed, 284 insertions(+), 267 deletions(-)
+ create mode 100644 drivers/firmware/efi/memmap.c
+
+--- a/drivers/firmware/efi/Makefile
++++ b/drivers/firmware/efi/Makefile
+@@ -10,7 +10,7 @@
+ KASAN_SANITIZE_runtime-wrappers.o := n
+
+ obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o
+-obj-$(CONFIG_EFI) += capsule.o
++obj-$(CONFIG_EFI) += capsule.o memmap.o
+ obj-$(CONFIG_EFI_VARS) += efivars.o
+ obj-$(CONFIG_EFI_ESRT) += esrt.o
+ obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -415,135 +415,6 @@ int __init efi_config_init(efi_config_ta
+ return ret;
+ }
+
+-/**
+- * __efi_memmap_init - Common code for mapping the EFI memory map
+- * @data: EFI memory map data
+- * @late: Use early or late mapping function?
+- *
+- * This function takes care of figuring out which function to use to
+- * map the EFI memory map in efi.memmap based on how far into the boot
+- * we are.
+- *
+- * During bootup @late should be %false since we only have access to
+- * the early_memremap*() functions as the vmalloc space isn't set up.
+- * Once the kernel is fully booted we can fall back to the more robust
+- * memremap*() API.
+- *
+- * Returns zero on success, a negative error code on failure.
+- */
+-static int __init
+-__efi_memmap_init(struct efi_memory_map_data *data, bool late)
+-{
+- struct efi_memory_map map;
+- phys_addr_t phys_map;
+-
+- if (efi_enabled(EFI_PARAVIRT))
+- return 0;
+-
+- phys_map = data->phys_map;
+-
+- if (late)
+- map.map = memremap(phys_map, data->size, MEMREMAP_WB);
+- else
+- map.map = early_memremap(phys_map, data->size);
+-
+- if (!map.map) {
+- pr_err("Could not map the memory map!\n");
+- return -ENOMEM;
+- }
+-
+- map.phys_map = data->phys_map;
+- map.nr_map = data->size / data->desc_size;
+- map.map_end = map.map + data->size;
+-
+- map.desc_version = data->desc_version;
+- map.desc_size = data->desc_size;
+- map.late = late;
+-
+- set_bit(EFI_MEMMAP, &efi.flags);
+-
+- efi.memmap = map;
+-
+- return 0;
+-}
+-
+-/**
+- * efi_memmap_init_early - Map the EFI memory map data structure
+- * @data: EFI memory map data
+- *
+- * Use early_memremap() to map the passed in EFI memory map and assign
+- * it to efi.memmap.
+- */
+-int __init efi_memmap_init_early(struct efi_memory_map_data *data)
+-{
+- /* Cannot go backwards */
+- WARN_ON(efi.memmap.late);
+-
+- return __efi_memmap_init(data, false);
+-}
+-
+-void __init efi_memmap_unmap(void)
+-{
+- if (!efi.memmap.late) {
+- unsigned long size;
+-
+- size = efi.memmap.desc_size * efi.memmap.nr_map;
+- early_memunmap(efi.memmap.map, size);
+- } else {
+- memunmap(efi.memmap.map);
+- }
+-
+- efi.memmap.map = NULL;
+- clear_bit(EFI_MEMMAP, &efi.flags);
+-}
+-
+-/**
+- * efi_memmap_init_late - Map efi.memmap with memremap()
+- * @phys_addr: Physical address of the new EFI memory map
+- * @size: Size in bytes of the new EFI memory map
+- *
+- * Set up a mapping of the EFI memory map using memremap(). This
+- * function should only be called once the vmalloc space has been
+- * set up and is therefore not suitable for calling during early EFI
+- * initialisation, e.g. in efi_init(). Additionally, it expects
+- * efi_memmap_init_early() to have already been called.
+- *
+- * The reason there are two EFI memmap initialisation functions
+- * (efi_memmap_init_early() and this late version) is that the
+- * early EFI memmap should be explicitly unmapped once EFI
+- * initialisation is complete as the fixmap space used to map the EFI
+- * memmap (via early_memremap()) is a scarce resource.
+- *
+- * This late mapping is intended to persist for the duration of
+- * runtime so that things like efi_mem_desc_lookup() and
+- * efi_mem_attributes() always work.
+- *
+- * Returns zero on success, a negative error code on failure.
+- */
+-int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
+-{
+- struct efi_memory_map_data data = {
+- .phys_map = addr,
+- .size = size,
+- };
+-
+- /* Did we forget to unmap the early EFI memmap? */
+- WARN_ON(efi.memmap.map);
+-
+- /* Were we already called? */
+- WARN_ON(efi.memmap.late);
+-
+- /*
+- * It makes no sense to allow callers to register different
+- * values for the following fields. Copy them out of the
+- * existing early EFI memmap.
+- */
+- data.desc_version = efi.memmap.desc_version;
+- data.desc_size = efi.memmap.desc_size;
+-
+- return __efi_memmap_init(&data, true);
+-}
+-
+ #ifdef CONFIG_EFI_VARS_MODULE
+ static int __init efi_load_efivars(void)
+ {
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -35,17 +35,13 @@
+
+ #define EFI_MAX_FAKEMEM CONFIG_EFI_MAX_FAKE_MEM
+
+-struct fake_mem {
+- struct range range;
+- u64 attribute;
+-};
+-static struct fake_mem fake_mems[EFI_MAX_FAKEMEM];
++static struct efi_mem_range fake_mems[EFI_MAX_FAKEMEM];
+ static int nr_fake_mem;
+
+ static int __init cmp_fake_mem(const void *x1, const void *x2)
+ {
+- const struct fake_mem *m1 = x1;
+- const struct fake_mem *m2 = x2;
++ const struct efi_mem_range *m1 = x1;
++ const struct efi_mem_range *m2 = x2;
+
+ if (m1->range.start < m2->range.start)
+ return -1;
+@@ -54,133 +50,6 @@ static int __init cmp_fake_mem(const voi
+ return 0;
+ }
+
+-/**
+- * efi_fake_memmap_split_count - Count number of additional EFI memmap entries
+- * @md: EFI memory descriptor to split
+- * @range: Address range (start, end) to split around
+- *
+- * Returns the number of additional EFI memmap entries required to
+- * accommodate @range.
+- */
+-static int efi_fake_memmap_split_count(efi_memory_desc_t *md, struct range *range)
+-{
+- u64 m_start, m_end;
+- u64 start, end;
+- int count = 0;
+-
+- start = md->phys_addr;
+- end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+-
+- /* modifying range */
+- m_start = range->start;
+- m_end = range->end;
+-
+- if (m_start <= start) {
+- /* split into 2 parts */
+- if (start < m_end && m_end < end)
+- count++;
+- }
+-
+- if (start < m_start && m_start < end) {
+- /* split into 3 parts */
+- if (m_end < end)
+- count += 2;
+- /* split into 2 parts */
+- if (end <= m_end)
+- count++;
+- }
+-
+- return count;
+-}
+-
+-/**
+- * efi_fake_memmap_insert - Insert a fake memory region in an EFI memmap
+- * @old_memmap: The existing EFI memory map structure
+- * @buf: Address of buffer to store new map
+- * @mem: Fake memory map entry to insert
+- *
+- * It is suggested that you call efi_fake_memmap_split_count() first
+- * to see how large @buf needs to be.
+- */
+-static void efi_fake_memmap_insert(struct efi_memory_map *old_memmap,
+- void *buf, struct fake_mem *mem)
+-{
+- u64 m_start, m_end, m_attr;
+- efi_memory_desc_t *md;
+- u64 start, end;
+- void *old, *new;
+-
+- /* modifying range */
+- m_start = mem->range.start;
+- m_end = mem->range.end;
+- m_attr = mem->attribute;
+-
+- for (old = old_memmap->map, new = buf;
+- old < old_memmap->map_end;
+- old += old_memmap->desc_size, new += old_memmap->desc_size) {
+-
+- /* copy original EFI memory descriptor */
+- memcpy(new, old, old_memmap->desc_size);
+- md = new;
+- start = md->phys_addr;
+- end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
+-
+- if (m_start <= start && end <= m_end)
+- md->attribute |= m_attr;
+-
+- if (m_start <= start &&
+- (start < m_end && m_end < end)) {
+- /* first part */
+- md->attribute |= m_attr;
+- md->num_pages = (m_end - md->phys_addr + 1) >>
+- EFI_PAGE_SHIFT;
+- /* latter part */
+- new += old_memmap->desc_size;
+- memcpy(new, old, old_memmap->desc_size);
+- md = new;
+- md->phys_addr = m_end + 1;
+- md->num_pages = (end - md->phys_addr + 1) >>
+- EFI_PAGE_SHIFT;
+- }
+-
+- if ((start < m_start && m_start < end) && m_end < end) {
+- /* first part */
+- md->num_pages = (m_start - md->phys_addr) >>
+- EFI_PAGE_SHIFT;
+- /* middle part */
+- new += old_memmap->desc_size;
+- memcpy(new, old, old_memmap->desc_size);
+- md = new;
+- md->attribute |= m_attr;
+- md->phys_addr = m_start;
+- md->num_pages = (m_end - m_start + 1) >>
+- EFI_PAGE_SHIFT;
+- /* last part */
+- new += old_memmap->desc_size;
+- memcpy(new, old, old_memmap->desc_size);
+- md = new;
+- md->phys_addr = m_end + 1;
+- md->num_pages = (end - m_end) >>
+- EFI_PAGE_SHIFT;
+- }
+-
+- if ((start < m_start && m_start < end) &&
+- (end <= m_end)) {
+- /* first part */
+- md->num_pages = (m_start - md->phys_addr) >>
+- EFI_PAGE_SHIFT;
+- /* latter part */
+- new += old_memmap->desc_size;
+- memcpy(new, old, old_memmap->desc_size);
+- md = new;
+- md->phys_addr = m_start;
+- md->num_pages = (end - md->phys_addr + 1) >>
+- EFI_PAGE_SHIFT;
+- md->attribute |= m_attr;
+- }
+- }
+-}
+-
+ void __init efi_fake_memmap(void)
+ {
+ struct efi_memory_map_data data;
+@@ -198,7 +67,7 @@ void __init efi_fake_memmap(void)
+ for_each_efi_memory_desc(md) {
+ struct range *r = &fake_mems[i].range;
+
+- new_nr_map += efi_fake_memmap_split_count(md, r);
++ new_nr_map += efi_memmap_split_count(md, r);
+ }
+ }
+
+@@ -217,7 +86,7 @@ void __init efi_fake_memmap(void)
+ }
+
+ for (i = 0; i < nr_fake_mem; i++)
+- efi_fake_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
++ efi_memmap_insert(&efi.memmap, new_memmap, &fake_mems[i]);
+
+ /* swap into new EFI memmap */
+ early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+@@ -265,7 +134,7 @@ static int __init setup_fake_mem(char *p
+ p++;
+ }
+
+- sort(fake_mems, nr_fake_mem, sizeof(struct fake_mem),
++ sort(fake_mems, nr_fake_mem, sizeof(struct efi_mem_range),
+ cmp_fake_mem, NULL);
+
+ for (i = 0; i < nr_fake_mem; i++)
+--- /dev/null
++++ b/drivers/firmware/efi/memmap.c
+@@ -0,0 +1,267 @@
++/*
++ * Common EFI memory map functions.
++ */
++
++#define pr_fmt(fmt) "efi: " fmt
++
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/efi.h>
++#include <linux/io.h>
++#include <asm/early_ioremap.h>
++
++/**
++ * __efi_memmap_init - Common code for mapping the EFI memory map
++ * @data: EFI memory map data
++ * @late: Use early or late mapping function?
++ *
++ * This function takes care of figuring out which function to use to
++ * map the EFI memory map in efi.memmap based on how far into the boot
++ * we are.
++ *
++ * During bootup @late should be %false since we only have access to
++ * the early_memremap*() functions as the vmalloc space isn't set up.
++ * Once the kernel is fully booted we can fall back to the more robust
++ * memremap*() API.
++ *
++ * Returns zero on success, a negative error code on failure.
++ */
++static int __init
++__efi_memmap_init(struct efi_memory_map_data *data, bool late)
++{
++ struct efi_memory_map map;
++ phys_addr_t phys_map;
++
++ if (efi_enabled(EFI_PARAVIRT))
++ return 0;
++
++ phys_map = data->phys_map;
++
++ if (late)
++ map.map = memremap(phys_map, data->size, MEMREMAP_WB);
++ else
++ map.map = early_memremap(phys_map, data->size);
++
++ if (!map.map) {
++ pr_err("Could not map the memory map!\n");
++ return -ENOMEM;
++ }
++
++ map.phys_map = data->phys_map;
++ map.nr_map = data->size / data->desc_size;
++ map.map_end = map.map + data->size;
++
++ map.desc_version = data->desc_version;
++ map.desc_size = data->desc_size;
++ map.late = late;
++
++ set_bit(EFI_MEMMAP, &efi.flags);
++
++ efi.memmap = map;
++
++ return 0;
++}
++
++/**
++ * efi_memmap_init_early - Map the EFI memory map data structure
++ * @data: EFI memory map data
++ *
++ * Use early_memremap() to map the passed in EFI memory map and assign
++ * it to efi.memmap.
++ */
++int __init efi_memmap_init_early(struct efi_memory_map_data *data)
++{
++ /* Cannot go backwards */
++ WARN_ON(efi.memmap.late);
++
++ return __efi_memmap_init(data, false);
++}
++
++void __init efi_memmap_unmap(void)
++{
++ if (!efi.memmap.late) {
++ unsigned long size;
++
++ size = efi.memmap.desc_size * efi.memmap.nr_map;
++ early_memunmap(efi.memmap.map, size);
++ } else {
++ memunmap(efi.memmap.map);
++ }
++
++ efi.memmap.map = NULL;
++ clear_bit(EFI_MEMMAP, &efi.flags);
++}
++
++/**
++ * efi_memmap_init_late - Map efi.memmap with memremap()
++ * @addr: Physical address of the new EFI memory map
++ * @size: Size in bytes of the new EFI memory map
++ *
++ * Set up a mapping of the EFI memory map using memremap(). This
++ * function should only be called once the vmalloc space has been
++ * set up and is therefore not suitable for calling during early EFI
++ * initialisation, e.g. in efi_init(). Additionally, it expects
++ * efi_memmap_init_early() to have already been called.
++ *
++ * The reason there are two EFI memmap initialisation routines
++ * (efi_memmap_init_early() and this late version) is that the
++ * early EFI memmap should be explicitly unmapped once EFI
++ * initialisation is complete as the fixmap space used to map the EFI
++ * memmap (via early_memremap()) is a scarce resource.
++ *
++ * This late mapping is intended to persist for the duration of
++ * runtime so that things like efi_mem_desc_lookup() and
++ * efi_mem_attributes() always work.
++ *
++ * Returns zero on success, a negative error code on failure.
++ */
++int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
++{
++ struct efi_memory_map_data data = {
++ .phys_map = addr,
++ .size = size,
++ };
++
++ /* Did we forget to unmap the early EFI memmap? */
++ WARN_ON(efi.memmap.map);
++
++ /* Were we already called? */
++ WARN_ON(efi.memmap.late);
++
++ /*
++ * It makes no sense to allow callers to register different
++ * values for the following fields. Copy them out of the
++ * existing early EFI memmap.
++ */
++ data.desc_version = efi.memmap.desc_version;
++ data.desc_size = efi.memmap.desc_size;
++
++ return __efi_memmap_init(&data, true);
++}
++
++/**
++ * efi_memmap_split_count - Count number of additional EFI memmap entries
++ * @md: EFI memory descriptor to split
++ * @range: Address range (start, end) to split around
++ *
++ * Returns the number of additional EFI memmap entries required to
++ * accommodate @range.
++ */
++int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
++{
++ u64 m_start, m_end;
++ u64 start, end;
++ int count = 0;
++
++ start = md->phys_addr;
++ end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
++
++ /* modifying range */
++ m_start = range->start;
++ m_end = range->end;
++
++ if (m_start <= start) {
++ /* split into 2 parts */
++ if (start < m_end && m_end < end)
++ count++;
++ }
++
++ if (start < m_start && m_start < end) {
++ /* split into 3 parts */
++ if (m_end < end)
++ count += 2;
++ /* split into 2 parts */
++ if (end <= m_end)
++ count++;
++ }
++
++ return count;
++}
++
++/**
++ * efi_memmap_insert - Insert a memory region in an EFI memmap
++ * @old_memmap: The existing EFI memory map structure
++ * @buf: Address of buffer to store new map
++ * @mem: Memory map entry to insert
++ *
++ * It is suggested that you call efi_memmap_split_count() first
++ * to see how large @buf needs to be.
++ */
++void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
++ struct efi_mem_range *mem)
++{
++ u64 m_start, m_end, m_attr;
++ efi_memory_desc_t *md;
++ u64 start, end;
++ void *old, *new;
++
++ /* modifying range */
++ m_start = mem->range.start;
++ m_end = mem->range.end;
++ m_attr = mem->attribute;
++
++ for (old = old_memmap->map, new = buf;
++ old < old_memmap->map_end;
++ old += old_memmap->desc_size, new += old_memmap->desc_size) {
++
++ /* copy original EFI memory descriptor */
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ start = md->phys_addr;
++ end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
++
++ if (m_start <= start && end <= m_end)
++ md->attribute |= m_attr;
++
++ if (m_start <= start &&
++ (start < m_end && m_end < end)) {
++ /* first part */
++ md->attribute |= m_attr;
++ md->num_pages = (m_end - md->phys_addr + 1) >>
++ EFI_PAGE_SHIFT;
++ /* latter part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->phys_addr = m_end + 1;
++ md->num_pages = (end - md->phys_addr + 1) >>
++ EFI_PAGE_SHIFT;
++ }
++
++ if ((start < m_start && m_start < end) && m_end < end) {
++ /* first part */
++ md->num_pages = (m_start - md->phys_addr) >>
++ EFI_PAGE_SHIFT;
++ /* middle part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->attribute |= m_attr;
++ md->phys_addr = m_start;
++ md->num_pages = (m_end - m_start + 1) >>
++ EFI_PAGE_SHIFT;
++ /* last part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->phys_addr = m_end + 1;
++ md->num_pages = (end - m_end) >>
++ EFI_PAGE_SHIFT;
++ }
++
++ if ((start < m_start && m_start < end) &&
++ (end <= m_end)) {
++ /* first part */
++ md->num_pages = (m_start - md->phys_addr) >>
++ EFI_PAGE_SHIFT;
++ /* latter part */
++ new += old_memmap->desc_size;
++ memcpy(new, old, old_memmap->desc_size);
++ md = new;
++ md->phys_addr = m_start;
++ md->num_pages = (end - md->phys_addr + 1) >>
++ EFI_PAGE_SHIFT;
++ md->attribute |= m_attr;
++ }
++ }
++}
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -20,6 +20,7 @@
+ #include <linux/ioport.h>
+ #include <linux/pfn.h>
+ #include <linux/pstore.h>
++#include <linux/range.h>
+ #include <linux/reboot.h>
+ #include <linux/screen_info.h>
+
+@@ -778,6 +779,11 @@ struct efi_memory_map {
+ bool late;
+ };
+
++struct efi_mem_range {
++ struct range range;
++ u64 attribute;
++};
++
+ struct efi_fdt_params {
+ u64 system_table;
+ u64 mmap;
+@@ -1038,6 +1044,10 @@ extern void __iomem *efi_lookup_mapped_a
+ extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
+ extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+ extern void __init efi_memmap_unmap(void);
++extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
++ struct range *range);
++extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
++ void *buf, struct efi_mem_range *mem);
+
+ extern int efi_config_init(efi_config_table_type_t *arch_tables);
+ #ifdef CONFIG_EFI_ESRT
diff --git a/patches.arch/qcom-0041-efi-Add-efi_memmap_install-for-installing-new-EFI-me.patch b/patches.arch/qcom-0041-efi-Add-efi_memmap_install-for-installing-new-EFI-me.patch
new file mode 100644
index 0000000000..d0b05d40e1
--- /dev/null
+++ b/patches.arch/qcom-0041-efi-Add-efi_memmap_install-for-installing-new-EFI-me.patch
@@ -0,0 +1,110 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Wed, 22 Jun 2016 16:54:00 +0100
+Subject: efi: Add efi_memmap_install() for installing new EFI memory maps
+
+Git-commit: c45f4da33a297f85435f8dccb26a24852ea01bb9
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+While efi_memmap_init_{early,late}() exist for architecture code to
+install memory maps from firmware data and for the virtual memory
+regions respectively, drivers don't care which stage of the boot we're
+at and just want to swap the existing memmap for a modified one.
+
+efi_memmap_install() abstracts the details of how the new memory map
+should be mapped and the existing one unmapped.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/fake_mem.c | 8 +-------
+ drivers/firmware/efi/memmap.c | 25 +++++++++++++++++++++++++
+ include/linux/efi.h | 1 +
+ 3 files changed, 27 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
+index 0054730f9bae..520a40e5e0e4 100644
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -52,7 +52,6 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
+
+ void __init efi_fake_memmap(void)
+ {
+- struct efi_memory_map_data data;
+ int new_nr_map = efi.memmap.nr_map;
+ efi_memory_desc_t *md;
+ phys_addr_t new_memmap_phy;
+@@ -90,13 +89,8 @@ void __init efi_fake_memmap(void)
+
+ /* swap into new EFI memmap */
+ early_memunmap(new_memmap, efi.memmap.desc_size * new_nr_map);
+- efi_memmap_unmap();
+
+- data.phys_map = new_memmap_phy;
+- data.size = efi.memmap.desc_size * new_nr_map;
+- data.desc_version = efi.memmap.desc_version;
+- data.desc_size = efi.memmap.desc_size;
+- efi_memmap_init_early(&data);
++ efi_memmap_install(new_memmap_phy, new_nr_map);
+
+ /* print new EFI memmap */
+ efi_print_memmap();
+diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
+index 2df7238eb44e..cd96086fd851 100644
+--- a/drivers/firmware/efi/memmap.c
++++ b/drivers/firmware/efi/memmap.c
+@@ -140,6 +140,31 @@ int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
+ }
+
+ /**
++ * efi_memmap_install - Install a new EFI memory map in efi.memmap
++ * @addr: Physical address of the memory map
++ * @nr_map: Number of entries in the memory map
++ *
++ * Unlike efi_memmap_init_*(), this function does not allow the caller
++ * to switch from early to late mappings. It simply uses the existing
++ * mapping function and installs the new memmap.
++ *
++ * Returns zero on success, a negative error code on failure.
++ */
++int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map)
++{
++ struct efi_memory_map_data data;
++
++ efi_memmap_unmap();
++
++ data.phys_map = addr;
++ data.size = efi.memmap.desc_size * nr_map;
++ data.desc_version = efi.memmap.desc_version;
++ data.desc_size = efi.memmap.desc_size;
++
++ return __efi_memmap_init(&data, efi.memmap.late);
++}
++
++/**
+ * efi_memmap_split_count - Count number of additional EFI memmap entries
+ * @md: EFI memory descriptor to split
+ * @range: Address range (start, end) to split around
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 84c8638c7a8b..987c18f6fcae 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -923,6 +923,7 @@ extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+ extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
+ extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+ extern void __init efi_memmap_unmap(void);
++extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
+ extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+ struct range *range);
+ extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
+--
+2.11.0
+
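The value of the wrapper is that callers such as efi_fake_memmap() no longer need to know whether the current map was set up with early_memremap() or memremap(). A hedged userspace sketch of that mode-preserving pattern, with all names illustrative rather than the kernel's:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct memmap { uint64_t phys; unsigned int nr; bool late; };
static struct memmap cur;

/* stand-in for __efi_memmap_init(): record which mapping API backs the map */
static int map_init(uint64_t phys, unsigned int nr, bool late)
{
	cur.phys = phys;
	cur.nr = nr;
	cur.late = late;
	printf("mapped %u entries at %#llx via %s\n", nr,
	       (unsigned long long)phys,
	       late ? "memremap()" : "early_memremap()");
	return 0;
}

/* stand-in for efi_memmap_install(): reuse the current early/late mode */
static int map_install(uint64_t phys, unsigned int nr)
{
	return map_init(phys, nr, cur.late);
}

int main(void)
{
	map_init(0x1000, 4, false);	/* boot-time map, early path */
	map_install(0x2000, 6);		/* swap in a modified map, same path */
	return 0;
}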
diff --git a/patches.arch/qcom-0042-efi-Allow-drivers-to-reserve-boot-services-forever.patch b/patches.arch/qcom-0042-efi-Allow-drivers-to-reserve-boot-services-forever.patch
new file mode 100644
index 0000000000..236d5270c3
--- /dev/null
+++ b/patches.arch/qcom-0042-efi-Allow-drivers-to-reserve-boot-services-forever.patch
@@ -0,0 +1,252 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Mon, 29 Feb 2016 21:22:52 +0000
+Subject: efi: Allow drivers to reserve boot services forever
+
+Git-commit: 816e76129ed5fadd28e526c43397c79775194b5c
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+Today, it is not possible for drivers to reserve EFI boot services for
+access after efi_free_boot_services() has been called on x86. For
+ARM/arm64 it can be done simply by calling memblock_reserve().
+
+Having this ability for all three architectures is desirable for a
+couple of reasons,
+
+ 1) It saves drivers copying data out of those regions
+ 2) kexec reboot can now make use of things like ESRT
+
+Instead of using the standard memblock_reserve(), which is insufficient
+to reserve the region on x86 (see efi_reserve_boot_services()), a new
+API is introduced in this patch: efi_mem_reserve().
+
+efi.memmap now always represents which EFI memory regions are
+available. On x86 the EFI boot services regions that have not been
+reserved via efi_mem_reserve() will be removed from efi.memmap during
+efi_free_boot_services().
+
+This has implications for kexec, since it is not possible for a newly
+kexec'd kernel to access the same boot services regions that the
+initial boot kernel had access to unless they are reserved by every
+kexec kernel in the chain.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/quirks.c | 119 +++++++++++++++++++++++++++++++++++++----
+ drivers/firmware/efi/efi.c | 30 ++++++++++
+ include/linux/efi.h | 1
+ 3 files changed, 140 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -151,14 +151,70 @@ static bool can_free_region(u64 start, u
+ }
+
+ /*
+- * The UEFI specification makes it clear that the operating system is free to do
+- * whatever it wants with boot services code after ExitBootServices() has been
+- * called. Ignoring this recommendation a significant bunch of EFI implementations
+- * continue calling into boot services code (SetVirtualAddressMap). In order to
+- * work around such buggy implementations we reserve boot services region during
+- * EFI init and make sure it stays executable. Then, after SetVirtualAddressMap(), it
+-* is discarded.
+-*/
++ * The UEFI specification makes it clear that the operating system is
++ * free to do whatever it wants with boot services code after
++ * ExitBootServices() has been called. Ignoring this recommendation a
++ * significant bunch of EFI implementations continue calling into boot
++ * services code (SetVirtualAddressMap). In order to work around such
++ * buggy implementations we reserve boot services region during EFI
++ * init and make sure it stays executable. Then, after
++ * SetVirtualAddressMap(), it is discarded.
++ *
++ * However, some boot services regions contain data that is required
++ * by drivers, so we need to track which memory ranges can never be
++ * freed. This is done by tagging those regions with the
++ * EFI_MEMORY_RUNTIME attribute.
++ *
++ * Any driver that wants to mark a region as reserved must use
++ * efi_mem_reserve() which will insert a new EFI memory descriptor
++ * into efi.memmap (splitting existing regions if necessary) and tag
++ * it with EFI_MEMORY_RUNTIME.
++ */
++void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
++{
++ phys_addr_t new_phys, new_size;
++ struct efi_mem_range mr;
++ efi_memory_desc_t md;
++ int num_entries;
++ void *new;
++
++ if (efi_mem_desc_lookup(addr, &md)) {
++ pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
++ return;
++ }
++
++ if (addr + size > md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT)) {
++ pr_err("Region spans EFI memory descriptors, %pa\n", &addr);
++ return;
++ }
++
++ mr.range.start = addr;
++ mr.range.end = addr + size;
++ mr.attribute = md.attribute | EFI_MEMORY_RUNTIME;
++
++ num_entries = efi_memmap_split_count(&md, &mr.range);
++ num_entries += efi.memmap.nr_map;
++
++ new_size = efi.memmap.desc_size * num_entries;
++
++ new_phys = memblock_alloc(new_size, 0);
++ if (!new_phys) {
++ pr_err("Could not allocate boot services memmap\n");
++ return;
++ }
++
++ new = early_memremap(new_phys, new_size);
++ if (!new) {
++ pr_err("Failed to map new boot services memmap\n");
++ return;
++ }
++
++ efi_memmap_insert(&efi.memmap, new, &mr);
++ early_memunmap(new, new_size);
++
++ efi_memmap_install(new_phys, num_entries);
++}
++
+ void __init efi_reserve_boot_services(void)
+ {
+ efi_memory_desc_t *md;
+@@ -215,22 +271,65 @@ void __init efi_reserve_boot_services(vo
+
+ void __init efi_free_boot_services(void)
+ {
++ phys_addr_t new_phys, new_size;
+ efi_memory_desc_t *md;
++ int num_entries = 0;
++ void *new, *new_md;
+
+ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+- md->type != EFI_BOOT_SERVICES_DATA)
++ md->type != EFI_BOOT_SERVICES_DATA) {
++ num_entries++;
+ continue;
++ }
+
+ /* Do not free, someone else owns it: */
+- if (md->attribute & EFI_MEMORY_RUNTIME)
++ if (md->attribute & EFI_MEMORY_RUNTIME) {
++ num_entries++;
+ continue;
++ }
+
+ free_bootmem_late(start, size);
+ }
++
++ new_size = efi.memmap.desc_size * num_entries;
++ new_phys = memblock_alloc(new_size, 0);
++ if (!new_phys) {
++ pr_err("Failed to allocate new EFI memmap\n");
++ return;
++ }
++
++ new = memremap(new_phys, new_size, MEMREMAP_WB);
++ if (!new) {
++ pr_err("Failed to map new EFI memmap\n");
++ return;
++ }
++
++ /*
++ * Build a new EFI memmap that excludes any boot services
++ * regions that are not tagged EFI_MEMORY_RUNTIME, since those
++ * regions have now been freed.
++ */
++ new_md = new;
++ for_each_efi_memory_desc(md) {
++ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
++ (md->type == EFI_BOOT_SERVICES_CODE ||
++ md->type == EFI_BOOT_SERVICES_DATA))
++ continue;
++
++ memcpy(new_md, md, efi.memmap.desc_size);
++ new_md += efi.memmap.desc_size;
++ }
++
++ memunmap(new);
++
++ if (efi_memmap_install(new_phys, num_entries)) {
++ pr_err("Could not install new EFI memmap\n");
++ return;
++ }
+ }
+
+ /*
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -24,6 +24,7 @@
+ #include <linux/of_fdt.h>
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
++#include <linux/memblock.h>
+
+ #include <asm/early_ioremap.h>
+
+@@ -300,6 +301,35 @@ u64 __init efi_mem_desc_end(efi_memory_d
+ return end;
+ }
+
++void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
++
++/**
++ * efi_mem_reserve - Reserve an EFI memory region
++ * @addr: Physical address to reserve
++ * @size: Size of reservation
++ *
++ * Mark a region as reserved from general kernel allocation and
++ * prevent it from being released by efi_free_boot_services().
++ *
++ * This function should be called by drivers once they've parsed EFI
++ * configuration tables to figure out where their data lives, e.g.
++ * efi_esrt_init().
++ */
++void __init efi_mem_reserve(phys_addr_t addr, u64 size)
++{
++ if (!memblock_is_region_reserved(addr, size))
++ memblock_reserve(addr, size);
++
++ /*
++ * Some architectures (x86) reserve all boot services ranges
++ * until efi_free_boot_services() because of buggy firmware
++ * implementations. This means the above memblock_reserve() is
++ * superfluous on x86 and instead what it needs to do is
++ * ensure the @start, @size is not freed.
++ */
++ efi_arch_mem_reserve(addr, size);
++}
++
+ static __initdata efi_config_table_type_t common_tables[] = {
+ {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
+ {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1077,6 +1077,7 @@ extern u64 efi_mem_attribute (unsigned l
+ extern int __init efi_uart_console_only (void);
+ extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+ extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
++extern void efi_mem_reserve(phys_addr_t addr, u64 size);
+ extern void efi_initialize_iomem_resources(struct resource *code_resource,
+ struct resource *data_resource, struct resource *bss_resource);
+ extern void efi_get_time(struct timespec *now);
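
The two-pass rebuild in efi_free_boot_services() above (count the descriptors that survive, then copy them into a fresh buffer) can be modelled in plain C. The sketch below uses illustrative types and attribute values, not the kernel's:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>

/* illustrative stand-ins for the EFI types and the RUNTIME attribute */
enum mem_type { BS_CODE, BS_DATA, CONVENTIONAL };
#define MEM_RUNTIME 0x1ULL

struct desc { enum mem_type type; uint64_t attribute; };

/* survives the rebuild: not boot services, or pinned via RUNTIME */
static bool keep(const struct desc *d)
{
	if (d->type != BS_CODE && d->type != BS_DATA)
		return true;
	return d->attribute & MEM_RUNTIME;
}

int main(void)
{
	struct desc map[] = {
		{ CONVENTIONAL, 0 },
		{ BS_DATA, MEM_RUNTIME },	/* e.g. ESRT, reserved forever */
		{ BS_CODE, 0 },			/* freed, dropped from the map */
	};
	int i, j, n = 0;

	for (i = 0; i < 3; i++)			/* pass 1: count survivors */
		if (keep(&map[i]))
			n++;

	struct desc *newmap = calloc(n, sizeof(*newmap));
	if (!newmap)
		return 1;

	for (i = j = 0; i < 3; i++)		/* pass 2: copy survivors */
		if (keep(&map[i]))
			memcpy(&newmap[j++], &map[i], sizeof(*newmap));

	printf("kept %d of 3 entries\n", n);	/* kept 2 of 3 entries */
	free(newmap);
	return 0;
}

Entries a driver pinned with efi_mem_reserve() carry the RUNTIME tag and survive the rebuild; untagged boot services entries are freed and vanish from the map.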
diff --git a/patches.arch/qcom-0043-efi-runtime-map-Use-efi.memmap-directly-instead-of-a.patch b/patches.arch/qcom-0043-efi-runtime-map-Use-efi.memmap-directly-instead-of-a.patch
new file mode 100644
index 0000000000..b5ab1c1743
--- /dev/null
+++ b/patches.arch/qcom-0043-efi-runtime-map-Use-efi.memmap-directly-instead-of-a.patch
@@ -0,0 +1,218 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Tue, 1 Mar 2016 23:02:56 +0000
+Subject: efi/runtime-map: Use efi.memmap directly instead of a copy
+
+Git-commit: 31ce8cc68180803aa481c0c1daac29d8eaceca9d
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+Now that efi.memmap is available all of the time there's no need to
+allocate and build a separate copy of the EFI memory map.
+
+Furthermore, efi.memmap contains boot services regions but only those
+regions that have been reserved via efi_mem_reserve(). Using
+efi.memmap allows us to pass boot services across kexec reboot so that
+the ESRT and BGRT drivers will now work.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi.c | 40 --------------------------------------
+ drivers/firmware/efi/runtime-map.c | 35 +++++++++++++--------------------
+ include/linux/efi.h | 4 ----
+ 3 files changed, 13 insertions(+), 66 deletions(-)
+
+diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
+index 33996987ac70..342cebd1e17c 100644
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -592,42 +592,6 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
+ }
+ }
+
+-static void __init save_runtime_map(void)
+-{
+-#ifdef CONFIG_KEXEC_CORE
+- unsigned long desc_size;
+- efi_memory_desc_t *md;
+- void *tmp, *q = NULL;
+- int count = 0;
+-
+- if (efi_enabled(EFI_OLD_MEMMAP))
+- return;
+-
+- desc_size = efi.memmap.desc_size;
+-
+- for_each_efi_memory_desc(md) {
+- if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
+- (md->type == EFI_BOOT_SERVICES_CODE) ||
+- (md->type == EFI_BOOT_SERVICES_DATA))
+- continue;
+- tmp = krealloc(q, (count + 1) * desc_size, GFP_KERNEL);
+- if (!tmp)
+- goto out;
+- q = tmp;
+-
+- memcpy(q + count * desc_size, md, desc_size);
+- count++;
+- }
+-
+- efi_runtime_map_setup(q, count, desc_size);
+- return;
+-
+-out:
+- kfree(q);
+- pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
+-#endif
+-}
+-
+ static void *realloc_pages(void *old_memmap, int old_shift)
+ {
+ void *ret;
+@@ -840,8 +804,6 @@ static void __init kexec_enter_virtual_mode(void)
+ return;
+ }
+
+- save_runtime_map();
+-
+ BUG_ON(!efi.systab);
+
+ num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
+@@ -934,8 +896,6 @@ static void __init __efi_enter_virtual_mode(void)
+ return;
+ }
+
+- save_runtime_map();
+-
+ BUG_ON(!efi.systab);
+
+ if (efi_setup_page_tables(pa, 1 << pg_shift)) {
+diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
+index 5c55227a34c8..8e64b77aeac9 100644
+--- a/drivers/firmware/efi/runtime-map.c
++++ b/drivers/firmware/efi/runtime-map.c
+@@ -14,10 +14,6 @@
+
+ #include <asm/setup.h>
+
+-static void *efi_runtime_map;
+-static int nr_efi_runtime_map;
+-static u32 efi_memdesc_size;
+-
+ struct efi_runtime_map_entry {
+ efi_memory_desc_t md;
+ struct kobject kobj; /* kobject for each entry */
+@@ -106,7 +102,8 @@ static struct kobj_type __refdata map_ktype = {
+ static struct kset *map_kset;
+
+ static struct efi_runtime_map_entry *
+-add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
++add_sysfs_runtime_map_entry(struct kobject *kobj, int nr,
++ efi_memory_desc_t *md)
+ {
+ int ret;
+ struct efi_runtime_map_entry *entry;
+@@ -124,8 +121,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
+ return ERR_PTR(-ENOMEM);
+ }
+
+- memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
+- sizeof(efi_memory_desc_t));
++ memcpy(&entry->md, md, sizeof(efi_memory_desc_t));
+
+ kobject_init(&entry->kobj, &map_ktype);
+ entry->kobj.kset = map_kset;
+@@ -142,12 +138,12 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
+
+ int efi_get_runtime_map_size(void)
+ {
+- return nr_efi_runtime_map * efi_memdesc_size;
++ return efi.memmap.nr_map * efi.memmap.desc_size;
+ }
+
+ int efi_get_runtime_map_desc_size(void)
+ {
+- return efi_memdesc_size;
++ return efi.memmap.desc_size;
+ }
+
+ int efi_runtime_map_copy(void *buf, size_t bufsz)
+@@ -157,38 +153,33 @@ int efi_runtime_map_copy(void *buf, size_t bufsz)
+ if (sz > bufsz)
+ sz = bufsz;
+
+- memcpy(buf, efi_runtime_map, sz);
++ memcpy(buf, efi.memmap.map, sz);
+ return 0;
+ }
+
+-void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size)
+-{
+- efi_runtime_map = map;
+- nr_efi_runtime_map = nr_entries;
+- efi_memdesc_size = desc_size;
+-}
+-
+ int __init efi_runtime_map_init(struct kobject *efi_kobj)
+ {
+ int i, j, ret = 0;
+ struct efi_runtime_map_entry *entry;
++ efi_memory_desc_t *md;
+
+- if (!efi_runtime_map)
++ if (!efi_enabled(EFI_MEMMAP))
+ return 0;
+
+- map_entries = kzalloc(nr_efi_runtime_map * sizeof(entry), GFP_KERNEL);
++ map_entries = kzalloc(efi.memmap.nr_map * sizeof(entry), GFP_KERNEL);
+ if (!map_entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+- for (i = 0; i < nr_efi_runtime_map; i++) {
+- entry = add_sysfs_runtime_map_entry(efi_kobj, i);
++ i = 0;
++ for_each_efi_memory_desc(md) {
++ entry = add_sysfs_runtime_map_entry(efi_kobj, i, md);
+ if (IS_ERR(entry)) {
+ ret = PTR_ERR(entry);
+ goto out_add_entry;
+ }
+- *(map_entries + i) = entry;
++ *(map_entries + i++) = entry;
+ }
+
+ return 0;
+diff --git a/include/linux/efi.h b/include/linux/efi.h
+index 3fe4f3c47834..d8b555db81c7 100644
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -1357,7 +1357,6 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule,
+
+ #ifdef CONFIG_EFI_RUNTIME_MAP
+ int efi_runtime_map_init(struct kobject *);
+-void efi_runtime_map_setup(void *, int, u32);
+ int efi_get_runtime_map_size(void);
+ int efi_get_runtime_map_desc_size(void);
+ int efi_runtime_map_copy(void *buf, size_t bufsz);
+@@ -1367,9 +1366,6 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
+ return 0;
+ }
+
+-static inline void
+-efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
+-
+ static inline int efi_get_runtime_map_size(void)
+ {
+ return 0;
+--
+2.11.0
+
diff --git a/patches.arch/qcom-0044-efi-esrt-Use-efi_mem_reserve-and-avoid-a-kmalloc.patch b/patches.arch/qcom-0044-efi-esrt-Use-efi_mem_reserve-and-avoid-a-kmalloc.patch
new file mode 100644
index 0000000000..eec7d5c7b1
--- /dev/null
+++ b/patches.arch/qcom-0044-efi-esrt-Use-efi_mem_reserve-and-avoid-a-kmalloc.patch
@@ -0,0 +1,93 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Tue, 1 Mar 2016 23:08:03 +0000
+Subject: efi/esrt: Use efi_mem_reserve() and avoid a kmalloc()
+
+Git-commit: 8e80632fb23f021ce5a6957f2edcdae4645a7030
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+We can use the new efi_mem_reserve() API to mark the ESRT table as
+reserved forever and save ourselves the trouble of copying the data
+out into a kmalloc buffer.
+
+The added advantage is that now the ESRT driver will work across
+kexec reboot.
+
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/esrt.c | 20 ++++----------------
+ 1 file changed, 4 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
+index 75feb3f5829b..b93cd11f9bcc 100644
+--- a/drivers/firmware/efi/esrt.c
++++ b/drivers/firmware/efi/esrt.c
+@@ -235,7 +235,7 @@ static struct attribute_group esrt_attr_group = {
+ };
+
+ /*
+- * remap the table, copy it to kmalloced pages, and unmap it.
++ * remap the table, validate it, mark it reserved and unmap it.
+ */
+ void __init efi_esrt_init(void)
+ {
+@@ -335,7 +335,7 @@ void __init efi_esrt_init(void)
+
+ end = esrt_data + size;
+ pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
+- memblock_reserve(esrt_data, esrt_data_size);
++ efi_mem_reserve(esrt_data, esrt_data_size);
+
+ pr_debug("esrt-init: loaded.\n");
+ err_memunmap:
+@@ -382,28 +382,18 @@ static void cleanup_entry_list(void)
+ static int __init esrt_sysfs_init(void)
+ {
+ int error;
+- struct efi_system_resource_table __iomem *ioesrt;
+
+ pr_debug("esrt-sysfs: loading.\n");
+ if (!esrt_data || !esrt_data_size)
+ return -ENOSYS;
+
+- ioesrt = ioremap(esrt_data, esrt_data_size);
+- if (!ioesrt) {
++ esrt = ioremap(esrt_data, esrt_data_size);
++ if (!esrt) {
+ pr_err("ioremap(%pa, %zu) failed.\n", &esrt_data,
+ esrt_data_size);
+ return -ENOMEM;
+ }
+
+- esrt = kmalloc(esrt_data_size, GFP_KERNEL);
+- if (!esrt) {
+- pr_err("kmalloc failed. (wanted %zu bytes)\n", esrt_data_size);
+- iounmap(ioesrt);
+- return -ENOMEM;
+- }
+-
+- memcpy_fromio(esrt, ioesrt, esrt_data_size);
+-
+ esrt_kobj = kobject_create_and_add("esrt", efi_kobj);
+ if (!esrt_kobj) {
+ pr_err("Firmware table registration failed.\n");
+@@ -429,8 +419,6 @@ static int __init esrt_sysfs_init(void)
+ if (error)
+ goto err_cleanup_list;
+
+- memblock_remove(esrt_data, esrt_data_size);
+-
+ pr_debug("esrt-sysfs: loaded.\n");
+
+ return 0;
+--
+2.11.0
+
diff --git a/patches.arch/qcom-0045-x86-efi-bgrt-Use-efi_mem_reserve-to-avoid-copying-im.patch b/patches.arch/qcom-0045-x86-efi-bgrt-Use-efi_mem_reserve-to-avoid-copying-im.patch
new file mode 100644
index 0000000000..507a0640da
--- /dev/null
+++ b/patches.arch/qcom-0045-x86-efi-bgrt-Use-efi_mem_reserve-to-avoid-copying-im.patch
@@ -0,0 +1,61 @@
+From: Matt Fleming <matt@codeblueprint.co.uk>
+Date: Thu, 23 Jun 2016 11:36:32 +0100
+Subject: x86/efi-bgrt: Use efi_mem_reserve() to avoid copying image data
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Git-commit: 4bc9f92e64c81192dcca1c495354bcc7c3b43e7d
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+efi_mem_reserve() allows us to permanently mark EFI boot services
+regions as reserved, which means we no longer need to copy the image
+data out and into a separate buffer.
+
+Leaving the data in the original boot services region has the added
+benefit that BGRT images can now be passed across kexec reboot.
+
+Reviewed-by: Josh Triplett <josh@joshtriplett.org>
+Tested-by: Dave Young <dyoung@redhat.com> [kexec/kdump]
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org> [arm]
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Josh Boyer <jwboyer@fedoraproject.org>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Môshe van der Sterre <me@moshe.nl>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi-bgrt.c | 13 ++-----------
+ 1 file changed, 2 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/platform/efi/efi-bgrt.c
++++ b/arch/x86/platform/efi/efi-bgrt.c
+@@ -79,21 +79,12 @@ void __init efi_bgrt_init(void)
+ early_memunmap(image, sizeof(bmp_header));
+ bgrt_image_size = bmp_header.size;
+
+- bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
++ bgrt_image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
+ if (!bgrt_image) {
+- pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
+- bgrt_image_size);
+- return;
+- }
+-
+- image = early_memremap(bgrt_tab->image_address, bmp_header.size);
+- if (!image) {
+ pr_err("Ignoring BGRT: failed to map image memory\n");
+- kfree(bgrt_image);
+ bgrt_image = NULL;
+ return;
+ }
+
+- memcpy(bgrt_image, image, bgrt_image_size);
+- early_memunmap(image, bmp_header.size);
++ efi_mem_reserve(bgrt_tab->image_address, bgrt_image_size);
+ }
diff --git a/patches.arch/qcom-0046-efi-esrt-Use-memremap-not-ioremap-to-access-ESRT-tab.patch b/patches.arch/qcom-0046-efi-esrt-Use-memremap-not-ioremap-to-access-ESRT-tab.patch
new file mode 100644
index 0000000000..b3a31aba60
--- /dev/null
+++ b/patches.arch/qcom-0046-efi-esrt-Use-memremap-not-ioremap-to-access-ESRT-tab.patch
@@ -0,0 +1,55 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 11 Jul 2016 21:00:45 +0200
+Subject: efi/esrt: Use memremap not ioremap to access ESRT table in memory
+
+Git-commit: f58a37b2e01f91c23af457a7662f6b5a1e9f41e0
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+On ARM and arm64, ioremap() and memremap() are not interchangeable like
+on x86, and the use of ioremap() on ordinary RAM is typically flagged
+as an error if the memory region being mapped is also covered by the
+linear mapping, since that would lead to aliases with conflicting
+cacheability attributes.
+
+Since what we are dealing with is not an I/O region with side effects,
+using ioremap() here is arguably incorrect anyway, so let's replace
+it with memremap() instead.
+
+Acked-by: Peter Jones <pjones@redhat.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/esrt.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
+index b93cd11f9bcc..14914074f716 100644
+--- a/drivers/firmware/efi/esrt.c
++++ b/drivers/firmware/efi/esrt.c
+@@ -16,6 +16,7 @@
+ #include <linux/device.h>
+ #include <linux/efi.h>
+ #include <linux/init.h>
++#include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
+ #include <linux/list.h>
+@@ -387,9 +388,9 @@ static int __init esrt_sysfs_init(void)
+ if (!esrt_data || !esrt_data_size)
+ return -ENOSYS;
+
+- esrt = ioremap(esrt_data, esrt_data_size);
++ esrt = memremap(esrt_data, esrt_data_size, MEMREMAP_WB);
+ if (!esrt) {
+- pr_err("ioremap(%pa, %zu) failed.\n", &esrt_data,
++ pr_err("memremap(%pa, %zu) failed.\n", &esrt_data,
+ esrt_data_size);
+ return -ENOMEM;
+ }
+--
+2.11.0
+
diff --git a/patches.arch/qcom-0047-efi-arm-esrt-Add-missing-call-to-efi_esrt_init.patch b/patches.arch/qcom-0047-efi-arm-esrt-Add-missing-call-to-efi_esrt_init.patch
new file mode 100644
index 0000000000..40144eaf5c
--- /dev/null
+++ b/patches.arch/qcom-0047-efi-arm-esrt-Add-missing-call-to-efi_esrt_init.patch
@@ -0,0 +1,32 @@
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Mon, 11 Jul 2016 21:00:46 +0200
+Subject: efi/arm*: esrt: Add missing call to efi_esrt_init()
+
+Git-commit: 2ead3084e3fc37d42f379cca8753b458d8f9ba25
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+ESRT support is built by default for all architectures that define
+CONFIG_EFI. However, this support was not wired up yet for ARM/arm64,
+since efi_esrt_init() was never called. So add the missing call.
+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Leif Lindholm <leif.lindholm@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Jones <pjones@redhat.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ drivers/firmware/efi/arm-init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/firmware/efi/arm-init.c
++++ b/drivers/firmware/efi/arm-init.c
+@@ -271,6 +271,7 @@ void __init efi_init_fdt(void *fdt)
+
+ reserve_regions();
+ efi_memattr_init();
++ efi_esrt_init();
+ efi_memmap_unmap();
+ memblock_reserve(params.mmap & PAGE_MASK,
+ PAGE_ALIGN(params.mmap_size +
diff --git a/patches.arch/qcom-0048-efi-x86-Prune-invalid-memory-map-entries-and-fix-boo.patch b/patches.arch/qcom-0048-efi-x86-Prune-invalid-memory-map-entries-and-fix-boo.patch
new file mode 100644
index 0000000000..cb023ad045
--- /dev/null
+++ b/patches.arch/qcom-0048-efi-x86-Prune-invalid-memory-map-entries-and-fix-boo.patch
@@ -0,0 +1,148 @@
+From: Peter Jones <pjones@redhat.com>
+Date: Mon, 12 Dec 2016 18:42:28 -0500
+Subject: efi/x86: Prune invalid memory map entries and fix boot regression
+Git-commit: 0100a3e67a9cef64d72cd3a1da86f3ddbee50363
+Patch-mainline: v4.10-rc4
+References: fate#320512
+
+Some machines, such as the Lenovo ThinkPad W541 with firmware GNET80WW
+(2.28), include memory map entries with phys_addr=0x0 and num_pages=0.
+
+These machines fail to boot after the following commit,
+
+ commit 8e80632fb23f ("efi/esrt: Use efi_mem_reserve() and avoid a kmalloc()")
+
+Fix this by removing such bogus entries from the memory map.
+
+Furthermore, currently the log output for this case (with efi=debug)
+looks like:
+
+ [ 0.000000] efi: mem45: [Reserved | | | | | | | | | | | | ] range=[0x0000000000000000-0xffffffffffffffff] (0MB)
+
+This is clearly wrong, and also not as informative as it could be. This
+patch changes it so that if we find obviously invalid memory map
+entries, we print an error and skip those entries. It also detects when
+the address range calculation overflows and reports it explicitly, so
+the new output is:
+
+ [ 0.000000] efi: [Firmware Bug]: Invalid EFI memory map entries:
+ [ 0.000000] efi: mem45: [Reserved | | | | | | | | | | | | ] range=[0x0000000000000000-0x0000000000000000] (invalid)
+
+It also detects memory map sizes that would overflow the physical
+address, for example phys_addr=0xfffffffffffff000 and
+num_pages=0x0200000000000001, and prints:
+
+ [ 0.000000] efi: [Firmware Bug]: Invalid EFI memory map entries:
+ [ 0.000000] efi: mem45: [Reserved | | | | | | | | | | | | ] range=[phys_addr=0xfffffffffffff000-0x20ffffffffffffffff] (invalid)
+
+It then removes these entries from the memory map.
+
+Signed-off-by: Peter Jones <pjones@redhat.com>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+[ardb: refactor for clarity with no functional changes, avoid PAGE_SHIFT]
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+[Matt: Include bugzilla info in commit log]
+Cc: <stable@vger.kernel.org> # v4.9+
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=191121
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/efi.c | 66 ++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/efi.h | 1
+ 2 files changed, 67 insertions(+)
+
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -224,6 +224,70 @@ int __init efi_memblock_x86_reserve_rang
+ return 0;
+ }
+
++#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
++#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
++#define U64_HIGH_BIT (~(U64_MAX >> 1))
++
++static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
++{
++ u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
++ u64 end_hi = 0;
++ char buf[64];
++
++ if (md->num_pages == 0) {
++ end = 0;
++ } else if (md->num_pages > EFI_PAGES_MAX ||
++ EFI_PAGES_MAX - md->num_pages <
++ (md->phys_addr >> EFI_PAGE_SHIFT)) {
++ end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
++ >> OVERFLOW_ADDR_SHIFT;
++
++ if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
++ end_hi += 1;
++ } else {
++ return true;
++ }
++
++ pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
++
++ if (end_hi) {
++ pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
++ i, efi_md_typeattr_format(buf, sizeof(buf), md),
++ md->phys_addr, end_hi, end);
++ } else {
++ pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
++ i, efi_md_typeattr_format(buf, sizeof(buf), md),
++ md->phys_addr, end);
++ }
++ return false;
++}
++
++static void __init efi_clean_memmap(void)
++{
++ efi_memory_desc_t *out = efi.memmap.map;
++ const efi_memory_desc_t *in = out;
++ const efi_memory_desc_t *end = efi.memmap.map_end;
++ int i, n_removal;
++
++ for (i = n_removal = 0; in < end; i++) {
++ if (efi_memmap_entry_valid(in, i)) {
++ if (out != in)
++ memcpy(out, in, efi.memmap.desc_size);
++ out = (void *)out + efi.memmap.desc_size;
++ } else {
++ n_removal++;
++ }
++ in = (void *)in + efi.memmap.desc_size;
++ }
++
++ if (n_removal > 0) {
++ u64 size = efi.memmap.nr_map - n_removal;
++
++ pr_warn("Removing %d invalid memory map entries.\n", n_removal);
++ efi_memmap_install(efi.memmap.phys_map, size);
++ }
++}
++
+ void __init efi_print_memmap(void)
+ {
+ #ifdef EFI_DEBUG
+@@ -490,6 +554,8 @@ void __init efi_init(void)
+ }
+ }
+
++ efi_clean_memmap();
++
+ if (efi_enabled(EFI_DBG))
+ efi_print_memmap();
+
+--- a/include/linux/efi.h
++++ b/include/linux/efi.h
+@@ -130,6 +130,7 @@ typedef struct {
+
+ #define EFI_PAGE_SHIFT 12
+ #define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
++#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
+
+ typedef struct {
+ u32 type;
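
The validity test above guards against two firmware bugs: zero-sized entries and page counts whose byte size would wrap past 2^64. A self-contained sketch of the same arithmetic (EFI_PAGE_SHIFT is 12; the helper name and test values are illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PAGE_SHIFT 12				/* mirrors EFI_PAGE_SHIFT */
#define PAGES_MAX (UINT64_MAX >> PAGE_SHIFT)	/* mirrors EFI_PAGES_MAX */

/* mirror of efi_memmap_entry_valid(): reject zero-sized entries and
 * page counts whose byte size would wrap past 2^64 */
static bool entry_valid(uint64_t phys, uint64_t num_pages)
{
	if (num_pages == 0)
		return false;
	if (num_pages > PAGES_MAX ||
	    PAGES_MAX - num_pages < (phys >> PAGE_SHIFT))
		return false;
	return true;
}

int main(void)
{
	/* the ThinkPad W541 entry from the log above */
	printf("%d\n", entry_valid(0x0, 0));			/* 0 */
	/* the overflowing example from the commit message */
	printf("%d\n", entry_valid(0xfffffffffffff000ULL,
				   0x0200000000000001ULL));	/* 0 */
	/* an ordinary sane entry */
	printf("%d\n", entry_valid(0x100000, 0x100));		/* 1 */
	return 0;
}

Both bogus examples from the commit message evaluate to invalid, and efi_clean_memmap() then drops them before installing the pruned map.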
diff --git a/patches.arch/qcom-0049-x86-efi-Defer-efi_esrt_init-until-after-memblock_x86.patch b/patches.arch/qcom-0049-x86-efi-Defer-efi_esrt_init-until-after-memblock_x86.patch
new file mode 100644
index 0000000000..5815a7c503
--- /dev/null
+++ b/patches.arch/qcom-0049-x86-efi-Defer-efi_esrt_init-until-after-memblock_x86.patch
@@ -0,0 +1,98 @@
+From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Date: Tue, 16 Aug 2016 17:32:31 -0700
+Subject: x86/efi: Defer efi_esrt_init until after memblock_x86_fill
+Git-commit: 3dad6f7f6975387f53f1a772f29f54335563d93d
+Patch-mainline: v4.9-rc1
+References: fate#320512
+
+Commit 7b02d53e7852 ("efi: Allow drivers to reserve boot services forever")
+introduced a new efi_mem_reserve to reserve the boot services memory
+regions forever. This reservation involves allocating a new EFI memory
+range descriptor. However, allocation can only succeed if there is memory
+available for the allocation. Otherwise, error such as the following may
+occur:
+
+esrt: Reserving ESRT space from 0x000000003dd6a000 to 0x000000003dd6a010.
+Kernel panic - not syncing: ERROR: Failed to allocate 0x9f0 bytes below \
+ 0x0.
+CPU: 0 PID: 0 Comm: swapper Not tainted 4.7.0-rc5+ #503
+ 0000000000000000 ffffffff81e03ce0 ffffffff8131dae8 ffffffff81bb6c50
+ ffffffff81e03d70 ffffffff81e03d60 ffffffff8111f4df 0000000000000018
+ ffffffff81e03d70 ffffffff81e03d08 00000000000009f0 00000000000009f0
+Call Trace:
+ [<ffffffff8131dae8>] dump_stack+0x4d/0x65
+ [<ffffffff8111f4df>] panic+0xc5/0x206
+ [<ffffffff81f7c6d3>] memblock_alloc_base+0x29/0x2e
+ [<ffffffff81f7c6e3>] memblock_alloc+0xb/0xd
+ [<ffffffff81f6c86d>] efi_arch_mem_reserve+0xbc/0x134
+ [<ffffffff81fa3280>] efi_mem_reserve+0x2c/0x31
+ [<ffffffff81fa3280>] ? efi_mem_reserve+0x2c/0x31
+ [<ffffffff81fa40d3>] efi_esrt_init+0x19e/0x1b4
+ [<ffffffff81f6d2dd>] efi_init+0x398/0x44a
+ [<ffffffff81f5c782>] setup_arch+0x415/0xc30
+ [<ffffffff81f55af1>] start_kernel+0x5b/0x3ef
+ [<ffffffff81f55434>] x86_64_start_reservations+0x2f/0x31
+ [<ffffffff81f55520>] x86_64_start_kernel+0xea/0xed
+---[ end Kernel panic - not syncing: ERROR: Failed to allocate 0x9f0
+ bytes below 0x0.
+
+An inspection of the memblock configuration reveals that there is no memory
+available for the allocation:
+
+MEMBLOCK configuration:
+ memory size = 0x0 reserved size = 0x4f339c0
+ memory.cnt = 0x1
+ memory[0x0] [0x00000000000000-0xffffffffffffffff], 0x0 bytes on node 0\
+ flags: 0x0
+ reserved.cnt = 0x4
+ reserved[0x0] [0x0000000008c000-0x0000000008c9bf], 0x9c0 bytes flags: 0x0
+ reserved[0x1] [0x0000000009f000-0x000000000fffff], 0x61000 bytes\
+ flags: 0x0
+ reserved[0x2] [0x00000002800000-0x0000000394bfff], 0x114c000 bytes\
+ flags: 0x0
+ reserved[0x3] [0x000000304e4000-0x00000034269fff], 0x3d86000 bytes\
+ flags: 0x0
+
+This situation can be avoided if we call efi_esrt_init after memblock has
+memory regions for the allocation.
+
+Also, the EFI ESRT driver makes use of early_memremap() mappings.
+Therefore, we do not want to defer efi_esrt_init for too long; we must
+call it while calls to early_memremap are still valid.
+
+A good place to meet the two aforementioned conditions is right after
+memblock_x86_fill, grouped with other EFI-related functions.
+
+Reported-by: Scott Lawson <scott.lawson@intel.com>
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Peter Jones <pjones@redhat.com>
+Signed-off-by: Matt Fleming <matt@codeblueprint.co.uk>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/kernel/setup.c | 2 ++
+ arch/x86/platform/efi/efi.c | 2 --
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1111,6 +1111,8 @@ void __init setup_arch(char **cmdline_p)
+ if (efi_enabled(EFI_MEMMAP)) {
+ efi_fake_memmap();
+ efi_find_mirror();
++ efi_esrt_init();
++
+ /*
+ * The EFI specification says that boot service code won't be
+ * called after ExitBootServices(). This is, in fact, a lie.
+--- a/arch/x86/platform/efi/efi.c
++++ b/arch/x86/platform/efi/efi.c
+@@ -558,8 +558,6 @@ void __init efi_init(void)
+
+ if (efi_enabled(EFI_DBG))
+ efi_print_memmap();
+-
+- efi_esrt_init();
+ }
+
+ void __init efi_late_init(void)
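
The constraint being fixed is an ordering window: the reservation path allocates from memblock, which owns no memory until memblock_x86_fill() runs, yet it must still run early enough for early_memremap() to work. A toy model of the failure mode, with all names illustrative:

#include <stdio.h>
#include <stdint.h>

static uint64_t mem_base, mem_size, mem_cursor;

/* stand-in for memblock_x86_fill(): register usable RAM */
static void memblock_fill(uint64_t base, uint64_t size)
{
	mem_base = base;
	mem_size = size;
	mem_cursor = base;
}

/* stand-in for memblock_alloc(): bump allocator over registered RAM */
static uint64_t memblock_alloc_model(uint64_t size)
{
	if (mem_cursor + size > mem_base + mem_size)
		return 0;	/* nothing registered yet: fails */
	uint64_t ret = mem_cursor;
	mem_cursor += size;
	return ret;
}

/* stand-in for the allocation efi_arch_mem_reserve() does for the ESRT */
static void esrt_init_model(void)
{
	uint64_t p = memblock_alloc_model(0x9f0);

	if (p)
		printf("ESRT memmap entry at %#llx\n", (unsigned long long)p);
	else
		printf("ESRT reservation failed: no memory registered\n");
}

int main(void)
{
	esrt_init_model();		/* too early: the panic above */
	memblock_fill(0x100000, 1 << 20);
	esrt_init_model();		/* after memblock_x86_fill(): works */
	return 0;
}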
diff --git a/patches.arch/qcom-0050-x86-efi-Don-t-allocate-memmap-through-memblock-after.patch b/patches.arch/qcom-0050-x86-efi-Don-t-allocate-memmap-through-memblock-after.patch
new file mode 100644
index 0000000000..15a31891a1
--- /dev/null
+++ b/patches.arch/qcom-0050-x86-efi-Don-t-allocate-memmap-through-memblock-after.patch
@@ -0,0 +1,172 @@
+From: Nicolai Stange <nicstange@gmail.com>
+Date: Thu, 5 Jan 2017 13:51:29 +0100
+Subject: x86/efi: Don't allocate memmap through memblock after mm_init()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Git-commit: 20b1e22d01a4b0b11d3a1066e9feb04be38607ec
+Patch-mainline: v4.10-rc4
+References: fate#320512
+
+With the following commit:
+
+ 4bc9f92e64c8 ("x86/efi-bgrt: Use efi_mem_reserve() to avoid copying image data")
+
+... efi_bgrt_init() calls into the memblock allocator through
+efi_mem_reserve() => efi_arch_mem_reserve() *after* mm_init() has been called.
+
+Indeed, KASAN reports a bad read access later on in efi_free_boot_services():
+
+ BUG: KASAN: use-after-free in efi_free_boot_services+0xae/0x24c
+ at addr ffff88022de12740
+ Read of size 4 by task swapper/0/0
+ page:ffffea0008b78480 count:0 mapcount:-127
+ mapping: (null) index:0x1 flags: 0x5fff8000000000()
+ [...]
+ Call Trace:
+ dump_stack+0x68/0x9f
+ kasan_report_error+0x4c8/0x500
+ kasan_report+0x58/0x60
+ __asan_load4+0x61/0x80
+ efi_free_boot_services+0xae/0x24c
+ start_kernel+0x527/0x562
+ x86_64_start_reservations+0x24/0x26
+ x86_64_start_kernel+0x157/0x17a
+ start_cpu+0x5/0x14
+
+The instruction at the given address is the first read from the memmap's
+memory, i.e. the read of md->type in efi_free_boot_services().
+
+Note that the writes earlier in efi_arch_mem_reserve() don't splat because
+they're done through early_memremap()ed addresses.
+
+So, after memblock is gone, allocations should be done through the "normal"
+page allocator. Introduce a helper, efi_memmap_alloc(), for this. Use
+it from efi_arch_mem_reserve(), efi_free_boot_services() and, for the sake
+of consistency, from efi_fake_memmap() as well.
+
+Note that for the latter, the memmap allocations cease to be page aligned.
+Page alignment isn't needed there, though.
+
+Tested-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Nicolai Stange <nicstange@gmail.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: <stable@vger.kernel.org> # v4.9
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Mika Penttilä <mika.penttila@nextfour.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-efi@vger.kernel.org
+Fixes: 4bc9f92e64c8 ("x86/efi-bgrt: Use efi_mem_reserve() to avoid copying image data")
+Link: http://lkml.kernel.org/r/20170105125130.2815-1-nicstange@gmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Matthias Brugger <mbrugger@suse.com>
+---
+ arch/x86/platform/efi/quirks.c | 4 ++--
+ drivers/firmware/efi/fake_mem.c | 3 +--
+ drivers/firmware/efi/memmap.c | 38 ++++++++++++++++++++++++++++++++++++++
+ include/linux/efi.h | 1 +
+ 4 files changed, 42 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
+index 10aca63..30031d5 100644
+--- a/arch/x86/platform/efi/quirks.c
++++ b/arch/x86/platform/efi/quirks.c
+@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
+
+ new_size = efi.memmap.desc_size * num_entries;
+
+- new_phys = memblock_alloc(new_size, 0);
++ new_phys = efi_memmap_alloc(num_entries);
+ if (!new_phys) {
+ pr_err("Could not allocate boot services memmap\n");
+ return;
+@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
+ }
+
+ new_size = efi.memmap.desc_size * num_entries;
+- new_phys = memblock_alloc(new_size, 0);
++ new_phys = efi_memmap_alloc(num_entries);
+ if (!new_phys) {
+ pr_err("Failed to allocate new EFI memmap\n");
+ return;
+diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
+index 520a40e..6c7d60c 100644
+--- a/drivers/firmware/efi/fake_mem.c
++++ b/drivers/firmware/efi/fake_mem.c
+@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
+ }
+
+ /* allocate memory for new EFI memmap */
+- new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
+- PAGE_SIZE);
++ new_memmap_phy = efi_memmap_alloc(new_nr_map);
+ if (!new_memmap_phy)
+ return;
+
+diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
+index f03ddec..7868644 100644
+--- a/drivers/firmware/efi/memmap.c
++++ b/drivers/firmware/efi/memmap.c
+@@ -9,6 +9,44 @@
+ #include <linux/efi.h>
+ #include <linux/io.h>
+ #include <asm/early_ioremap.h>
++#include <linux/memblock.h>
++#include <linux/slab.h>
++
++static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
++{
++ return memblock_alloc(size, 0);
++}
++