author     Kernel Build Daemon <kbuild@suse.de>    2019-06-08 07:08:06 +0200
committer  Kernel Build Daemon <kbuild@suse.de>    2019-06-08 07:08:06 +0200
commit     543ddd7b33f8b63fd7e768d9c6caa7d5bdc6b835 (patch)
tree       5f10683b12d96853e85a44f74cbd15b16190b0de
parent     28fa3e9fd1ef9a058b53b2a5a323b23a09e6bba9 (diff)
parent     94a6e53ab4f10f2ade8edf8033aa0f40d1906af8 (diff)

Merge branch 'SLE15-SP1' into openSUSE-15.1
-rw-r--r--  blacklist.conf | 19
-rw-r--r--  config/arm64/default | 1
-rw-r--r--  kabi/severities | 13
-rw-r--r--  patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch | 2
-rw-r--r--  patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch | 2
-rw-r--r--  patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch | 154
-rw-r--r--  patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch | 53
-rw-r--r--  patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch | 2
-rw-r--r--  patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch | 312
-rw-r--r--  patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch | 112
-rw-r--r--  patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch | 2
-rw-r--r--  patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch | 122
-rw-r--r--  patches.drivers/0001-crypto-qat-Remove-VLA-usage.patch | 49
-rw-r--r--  patches.drivers/0001-crypto-qat-Remove-unused-goto-label.patch | 33
-rw-r--r--  patches.drivers/0001-crypto-qat-move-temp-buffers-off-the-stack.patch | 192
-rw-r--r--  patches.drivers/0001-crypto-qat-no-need-to-check-return-value-of-debugfs_.patch | 205
-rw-r--r--  patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch | 928
-rw-r--r--  patches.drivers/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch (renamed from patches.arch/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch) | 6
-rw-r--r--  patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch | 72
-rw-r--r--  patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch | 2
-rw-r--r--  patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch | 66
-rw-r--r--  patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch | 72
-rw-r--r--  patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072 | 119
-rw-r--r--  patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver | 36
-rw-r--r--  patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC | 72
-rw-r--r--  patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX207 | 57
-rw-r--r--  patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x | 111
-rw-r--r--  patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch | 118
-rw-r--r--  patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch | 56
-rw-r--r--  patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch | 83
-rw-r--r--  patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch | 31
-rw-r--r--  patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch | 459
-rw-r--r--  patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch | 142
-rw-r--r--  patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch | 47
-rw-r--r--  patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch | 58
-rw-r--r--  patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch | 57
-rw-r--r--  patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch | 96
-rw-r--r--  patches.drivers/IB-hfi1-Fix-two-format-strings.patch | 57
-rw-r--r--  patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch | 115
-rw-r--r--  patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch | 53
-rw-r--r--  patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch | 43
-rw-r--r--  patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch | 287
-rw-r--r--  patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch | 40
-rw-r--r--  patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch | 44
-rw-r--r--  patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch | 41
-rw-r--r--  patches.drivers/IB-hw-Remove-unneeded-semicolons.patch | 102
-rw-r--r--  patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch | 59
-rw-r--r--  patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch | 80
-rw-r--r--  patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch | 73
-rw-r--r--  patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch | 40
-rw-r--r--  patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch | 46
-rw-r--r--  patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch | 33
-rw-r--r--  patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch | 27
-rw-r--r--  patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch | 112
-rw-r--r--  patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch | 447
-rw-r--r--  patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch | 189
-rw-r--r--  patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch | 42
-rw-r--r--  patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch | 47
-rw-r--r--  patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch | 466
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch | 41
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch | 41
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch | 142
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch | 60
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch | 38
-rw-r--r--  patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch | 34
-rw-r--r--  patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch | 91
-rw-r--r--  patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch | 36
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch | 307
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch | 48
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch | 168
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch | 285
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch | 54
-rw-r--r--  patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch | 27
-rw-r--r--  patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch | 34
-rw-r--r--  patches.drivers/RDMA-hns-Make-some-function-static.patch | 60
-rw-r--r--  patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch | 48
-rw-r--r--  patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch | 28
-rw-r--r--  patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch | 92
-rw-r--r--  patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch | 248
-rw-r--r--  patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch | 68
-rw-r--r--  patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch | 133
-rw-r--r--  patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch | 62
-rw-r--r--  patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch | 124
-rw-r--r--  patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch | 41
-rw-r--r--  patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch | 33
-rw-r--r--  patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Update-CQE-specifications.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch | 29
-rw-r--r--  patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch | 90
-rw-r--r--  patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch | 39
-rw-r--r--  patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch | 47
-rw-r--r--  patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch | 89
-rw-r--r--  patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch | 30
-rw-r--r--  patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch | 36
-rw-r--r--  patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch | 6
-rw-r--r--  patches.drivers/arm64-fix-ACPI-dependencies.patch | 2
-rw-r--r--  patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch | 2
-rw-r--r--  patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch | 78
-rw-r--r--  patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch | 60
-rw-r--r--  patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch | 30
-rw-r--r--  patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch | 47
-rw-r--r--  patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch | 120
-rw-r--r--  patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch | 54
-rw-r--r--  patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch | 93
-rw-r--r--  patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch | 27
-rw-r--r--  patches.drivers/bnx2x-fix-various-indentation-issues.patch | 324
-rw-r--r--  patches.drivers/bnxt_en-Fix-aggregation-buffer-leak-under-OOM-condit.patch | 35
-rw-r--r--  patches.drivers/bnxt_en-Fix-possible-BUG-condition-when-calling-pci_.patch | 101
-rw-r--r--  patches.drivers/bnxt_en-Fix-possible-crash-in-bnxt_hwrm_ring_free-un.patch | 65
-rw-r--r--  patches.drivers/bnxt_en-Fix-statistics-context-reservation-logic.patch | 62
-rw-r--r--  patches.drivers/bnxt_en-Fix-uninitialized-variable-usage-in-bnxt_rx_.patch | 53
-rw-r--r--  patches.drivers/bnxt_en-Improve-NQ-reservations.patch | 40
-rw-r--r--  patches.drivers/bnxt_en-Improve-multicast-address-setup-logic.patch | 41
-rw-r--r--  patches.drivers/bnxt_en-Pass-correct-extended-TX-port-statistics-siz.patch | 51
-rw-r--r--  patches.drivers/bnxt_en-Reduce-memory-usage-when-running-in-kdump-ke.patch | 61
-rw-r--r--  patches.drivers/broadcom-Mark-expected-switch-fall-throughs.patch | 75
-rw-r--r--  patches.drivers/chelsio-use-BUG-instead-of-BUG_ON-1.patch | 68
-rw-r--r--  patches.drivers/crypto-chcr-ESN-for-Inline-IPSec-Tx.patch | 353
-rw-r--r--  patches.drivers/crypto-chcr-small-packet-Tx-stalls-the-queue.patch | 34
-rw-r--r--  patches.drivers/crypto-chelsio-Fix-NULL-pointer-dereference.patch | 33
-rw-r--r--  patches.drivers/crypto-chelsio-Fix-passing-zero-to-PTR_ERR-warning-i.patch | 34
-rw-r--r--  patches.drivers/crypto-chelsio-Fix-softlockup-with-heavy-I-O.patch | 59
-rw-r--r--  patches.drivers/crypto-chelsio-Fix-wrong-error-counter-increments.patch | 82
-rw-r--r--  patches.drivers/crypto-chelsio-Fixed-Traffic-Stall.patch | 132
-rw-r--r--  patches.drivers/crypto-chelsio-Handle-PCI-shutdown-event.patch | 761
-rw-r--r--  patches.drivers/crypto-chelsio-Inline-single-pdu-only.patch | 28
-rw-r--r--  patches.drivers/crypto-chelsio-Reset-counters-on-cxgb4-Detach.patch | 36
-rw-r--r--  patches.drivers/crypto-chelsio-Swap-location-of-AAD-and-IV-sent-in-W.patch | 537
-rw-r--r--  patches.drivers/crypto-chelsio-Use-same-value-for-both-channel-in-si.patch | 91
-rw-r--r--  patches.drivers/crypto-chelsio-avoid-using-sa_entry-imm.patch | 59
-rw-r--r--  patches.drivers/crypto-chelsio-check-set_msg_len-overflow-in-generat.patch | 47
-rw-r--r--  patches.drivers/crypto-chelsio-clean-up-various-indentation-issues.patch | 64
-rw-r--r--  patches.drivers/crypto-chelsio-cleanup-send-addr-as-value-in-functio.patch | 83
-rw-r--r--  patches.drivers/crypto-chelsio-count-incomplete-block-in-IV.patch | 31
-rw-r--r--  patches.drivers/crypto-chelsio-remove-set-but-not-used-variable-kctx.patch | 42
-rw-r--r--  patches.drivers/crypto-chelsio-remove-set-but-not-used-variables-ada.patch | 41
-rw-r--r--  patches.drivers/crypto-chtls-remove-cdev_list_lock.patch | 30
-rw-r--r--  patches.drivers/crypto-chtls-remove-set-but-not-used-variables-err-a.patch | 105
-rw-r--r--  patches.drivers/cxgb4-Add-VF-Link-state-support.patch | 127
-rw-r--r--  patches.drivers/cxgb4-Add-new-T6-PCI-device-ids-0x608b.patch | 24
-rw-r--r--  patches.drivers/cxgb4-Delete-all-hash-and-TCAM-filters-before-resour.patch | 111
-rw-r--r--  patches.drivers/cxgb4-Don-t-return-EAGAIN-when-TCAM-is-full.patch | 52
-rw-r--r--  patches.drivers/cxgb4-Enable-outer-UDP-checksum-offload-for-T6.patch | 33
-rw-r--r--  patches.drivers/cxgb4-Fix-error-path-in-cxgb4_init_module.patch | 80
-rw-r--r--  patches.drivers/cxgb4-Revert-cxgb4-Remove-SGE_HOST_PAGE_SIZE-depende.patch | 44
-rw-r--r--  patches.drivers/cxgb4-TLS-record-offload-enable.patch | 28
-rw-r--r--  patches.drivers/cxgb4-Update-1.23.3.0-as-the-latest-firmware-support.patch | 52
-rw-r--r--  patches.drivers/cxgb4-add-tcb-flags-and-tcb-rpl-struct.patch | 68
-rw-r--r--  patches.drivers/cxgb4-chtls-Prefix-adapter-flags-with-CXGB4.patch | 828
-rw-r--r--  patches.drivers/cxgb4-cxgb4vf-Display-advertised-FEC-in-ethtool.patch | 302
-rw-r--r--  patches.drivers/cxgb4-cxgb4vf-Fix-up-netdev-hw_features.patch | 75
-rw-r--r--  patches.drivers/cxgb4-cxgb4vf_main-Mark-expected-switch-fall-through.patch | 50
-rw-r--r--  patches.drivers/cxgb4-free-mac_hlist-properly.patch | 42
-rw-r--r--  patches.drivers/cxgb4-kfree-mhp-after-the-debug-print.patch | 32
-rw-r--r--  patches.drivers/cxgb4-offload-VLAN-flows-regardless-of-VLAN-ethtype.patch | 42
-rw-r--r--  patches.drivers/cxgb4-remove-DEFINE_SIMPLE_DEBUGFS_FILE.patch | 298
-rw-r--r--  patches.drivers/cxgb4-remove-set-but-not-used-variables-multitrc-spe.patch | 63
-rw-r--r--  patches.drivers/cxgb4vf-Call-netif_carrier_off-properly-in-pci_probe.patch | 35
-rw-r--r--  patches.drivers/cxgb4vf-Enter-debugging-mode-if-FW-is-inaccessible.patch | 144
-rw-r--r--  patches.drivers/cxgb4vf-Prefix-adapter-flags-with-CXGB4VF.patch | 297
-rw-r--r--  patches.drivers/cxgb4vf-Revert-force-link-up-behaviour.patch | 57
-rw-r--r--  patches.drivers/cxgb4vf-free-mac_hlist-properly.patch | 40
-rw-r--r--  patches.drivers/drivers-acpi-add-dependency-of-EFI-for-arm64.patch (renamed from patches.arch/drivers-acpi-add-dependency-of-EFI-for-arm64.patch) | 2
-rw-r--r--  patches.drivers/e1000e-Disable-runtime-PM-on-CNP.patch | 39
-rw-r--r--  patches.drivers/e1000e-Exclude-device-from-suspend-direct-complete-o.patch | 36
-rw-r--r--  patches.drivers/e1000e-fix-a-missing-check-for-return-value.patch | 74
-rw-r--r--  patches.drivers/efi-Permit-calling-efi_mem_reserve_persistent-from-a.patch | 2
-rw-r--r--  patches.drivers/efi-Permit-multiple-entries-in-persistent-memreserve.patch | 2
-rw-r--r--  patches.drivers/efi-Prevent-GICv3-WARN-by-mapping-the-memreserve-tab.patch | 2
-rw-r--r--  patches.drivers/efi-Reduce-the-amount-of-memblock-reservations-for-p.patch | 2
-rw-r--r--  patches.drivers/efi-arm-Defer-persistent-reservations-until-after-pa.patch | 2
-rw-r--r--  patches.drivers/efi-arm-Revert-Defer-persistent-reservations-until-a.patch | 2
-rw-r--r--  patches.drivers/efi-arm-Revert-deferred-unmap-of-early-memmap-mappin.patch | 2
-rw-r--r--  patches.drivers/efi-arm-map-UEFI-memory-map-even-w-o-runtime-service.patch (renamed from patches.arch/efi-arm-map-UEFI-memory-map-even-w-o-runtime-service.patch) | 2
-rw-r--r--  patches.drivers/efi-arm-preserve-early-mapping-of-UEFI-memory-map-lo.patch (renamed from patches.arch/efi-arm-preserve-early-mapping-of-UEFI-memory-map-lo.patch) | 2
-rw-r--r--  patches.drivers/fm10k-TRIVIAL-cleanup-of-extra-spacing-in-function-c.patch | 29
-rw-r--r--  patches.drivers/fm10k-use-struct_size-in-kzalloc.patch | 55
-rw-r--r--  patches.drivers/hid-core-move-usage-page-concatenation-to-main-item.patch | 145
-rw-r--r--  patches.drivers/i40e-Able-to-add-up-to-16-MAC-filters-on-an-untruste.patch | 36
-rw-r--r--  patches.drivers/i40e-Add-support-FEC-configuration-for-Fortville-25G.patch | 400
-rw-r--r--  patches.drivers/i40e-Add-support-for-X710-B-P-SFP-cards.patch | 252
-rw-r--r--  patches.drivers/i40e-Change-unmatched-function-types.patch | 56
-rw-r--r--  patches.drivers/i40e-Changed-maximum-supported-FW-API-version-to-1.8.patch | 42
-rw-r--r--  patches.drivers/i40e-Fix-for-10G-ports-LED-not-blinking.patch | 83
-rw-r--r--  patches.drivers/i40e-Fix-for-allowing-too-many-MDD-events-on-VF.patch | 44
-rw-r--r--  patches.drivers/i40e-Fix-misleading-error-message.patch | 30
-rw-r--r--  patches.drivers/i40e-Fix-of-memory-leak-and-integer-truncation-in-i4.patch | 79
-rw-r--r--  patches.drivers/i40e-Fix-the-typo-in-adding-40GE-KR4-mode.patch | 32
-rw-r--r--  patches.drivers/i40e-Further-implementation-of-LLDP.patch | 281
-rw-r--r--  patches.drivers/i40e-Implement-DDP-support-in-i40e-driver.patch | 953
-rw-r--r--  patches.drivers/i40e-Introduce-recovery-mode-support.patch | 501
-rw-r--r--  patches.drivers/i40e-Limiting-RSS-queues-to-CPUs.patch | 28
-rw-r--r--  patches.drivers/i40e-Memory-leak-in-i40e_config_iwarp_qvlist.patch | 80
-rw-r--r--  patches.drivers/i40e-Queues-are-reserved-despite-Invalid-argument-er.patch | 45
-rw-r--r--  patches.drivers/i40e-Remove-misleading-messages-for-untrusted-VF.patch | 48
-rw-r--r--  patches.drivers/i40e-Remove-umem-from-VSI.patch | 241
-rw-r--r--  patches.drivers/i40e-Report-advertised-link-modes-on-40GBASE_SR4.patch | 43
-rw-r--r--  patches.drivers/i40e-Report-advertised-link-modes-on-40GBase_LR4-CR4.patch | 59
-rw-r--r--  patches.drivers/i40e-Revert-ShadowRAM-checksum-calculation-change.patch | 65
-rw-r--r--  patches.drivers/i40e-Setting-VF-to-VLAN-0-requires-restart.patch | 36
-rw-r--r--  patches.drivers/i40e-ShadowRAM-checksum-calculation-change.patch | 63
-rw-r--r--  patches.drivers/i40e-The-driver-now-prints-the-API-version-in-error-.patch | 53
-rw-r--r--  patches.drivers/i40e-Update-i40e_init_dcb-to-return-correct-error.patch | 111
-rw-r--r--  patches.drivers/i40e-Use-struct_size-in-kzalloc.patch | 53
-rw-r--r--  patches.drivers/i40e-VF-s-promiscuous-attribute-is-not-kept.patch | 74
-rw-r--r--  patches.drivers/i40e-Wrong-truncation-from-u16-to-u8.patch | 32
-rw-r--r--  patches.drivers/i40e-add-new-pci-id-for-X710-XXV710-N3000-cards.patch | 52
-rw-r--r--  patches.drivers/i40e-add-num_vectors-checker-in-iwarp-handler.patch | 38
-rw-r--r--  patches.drivers/i40e-add-tracking-of-AF_XDP-ZC-state-for-each-queue-.patch | 100
-rw-r--r--  patches.drivers/i40e-change-behavior-on-PF-in-response-to-MDD-event.patch | 52
-rw-r--r--  patches.drivers/i40e-check-queue-pairs-num-in-config-queues-handler.patch | 44
-rw-r--r--  patches.drivers/i40e-clean-up-several-indentation-issues.patch | 63
-rw-r--r--  patches.drivers/i40e-don-t-allow-changes-to-HW-VLAN-stripping-on-act.patch | 42
-rw-r--r--  patches.drivers/i40e-fix-i40e_ptp_adjtime-when-given-a-negative-delt.patch | 52
-rw-r--r--  patches.drivers/i40e-fix-misleading-message-about-promisc-setting-on.patch | 79
-rw-r--r--  patches.drivers/i40e-increase-indentation.patch | 31
-rw-r--r--  patches.drivers/i40e-missing-input-validation-on-VF-message-handling.patch | 168
-rw-r--r--  patches.drivers/i40e-move-i40e_xsk_umem-function.patch | 74
-rw-r--r--  patches.drivers/i40e-print-PCI-vendor-and-device-ID-during-probe.patch | 44
-rw-r--r--  patches.drivers/i40e-remove-debugfs-tx_timeout-support.patch | 61
-rw-r--r--  patches.drivers/i40e-remove-error-msg-when-vf-with-port-vlan-tries-t.patch | 32
-rw-r--r--  patches.drivers/i40e-remove-out-of-range-comparisons-in-i40e_validat.patch | 54
-rw-r--r--  patches.drivers/i40e-save-PTP-time-before-a-device-reset.patch | 171
-rw-r--r--  patches.drivers/i40e-update-version-number-d1fc90a9.patch | 28
-rw-r--r--  patches.drivers/i40e-update-version-number.patch | 28
-rw-r--r--  patches.drivers/i40iw-Avoid-panic-when-handling-the-inetdev-event.patch | 72
-rw-r--r--  patches.drivers/i40iw-remove-support-for-ib_get_vector_affinity.patch | 62
-rw-r--r--  patches.drivers/i40iw-remove-use-of-VLAN_TAG_PRESENT.patch | 55
-rw-r--r--  patches.drivers/ice-Add-52-byte-RSS-hash-key-support.patch | 60
-rw-r--r--  patches.drivers/ice-Add-ability-to-update-rx-usecs-high.patch | 113
-rw-r--r--  patches.drivers/ice-Add-code-for-DCB-initialization-part-1-4.patch | 366
-rw-r--r--  patches.drivers/ice-Add-code-for-DCB-initialization-part-2-4.patch | 1190
-rw-r--r--  patches.drivers/ice-Add-code-for-DCB-initialization-part-3-4.patch | 1409
-rw-r--r--  patches.drivers/ice-Add-code-for-DCB-initialization-part-4-4.patch | 134
-rw-r--r--  patches.drivers/ice-Add-code-for-DCB-rebuild.patch | 139
-rw-r--r--  patches.drivers/ice-Add-code-to-control-FW-LLDP-and-DCBX.patch | 330
-rw-r--r--  patches.drivers/ice-Add-code-to-get-DCB-related-statistics.patch | 191
-rw-r--r--  patches.drivers/ice-Add-code-to-process-LLDP-MIB-change-events.patch | 127
-rw-r--r--  patches.drivers/ice-Add-ethtool-private-flag-to-make-forcing-link-do.patch | 193
-rw-r--r--  patches.drivers/ice-Add-ethtool-set_phys_id-handler.patch | 147
-rw-r--r--  patches.drivers/ice-Add-function-to-program-ethertype-based-filter-r.patch | 201
-rw-r--r--  patches.drivers/ice-Add-missing-PHY-type-to-link-settings.patch | 31
-rw-r--r--  patches.drivers/ice-Add-missing-case-in-print_link_msg-for-printing-.patch | 33
-rw-r--r--  patches.drivers/ice-Add-more-validation-in-ice_vc_cfg_irq_map_msg.patch | 179
-rw-r--r--  patches.drivers/ice-Add-priority-information-into-VLAN-header.patch | 135
-rw-r--r--  patches.drivers/ice-Add-reg_idx-variable-in-ice_q_vector-structure.patch | 297
-rw-r--r--  patches.drivers/ice-Add-support-for-PF-VF-promiscuous-mode.patch | 802
-rw-r--r--  patches.drivers/ice-Add-support-for-adaptive-interrupt-moderation.patch | 274
-rw-r--r--  patches.drivers/ice-Add-support-for-new-PHY-types.patch | 737
-rw-r--r--  patches.drivers/ice-Allow-for-software-timestamping.patch | 28
-rw-r--r--  patches.drivers/ice-Always-free-allocate-q_vectors.patch | 129
-rw-r--r--  patches.drivers/ice-Audit-hotpath-structures-with-pahole.patch | 123
-rw-r--r--  patches.drivers/ice-Bump-driver-version.patch | 28
-rw-r--r--  patches.drivers/ice-Bump-version.patch | 28
-rw-r--r--  patches.drivers/ice-Calculate-ITR-increment-based-on-direct-calculat.patch | 199
-rw-r--r--  patches.drivers/ice-Configure-RSS-LUT-and-HASH-KEY-in-rebuild-path.patch | 33
-rw-r--r--  patches.drivers/ice-Create-a-generic-name-for-the-ice_rx_flg64_bits-.patch | 103
-rw-r--r--  patches.drivers/ice-Create-framework-for-VSI-queue-context.patch | 473
-rw-r--r--  patches.drivers/ice-Determine-descriptor-count-and-ring-size-based-o.patch | 155
-rw-r--r--  patches.drivers/ice-Disable-sniffing-VF-traffic-on-PF.patch | 72
-rw-r--r--  patches.drivers/ice-Do-not-bail-out-when-filter-already-exists.patch | 35
-rw-r--r--  patches.drivers/ice-Do-not-set-LB_EN-for-prune-switch-rules.patch | 38
-rw-r--r--  patches.drivers/ice-Do-not-unnecessarily-initialize-local-variable.patch | 33
-rw-r--r--  patches.drivers/ice-Don-t-let-VF-know-that-it-is-untrusted.patch | 54
-rw-r--r--  patches.drivers/ice-Don-t-remove-VLAN-filters-that-were-never-progra.patch | 59
-rw-r--r--  patches.drivers/ice-Enable-LAN_EN-for-the-right-recipes.patch | 64
-rw-r--r--  patches.drivers/ice-Enable-MAC-anti-spoof-by-default.patch | 44
-rw-r--r--  patches.drivers/ice-Enable-link-events-over-the-ARQ.patch | 180
-rw-r--r--  patches.drivers/ice-Ensure-only-valid-bits-are-set-in-ice_aq_set_phy.patch | 75
-rw-r--r--  patches.drivers/ice-Fix-added-in-VSI-supported-nodes-calc.patch | 42
-rw-r--r--  patches.drivers/ice-Fix-broadcast-traffic-in-port-VLAN-mode.patch | 149
-rw-r--r--  patches.drivers/ice-Fix-for-FC-get-rx-tx-pause-params.patch | 73
-rw-r--r--  patches.drivers/ice-Fix-for-adaptive-interrupt-moderation.patch | 386
-rw-r--r--  patches.drivers/ice-Fix-for-allowing-too-many-MDD-events-on-VF.patch | 71
-rw-r--r--  patches.drivers/ice-Fix-incorrect-use-of-abbreviations.patch | 2333
-rw-r--r--  patches.drivers/ice-Fix-issue-reclaiming-resources-back-to-the-pool-.patch | 70
-rw-r--r--  patches.drivers/ice-Fix-issue-reconfiguring-VF-queues.patch | 246
-rw-r--r--  patches.drivers/ice-Fix-issue-when-adding-more-than-allowed-VLANs.patch | 87
-rw-r--r--  patches.drivers/ice-Fix-issue-with-VF-attempt-to-delete-default-MAC-.patch | 81
-rw-r--r--  patches.drivers/ice-Fix-issue-with-VF-reset-and-multiple-VFs-support.patch | 67
-rw-r--r--  patches.drivers/ice-Fix-the-calculation-of-ICE_MAX_MTU.patch | 34
-rw-r--r--  patches.drivers/ice-Fix-typos-in-code-comments.patch | 83
-rw-r--r--  patches.drivers/ice-Gather-the-rx-buf-clean-up-logic-for-better-reus.patch | 173
-rw-r--r--  patches.drivers/ice-Get-VF-VSI-instances-directly-via-PF.patch | 169
-rw-r--r--  patches.drivers/ice-Get-resources-per-function.patch | 62
-rw-r--r--  patches.drivers/ice-Get-rid-of-ice_pull_tail.patch | 131
-rw-r--r--  patches.drivers/ice-Implement-flow-to-reset-VFs-with-PFR-and-other-r.patch | 59
-rw-r--r--  patches.drivers/ice-Implement-getting-and-setting-ethtool-coalesce.patch | 334
-rw-r--r--  patches.drivers/ice-Implement-pci_error_handler-ops.patch | 204
-rw-r--r--  patches.drivers/ice-Implement-support-for-normal-get_eeprom-_len-eth.patch | 201
-rw-r--r--  patches.drivers/ice-Introduce-bulk-update-for-page-count.patch | 127
-rw-r--r--  patches.drivers/ice-Limit-the-ice_add_rx_frag-to-frag-addition.patch | 275
-rw-r--r--  patches.drivers/ice-Move-aggregator-list-into-ice_hw-instance.patch | 141
-rw-r--r--  patches.drivers/ice-Offload-SCTP-checksum.patch | 66
-rw-r--r--  patches.drivers/ice-Preserve-VLAN-Rx-stripping-settings.patch | 33
-rw-r--r--  patches.drivers/ice-Prevent-unintended-multiple-chain-resets.patch | 45
-rw-r--r--  patches.drivers/ice-Pull-out-page-reuse-checks-onto-separate-functio.patch | 123
-rw-r--r--  patches.drivers/ice-Put-__ICE_PREPARED_FOR_RESET-check-in-ice_prepar.patch | 47
-rw-r--r--  patches.drivers/ice-Reduce-scope-of-variable-in-ice_vsi_cfg_rxqs.patch | 56
-rw-r--r--  patches.drivers/ice-Refactor-a-few-Tx-scheduler-functions.patch | 253
-rw-r--r--  patches.drivers/ice-Refactor-getting-setting-coalesce.patch | 233
-rw-r--r--  patches.drivers/ice-Refactor-link-event-flow.patch | 211
-rw-r--r--  patches.drivers/ice-Remove-2-BITS-comment.patch | 41
-rw-r--r--  patches.drivers/ice-Remove-__always_unused-attribute.patch | 30
-rw-r--r--  patches.drivers/ice-Remove-runtime-change-of-PFINT_OICR_ENA-register.patch | 94
-rw-r--r--  patches.drivers/ice-Remove-unnecessary-braces.patch | 30
-rw-r--r--  patches.drivers/ice-Remove-unnecessary-newlines-from-log-messages.patch | 42
-rw-r--r--  patches.drivers/ice-Remove-unnecessary-wait-when-disabling-enabling-.patch | 47
-rw-r--r--  patches.drivers/ice-Remove-unused-function-prototype-10c7e4c5.patch | 30
-rw-r--r--  patches.drivers/ice-Remove-unused-function-prototype.patch | 46
-rw-r--r--  patches.drivers/ice-Remove-unused-vsi_id-field.patch | 27
-rw-r--r--  patches.drivers/ice-Reset-all-VFs-with-VFLR-during-SR-IOV-init-flow.patch | 35
-rw-r--r--  patches.drivers/ice-Resolve-static-analysis-reported-issue.patch | 58
-rw-r--r--  patches.drivers/ice-Restore-VLAN-switch-rule-if-port-VLAN-existed-be.patch | 33
-rw-r--r--  patches.drivers/ice-Retrieve-rx_buf-in-separate-function.patch | 169
-rw-r--r--  patches.drivers/ice-Return-configuration-error-without-queue-to-disa.patch | 46
-rw-r--r--  patches.drivers/ice-Rework-queue-management-code-for-reuse.patch | 521
-rw-r--r--  patches.drivers/ice-Separate-if-conditions-for-ice_set_features.patch | 46
-rw-r--r--  patches.drivers/ice-Set-LAN_EN-for-all-directional-rules.patch | 50
-rw-r--r--  patches.drivers/ice-Set-physical-link-up-down-when-an-interface-is-s.patch | 184
-rw-r--r--  patches.drivers/ice-Suppress-false-positive-style-issues-reported-by.patch | 33
-rw-r--r--  patches.drivers/ice-Update-comment-regarding-the-ITR_GRAN_S.patch | 30
-rw-r--r--  patches.drivers/ice-Update-function-header-for-__ice_vsi_get_qs.patch | 34
-rw-r--r--  patches.drivers/ice-Update-rings-based-on-TC-information.patch | 130
-rw-r--r--  patches.drivers/ice-Use-bitfields-where-possible.patch | 53
-rw-r--r--  patches.drivers/ice-Use-dev_err-when-ice_cfg_vsi_lan-fails.patch | 43
-rw-r--r--  patches.drivers/ice-Use-ice_for_each_q_vector-macro-where-possible.patch | 101
-rw-r--r--  patches.drivers/ice-Use-more-efficient-structures.patch | 42
-rw-r--r--  patches.drivers/ice-Use-pf-instead-of-vsi-back.patch | 247
-rw-r--r--  patches.drivers/ice-Validate-ring-existence-and-its-q_vector-per-VSI.patch | 33
-rw-r--r--  patches.drivers/ice-add-and-use-new-ice_for_each_traffic_class-macro.patch | 76
-rw-r--r--  patches.drivers/ice-add-const-qualifier-to-mac_addr-parameter.patch | 56
-rw-r--r--  patches.drivers/ice-avoid-multiple-unnecessary-de-references-in-prob.patch | 128
-rw-r--r--  patches.drivers/ice-change-VF-VSI-tc-info-along-with-num_queues.patch | 31
-rw-r--r--  patches.drivers/ice-check-for-a-leaf-node-presence.patch | 71
-rw-r--r--  patches.drivers/ice-clear-VF-ARQLEN-register-on-reset.patch | 52
-rw-r--r--  patches.drivers/ice-code-cleanup-in-ice_sched.c.patch | 86
-rw-r--r--  patches.drivers/ice-configure-GLINT_ITR-to-always-have-an-ITR-gran-o.patch | 137
-rw-r--r--  patches.drivers/ice-don-t-spam-VFs-with-link-messages.patch | 36
-rw-r--r--  patches.drivers/ice-enable-VF-admin-queue-interrupts.patch | 45
-rw-r--r--  patches.drivers/ice-fix-ice_remove_rule_internal-vsi_list-handling.patch | 76
-rw-r--r--  patches.drivers/ice-fix-issue-where-host-reboots-on-unload-when-iomm.patch | 141
-rw-r--r--  patches.drivers/ice-fix-numeric-overflow-warning.patch | 55
-rw-r--r--  patches.drivers/ice-fix-overlong-string-update-stats-output.patch | 117
-rw-r--r--  patches.drivers/ice-fix-some-function-prototype-and-signature-style-.patch | 532
-rw-r--r--  patches.drivers/ice-fix-stack-hogs-from-struct-ice_vsi_ctx-structure.patch | 375
-rw-r--r--  patches.drivers/ice-fix-static-analysis-warnings.patch | 66
-rw-r--r--  patches.drivers/ice-fix-the-divide-by-zero-issue.patch | 42
-rw-r--r--  patches.drivers/ice-flush-Tx-pipe-on-disable-queue-timeout.patch | 72
-rw-r--r--  patches.drivers/ice-map-Rx-buffer-pages-with-DMA-attributes.patch | 97
-rw-r--r--  patches.drivers/ice-only-use-the-VF-for-ICE_VSI_VF-in-ice_vsi_releas.patch | 39
-rw-r--r--  patches.drivers/ice-remove-redundant-variable-and-if-condition.patch | 46
-rw-r--r--  patches.drivers/ice-sizeof-type-should-be-avoided.patch | 328
-rw-r--r--  patches.drivers/ice-update-VSI-config-dynamically.patch | 134
-rw-r--r--  patches.drivers/ice-use-absolute-vector-ID-for-VFs.patch | 42
-rw-r--r--  patches.drivers/ice-use-ice_for_each_vsi-macro-when-possible.patch | 111
-rw-r--r--  patches.drivers/ice-use-irq_num-var-in-ice_vsi_req_irq_msix.patch | 35
-rw-r--r--  patches.drivers/ice-use-virt-channel-status-codes.patch | 929
-rw-r--r--  patches.drivers/igb-Bump-version-number.patch | 29
-rw-r--r--  patches.drivers/igb-Exclude-device-from-suspend-direct-complete-opti.patch | 37
-rw-r--r--  patches.drivers/igb-Fix-WARN_ONCE-on-runtime-suspend.patch | 148
-rw-r--r--  patches.drivers/igb-fix-various-indentation-issues.patch | 30
-rw-r--r--  patches.drivers/igb-use-struct_size-helper.patch | 54
-rw-r--r--  patches.drivers/igc-Add-ethtool-support.patch | 1420
-rw-r--r--  patches.drivers/igc-Add-multiple-receive-queues-control-supporting.patch | 157
-rw-r--r--  patches.drivers/igc-Add-support-for-statistics.patch | 472
-rw-r--r--  patches.drivers/igc-Add-support-for-the-ntuple-feature.patch | 140
-rw-r--r--  patches.drivers/igc-Extend-the-ethtool-supporting.patch | 947
-rw-r--r--  patches.drivers/igc-Fix-code-redundancy.patch | 87
-rw-r--r--  patches.drivers/igc-Fix-the-typo-in-igc_base.h-header-definition.patch | 30
-rw-r--r--  patches.drivers/igc-Remove-the-igc_get_phy_id_base-method.patch | 52
-rw-r--r--  patches.drivers/igc-Remove-the-igc_read_mac_addr_base-method.patch | 49
-rw-r--r--  patches.drivers/igc-Remove-unneeded-code.patch | 57
-rw-r--r--  patches.drivers/igc-Remove-unneeded-hw_dbg-prints.patch | 38
-rw-r--r--  patches.drivers/igc-Remove-unreachable-code-from-igc_phy.c-file.patch | 46
-rw-r--r--  patches.drivers/igc-Remove-unused-code.patch | 49
-rw-r--r--  patches.drivers/igc-Use-struct_size-helper.patch | 65
-rw-r--r--  patches.drivers/include-linux-bitops.h-introduce-BITS_PER_TYPE.patch | 47
-rw-r--r--  patches.drivers/infiniband-hfi1-drop-crazy-DEBUGFS_SEQ_FILE_CREATE-m.patch | 152
-rw-r--r--  patches.drivers/infiniband-hfi1-no-need-to-check-return-value-of-deb.patch | 105
-rw-r--r--  patches.drivers/infiniband-qedr-Potential-null-ptr-dereference-of-qp.patch | 29
-rw-r--r--  patches.drivers/intel-correct-return-from-set-features-callback.patch | 80
-rw-r--r--  patches.drivers/iommu-arm-smmu-v3-Abort-all-transactions-if-SMMU-is-.patch | 2
-rw-r--r--  patches.drivers/iommu-arm-smmu-v3-Don-t-disable-SMMU-in-kdump-kernel.patch | 2
-rw-r--r--  patches.drivers/iw_cxgb-kzalloc-the-iwcm-verbs-struct.patch | 39
-rw-r--r--  patches.drivers/iw_cxgb4-Check-for-send-WR-also-while-posting-write-.patch | 72
-rw-r--r--  patches.drivers/iw_cxgb4-Make-function-read_tcb-static.patch | 32
-rw-r--r--  patches.drivers/iw_cxgb4-complete-the-cached-SRQ-buffers.patch | 303
-rw-r--r--  patches.drivers/iw_cxgb4-fix-srqidx-leak-during-connection-abort.patch | 55
-rw-r--r--  patches.drivers/iw_cxgb4-use-listening-ep-tos-when-accepting-new-con.patch | 51
-rw-r--r--  patches.drivers/iw_cxgb4-use-tos-when-finding-ipv6-routes.patch | 39
-rw-r--r--  patches.drivers/iw_cxgb4-use-tos-when-importing-the-endpoint.patch | 28
-rw-r--r--  patches.drivers/ixgbe-Use-struct_size-helper.patch | 66
-rw-r--r--  patches.drivers/ixgbe-fix-mdio-bus-registration.patch | 58
-rw-r--r--  patches.drivers/ixgbe-fix-older-devices-that-do-not-support-IXGBE_MR.patch | 36
-rw-r--r--  patches.drivers/ixgbe-register-a-mdiobus.patch | 410
-rw-r--r--  patches.drivers/ixgbe-remove-magic-constant-in-ixgbe_reset_hw_82599.patch | 31
-rw-r--r--  patches.drivers/ixgbe-use-mii_bus-to-handle-MII-related-ioctls.patch | 59
-rw-r--r--  patches.drivers/libcxgb-fix-incorrect-ppmax-calculation.patch | 46
-rw-r--r--  patches.drivers/mmc-block-Delete-gendisk-before-cleaning-up-the-requ.patch | 93
-rw-r--r--  patches.drivers/net-chelsio-Add-a-missing-check-on-cudg_get_buffer.patch | 31
-rw-r--r--  patches.drivers/net-cxgb4-fix-various-indentation-issues.patch | 57
-rw-r--r--  patches.drivers/net-hns3-Add-handling-of-MAC-tunnel-interruption.patch | 260
-rw-r--r--  patches.drivers/net-hns3-Add-support-for-netif-message-level-setting.patch | 231
-rw-r--r--  patches.drivers/net-hns3-Make-hclge_destroy_cmd_queue-static.patch | 30
-rw-r--r--  patches.drivers/net-hns3-Make-hclgevf_update_link_mode-static.patch | 30
-rw-r--r--  patches.drivers/net-hns3-add-counter-for-times-RX-pages-gets-allocat.patch | 58
-rw-r--r--  patches.drivers/net-hns3-add-error-handler-for-initializing-command-.patch | 84
-rw-r--r--  patches.drivers/net-hns3-add-function-type-check-for-debugfs-help-in.patch | 66
-rw-r--r--  patches.drivers/net-hns3-add-hns3_gro_complete-for-HW-GRO-process.patch | 301
-rw-r--r--  patches.drivers/net-hns3-add-linearizing-checking-for-TSO-case.patch | 92
-rw-r--r--  patches.drivers/net-hns3-add-protect-when-handling-mac-addr-list.patch | 68
-rw-r--r--  patches.drivers/net-hns3-add-queue-s-statistics-update-to-service-ta.patch | 66
-rw-r--r--  patches.drivers/net-hns3-add-reset-statistics-for-VF.patch | 102
-rw-r--r--  patches.drivers/net-hns3-add-reset-statistics-info-for-PF.patch | 175
-rw-r--r--  patches.drivers/net-hns3-add-some-debug-info-for-hclgevf_get_mbx_res.patch | 44
-rw-r--r--  patches.drivers/net-hns3-add-some-debug-information-for-hclge_check_.patch | 43
-rw-r--r--  patches.drivers/net-hns3-add-support-for-dump-ncl-config-by-debugfs.patch | 132
-rw-r--r--  patches.drivers/net-hns3-adjust-the-timing-of-hns3_client_stop-when-.patch | 37
-rw-r--r--  patches.drivers/net-hns3-always-assume-no-drop-TC-for-performance-re.patch | 87
-rw-r--r--  patches.drivers/net-hns3-check-1000M-half-for-hns3_ethtool_ops.set_l.patch | 32
-rw-r--r--  patches.drivers/net-hns3-check-resetting-status-in-hns3_get_stats.patch | 34
-rw-r--r--  patches.drivers/net-hns3-code-optimization-for-command-queue-spin-lo.patch | 59
-rw-r--r--  patches.drivers/net-hns3-combine-len-and-checksum-handling-for-inner.patch | 280
-rw-r--r--  patches.drivers/net-hns3-deactive-the-reset-timer-when-reset-success.patch | 28
-rw-r--r--  patches.drivers/net-hns3-divide-shared-buffer-between-TC.patch | 81
-rw-r--r--  patches.drivers/net-hns3-do-not-initialize-MDIO-bus-when-PHY-is-inex.patch | 45
-rw-r--r--  patches.drivers/net-hns3-do-not-request-reset-when-hardware-resettin.patch | 47
-rw-r--r--  patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch | 166
-rw-r--r--  patches.drivers/net-hns3-extend-the-loopback-state-acquisition-time.patch | 35
-rw-r--r--  patches.drivers/net-hns3-fix-VLAN-offload-handle-for-VLAN-inserted-b.patch | 148
-rw-r--r--  patches.drivers/net-hns3-fix-data-race-between-ring-next_to_clean.patch | 72
-rw-r--r--  patches.drivers/net-hns3-fix-error-handling-for-desc-filling.patch | 94
-rw-r--r--  patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch | 32
-rw-r--r--  patches.drivers/net-hns3-fix-for-TX-clean-num-when-cleaning-TX-BD.patch | 49
-rw-r--r--  patches.drivers/net-hns3-fix-for-tunnel-type-handling-in-hns3_rx_che.patch | 89
-rw-r--r--  patches.drivers/net-hns3-fix-for-vport-bw_limit-overflow-problem.patch | 34
-rw-r--r--  patches.drivers/net-hns3-fix-keep_alive_timer-not-stop-problem.patch | 51
-rw-r--r--  patches.drivers/net-hns3-fix-loop-condition-of-hns3_get_tx_timeo_que.patch | 32
-rw-r--r--  patches.drivers/net-hns3-fix-pause-configure-fail-problem.patch | 36
-rw-r--r--  patches.drivers/net-hns3-fix-set-port-based-VLAN-for-PF.patch | 321
-rw-r--r--  patches.drivers/net-hns3-fix-set-port-based-VLAN-issue-for-VF.patch | 263
-rw-r--r--  patches.drivers/net-hns3-fix-sparse-warning-when-calling-hclge_set_v.patch | 46
-rw-r--r--  patches.drivers/net-hns3-free-the-pending-skb-when-clean-RX-ring.patch | 38
-rw-r--r--  patches.drivers/net-hns3-handle-pending-reset-while-reset-fail.patch | 30
-rw-r--r--  patches.drivers/net-hns3-handle-the-BD-info-on-the-last-BD-of-the-pa.patch | 111
-rw-r--r--  patches.drivers/net-hns3-ignore-lower-level-new-coming-reset.patch | 31
-rw-r--r--  patches.drivers/net-hns3-minor-optimization-for-datapath.patch | 81
-rw-r--r--  patches.drivers/net-hns3-minor-optimization-for-ring_space.patch | 48
-rw-r--r--  patches.drivers/net-hns3-minor-refactor-for-hns3_rx_checksum.patch | 49
-rw-r--r--  patches.drivers/net-hns3-modify-HNS3_NIC_STATE_INITED-flag-in-hns3_r.patch | 39
-rw-r--r--  patches.drivers/net-hns3-modify-VLAN-initialization-to-be-compatible.patch | 195
-rw-r--r--  patches.drivers/net-hns3-modify-the-VF-network-port-media-type-acqui.patch | 136
-rw-r--r--  patches.drivers/net-hns3-not-reset-TQP-in-the-DOWN-while-VF-resettin.patch | 35
-rw-r--r--  patches.drivers/net-hns3-not-reset-vport-who-not-alive-when-PF-reset.patch | 31
-rw-r--r--  patches.drivers/net-hns3-optimize-the-barrier-using-when-cleaning-TX.patch | 71
-rw-r--r--  patches.drivers/net-hns3-prevent-change-MTU-when-resetting.patch | 30
-rw-r--r--  patches.drivers/net-hns3-prevent-double-free-in-hns3_put_ring_config.patch | 76
-rw-r--r--  patches.drivers/net-hns3-reduce-resources-use-in-kdump-kernel.patch | 81
-rw-r--r--  patches.drivers/net-hns3-refactor-BD-filling-for-l2l3l4-info.patch | 111
-rw-r--r--  patches.drivers/net-hns3-refine-tx-timeout-count-handle.patch | 44
-rw-r--r--  patches.drivers/net-hns3-remove-redundant-assignment-of-l2_hdr-to-it.patch | 30
-rw-r--r--  patches.drivers/net-hns3-remove-reset-after-command-send-failed.patch | 68
-rw-r--r--  patches.drivers/net-hns3-return-0-and-print-warning-when-hit-duplica.patch | 39
-rw-r--r--  patches.drivers/net-hns3-set-dividual-reset-level-for-all-RAS-and-MS.patch | 1271
-rw-r--r--  patches.drivers/net-hns3-set-up-the-vport-alive-state-while-reinitia.patch | 38
-rw-r--r--  patches.drivers/net-hns3-set-vport-alive-state-to-default-while-rese.patch | 31
-rw-r--r--  patches.drivers/net-hns3-simplify-hclgevf_cmd_csq_clean.patch | 72
-rw-r--r--  patches.drivers/net-hns3-some-cleanup-for-struct-hns3_enet_ring.patch | 70
-rw-r--r--  patches.drivers/net-hns3-split-function-hnae3_match_n_instantiate.patch | 128
-rw-r--r--  patches.drivers/net-hns3-stop-mailbox-handling-when-command-queue-ne.patch | 31
-rw-r--r--  patches.drivers/net-hns3-stop-sending-keep-alive-msg-when-VF-command.patch | 33
-rw-r--r--  patches.drivers/net-hns3-unify-maybe_stop_tx-for-TSO-and-non-TSO-cas.patch | 259
-rw-r--r--  patches.drivers/net-hns3-unify-the-page-reusing-for-page-size-4K-and.patch | 90
-rw-r--r--  patches.drivers/net-hns3-use-a-reserved-byte-to-identify-need_resp-f.patch | 84
-rw-r--r--  patches.drivers/net-hns3-use-atomic_t-replace-u32-for-arq-s-count.patch | 74
-rw-r--r--  patches.drivers/net-hns3-use-devm_kcalloc-when-allocating-desc_cb.patch | 51
-rw-r--r--  patches.drivers/net-hns3-use-napi_schedule_irqoff-in-hard-interrupts.patch | 33
-rw-r--r--  patches.drivers/net-phy-marvell-Enable-interrupt-function-on-LED2-pi.patch | 63
-rw-r--r--  patches.drivers/net-phy-marvell-add-new-default-led-configure-for-m8.patch | 75
-rw-r--r--  patches.drivers/net-phy-marvell-change-default-m88e1510-LED-configur.patch | 123
-rw-r--r--  patches.drivers/net-tls-free-ctx-in-sock-destruct.patch | 107
-rw-r--r--  patches.drivers/nvme-rdma-fix-possible-free-of-a-non-allocated-async.patch | 5
-rw-r--r--  patches.drivers/qed-Add-API-for-SmartAN-query.patch | 88
-rw-r--r--  patches.drivers/qed-Add-iWARP-100g-support.patch | 111
-rw-r--r--  patches.drivers/qed-Add-infrastructure-for-error-detection-and-recov.patch | 507
-rw-r--r--  patches.drivers/qed-Add-llh-ppfid-interface-and-100g-support-for-off.patch | 1904
-rw-r--r--  patches.drivers/qed-Add-qed-devlink-parameters-table.patch | 173
-rw-r--r--  patches.drivers/qed-Change-hwfn-used-for-sb-initialization.patch | 163
-rw-r--r--  patches.drivers/qed-Define-new-MF-bit-for-no_vlan-config.patch | 66
-rw-r--r--  patches.drivers/qed-Delete-redundant-doorbell-recovery-types.patch | 174
-rw-r--r--  patches.drivers/qed-Fix-iWARP-buffer-size-provided-for-syn-packet-pr.patch | 83
-rw-r--r--  patches.drivers/qed-Fix-iWARP-syn-packet-mac-address-validation.patch | 37
-rw-r--r--  patches.drivers/qed-Fix-missing-DORQ-attentions.patch | 103
-rw-r--r--  patches.drivers/qed-Fix-static-checker-warning-8e2ea3ea.patch | 72
-rw-r--r--  patches.drivers/qed-Fix-the-DORQ-s-attentions-handling.patch | 159
-rw-r--r--  patches.drivers/qed-Fix-the-doorbell-address-sanity-check.patch | 69
-rw-r--r--  patches.drivers/qed-Mark-expected-switch-fall-through.patch | 36
-rw-r--r--  patches.drivers/qed-Modify-api-for-performing-a-dmae-to-another-PF.patch | 385
-rw-r--r--  patches.drivers/qed-Modify-offload-protocols-to-use-the-affined-engi.patch | 926
-rw-r--r--  patches.drivers/qed-Read-device-port-count-from-the-shmem.patch | 222
-rw-r--r--  patches.drivers/qed-Reduce-the-severity-of-ptp-debug-message.patch | 32
-rw-r--r--  patches.drivers/qed-Revise-load-sequence-to-avoid-PCI-errors.patch | 519
-rw-r--r--  patches.drivers/qed-Set-the-doorbell-address-correctly.patch | 99
-rw-r--r--  patches.drivers/qed-fix-indentation-issue-with-statements-in-an-if-b.patch | 38
-rw-r--r--  patches.drivers/qed-fix-spelling-mistake-faspath-fastpath.patch | 28
-rw-r--r--  patches.drivers/qed-fix-spelling-mistake-inculde-include.patch | 32
-rw-r--r--  patches.drivers/qed-remove-duplicated-include-from-qed_if.h.patch | 27
-rw-r--r--  patches.drivers/qed-remove-redundant-assignment-to-rc.patch | 31
-rw-r--r--  patches.drivers/qede-Add-ethtool-interface-for-SmartAN-query.patch | 51
-rw-r--r--  patches.drivers/qede-Error-recovery-process.patch | 636
-rw-r--r--  patches.drivers/qede-Fix-internal-loopback-failure-with-jumbo-mtu-co.patch | 35
-rw-r--r--  patches.drivers/qede-Handle-infinite-driver-spinning-for-Tx-timestam.patch | 159
-rw-r--r--  patches.drivers/qede-Populate-mbi-version-in-ethtool-driver-query-da.patch | 63
-rw-r--r--  patches.drivers/qede-fix-write-to-free-d-pointer-error-and-double-fr.patch | 48
-rw-r--r--  patches.drivers/qede-place-ethtool_rx_flow_spec-after-code-after-TC-.patch | 306
-rw-r--r--  patches.drivers/qedr-Change-the-MSI-X-vectors-selection-to-be-based-.patch | 89
-rw-r--r--  patches.drivers/qlcnic-remove-assumption-that-vlan_tci-0.patch | 49
-rw-r--r--  patches.drivers/qlcnic-remove-set-but-not-used-variables-cur_rings-m.patch | 67
-rw-r--r--  patches.drivers/qlcnic-remove-set-but-not-used-variables-op-cmd_op.patch | 54
-rw-r--r--  patches.drivers/rtc-da9063-set-uie_unsupported-when-relevant.patch | 44
-rw-r--r--  patches.drivers/rtc-sh-Fix-invalid-alarm-warning-for-non-enabled-ala.patch | 48
-rw-r--r--  patches.drivers/scsi-hisi_sas-add-host-reset-interface-for-test | 73
-rw-r--r--  patches.drivers/scsi-hisi_sas-add-softreset-in-hisi_sas_i_t_nexus_reset | 45
-rw-r--r--  patches.drivers/scsi-hisi_sas-adjust-the-printk-format-of-functions-hisi_sas_init_device | 39
-rw-r--r--  patches.drivers/scsi-hisi_sas-allocate-different-sas-address-for-directly-attached-situation | 43
-rw-r--r--  patches.drivers/scsi-hisi_sas-don-t-fail-it-nexus-reset-for-open-reject-timeout | 81
-rw-r--r--  patches.drivers/scsi-hisi_sas-don-t-hard-reset-disk-during-controller-reset | 38
-rw-r--r--  patches.drivers/scsi-hisi_sas-fix-for-setting-the-phy-linkrate-when-disconnected | 237
-rw-r--r--  patches.drivers/scsi-hisi_sas-remedy-inconsistent-phy-down-state-in-software | 47
-rw-r--r--  patches.drivers/scsi-hisi_sas-remove-the-check-of-sas_dev-status-in-hisi_sas_i_t_nexus_reset | 121
-rw-r--r--  patches.drivers/scsi-hisi_sas-send-hard-reset-to-clear-the-previous-affiliation-of-stp-target-port | 172
-rw-r--r--  patches.drivers/scsi-hisi_sas-set-phy-linkrate-when-disconnected | 79
-rw-r--r--  patches.drivers/scsi-hisi_sas-some-misc-tidy-up | 315
-rw-r--r--  patches.drivers/scsi-hisi_sas-support-all-ras-events-with-msi-interrupts | 611
-rw-r--r--  patches.drivers/scsi-libsas-do-discovery-on-empty-phy-to-update-phy-info | 53
-rw-r--r--  patches.drivers/scsi-libsas-improve-vague-log-in-sas-rediscovery | 90
-rw-r--r--  patches.drivers/scsi-libsas-inject-revalidate-event-for-root-port-event | 110
-rw-r--r--  patches.drivers/scsi-libsas-print-expander-phy-indexes-in-decimal | 255
-rw-r--r--  patches.drivers/scsi-libsas-stop-hardcoding-sas-address-length | 152
-rw-r--r--  patches.drivers/scsi-libsas-support-sata-phy-connection-rate-unmatch-fixing-during-discovery | 97
-rw-r--r--  patches.drivers/scsi-libsas-try-to-retain-programmed-min-linkrate-for-sata-min-pathway-unmatch-fixing | 50
-rw-r--r--  patches.drivers/scsi-qedf-fixup-bit-operations.patch | 77
-rw-r--r--  patches.drivers/scsi-qedf-fixup-locking-in-qedf_restart_rport.patch | 42
-rw-r--r--  patches.drivers/scsi-qedf-missing-kref_put-in-qedf_xmit.patch | 37
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-28xx-flash-primary-secondary-status.patch | 873
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-Device-ID-for-ISP28XX.patch | 1542
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-First-Burst-support-for-FC-NVMe-dev.patch | 181
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-Serdes-support-for-ISP28XX.patch | 374
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-fw_attr-and-port_no-SysFS-node.patch | 76
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-new-FW-dump-template-entry-types.patch | 535
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-protection-mask-module-parameters.patch | 82
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-support-for-multiple-fwdump-templat.patch | 862
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-support-for-setting-port-speed.patch | 288
-rw-r--r--  patches.drivers/scsi-qla2xxx-Avoid-PCI-IRQ-affinity-mapping-when-mul.patch | 80
-rw-r--r--  patches.drivers/scsi-qla2xxx-Check-for-FW-started-flag-before-aborti.patch | 39
-rw-r--r--  patches.drivers/scsi-qla2xxx-Cleanups-for-NVRAM-Flash-read-write-pat.patch | 1545
-rw-r--r--  patches.drivers/scsi-qla2xxx-Correction-and-improvement-to-fwdt-proc.patch | 124
-rw-r--r--  patches.drivers/scsi-qla2xxx-Correctly-report-max-min-supported-spee.patch | 309
-rw-r--r--  patches.drivers/scsi-qla2xxx-Declare-local-functions-static.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-DMA-error-when-the-DIF-sg-buffer-cr.patch | 790
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-LUN-discovery-if-loop-id-is-not-ass.patch | 48
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-code-indentation-for-qla27xx_fwdt_e.patch | 80
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-panic-from-use-after-free-in-qla2x0.patch | 44
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-read-offset-in-qla24xx_load_risc_fl.patch | 235
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-routine-qla27xx_dump_-mpi-ram.patch | 259
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-unload-when-NVMe-devices-are-config.patch | 71
-rw-r--r--  patches.drivers/scsi-qla2xxx-Improve-several-kernel-doc-headers.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Introduce-a-switch-case-statement-in-ql.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Make-qla2x00_sysfs_write_nvram-easier-t.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Make-sure-that-qlafx00_ioctl_iosb_entry.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Move-debug-messages-before-sending-srb-.patch | 305
-rw-r--r--  patches.drivers/scsi-qla2xxx-Move-marker-request-behind-QPair.patch | 393
-rw-r--r--  patches.drivers/scsi-qla2xxx-NULL-check-before-some-freeing-function.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Prevent-SysFS-access-when-chip-is-down.patch | 35
-rw-r--r--  patches.drivers/scsi-qla2xxx-Prevent-multiple-ADISC-commands-per-ses.patch | 38
-rw-r--r--  patches.drivers/scsi-qla2xxx-Remove-FW-default-template.patch | 320
-rw-r--r--  patches.drivers/scsi-qla2xxx-Remove-a-set-but-not-used-variable.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Remove-two-arguments-from-qlafx00_error.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Secure-flash-update-support-for-ISP28XX.patch | 1064
-rw-r--r--  patches.drivers/scsi-qla2xxx-Set-remote-port-devloss-timeout-to-0.patch | 66
-rw-r--r--  patches.drivers/scsi-qla2xxx-Simplification-of-register-address-used.patch | 264
-rw-r--r--  patches.drivers/scsi-qla2xxx-Simplify-conditional-check-again.patch | 48
-rw-r--r--  patches.drivers/scsi-qla2xxx-Split-the-__qla2x00_abort_all_cmds-func.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-Update-driver-version-to-10.00.00.14-k.patch | 30
-rw-r--r--  patches.drivers/scsi-qla2xxx-Update-driver-version-to-10.01.00.15-k.patch | 35
-rw-r--r--  patches.drivers/scsi-qla2xxx-Update-flash-read-write-routine.patch | 1330
-rw-r--r--  patches.drivers/scsi-qla2xxx-avoid-printf-format-warning.patch | 66
-rw-r--r--  patches.drivers/scsi-qla2xxx-check-for-kstrtol-failure.patch | 37
-rw-r--r--  patches.drivers/scsi-qla2xxx-fully-convert-to-the-generic-DMA-API.patch | 2
-rw-r--r--  patches.drivers/scsi-qla2xxx-no-need-to-check-return-value-of-debugf.patch | 108
-rw-r--r--  patches.drivers/scsi-qla2xxx-remove-redundant-null-check-on-pointer-.patch | 63
-rw-r--r--  patches.drivers/scsi-qla2xxx-use-lower_32_bits-and-upper_32_bits-ins.patch | 2
-rw-r--r--  patches.drivers/xsk-export-xdp_get_umem_from_qid.patch | 31
-rw-r--r--  patches.drm/0001-drm-vmwgfx-NULL-pointer-dereference-from-vmw_cmd_dx_.patch | 36
-rw-r--r--  patches.drm/0001-fbdev-fix-WARNING-in-__alloc_pages_nodemask-bug.patch | 54
-rw-r--r--  patches.drm/0001-fbdev-fix-divide-error-in-fb_var_to_videomode.patch | 84
-rw-r--r--  patches.drm/0002-drm-i915-gvt-Tiled-Resources-mmios-are-in-context-mm.patch | 41
-rw-r--r--  patches.drm/0003-drm-i915-gvt-add-0x4dfc-to-gen9-save-restore-list.patch | 30
-rw-r--r--  patches.drm/0004-drm-etnaviv-lock-MMU-while-dumping-core.patch | 51
-rw-r--r--  patches.drm/drm-edid-Fix-a-missing-check-bug-in-drm_load_edid_firmware.patch | 38
-rw-r--r--  patches.fixes/0001-Documentation-Add-MDS-vulnerability-documentation.patch | 353
-rw-r--r--  patches.fixes/0001-dt-bindings-clock-r8a7795-Remove-CSIREF-clock.patch | 38
-rw-r--r--  patches.fixes/0001-dt-bindings-clock-r8a7796-Remove-CSIREF-clock.patch | 38
-rw-r--r--  patches.fixes/0001-dt-bindings-net-Add-binding-for-the-external-clock-f.patch | 48
-rw-r--r--  patches.fixes/0001-dt-bindings-rtc-sun6i-rtc-Fix-register-range-in-exam.patch | 30
-rw-r--r--  patches.fixes/0001-keys-safe-concurrent-user-session-uid-_keyring-acces.patch | 165
-rw-r--r--  patches.fixes/0001-mm-hwpoison-fix-thp-split-handing-in-soft_offline_in.patch | 76
-rw-r--r--  patches.fixes/0001-mwifiex-Abort-at-too-short-BSS-descriptor-element.patch | 89
-rw-r--r--  patches.fixes/0001-mwifiex-Fix-heap-overflow-in-mwifiex_uap_parse_tail_.patch | 115
-rw-r--r--  patches.fixes/0001-mwifiex-Fix-possible-buffer-overflows-at-parsing-bss.patch | 50
-rw-r--r--  patches.fixes/0001-of-fix-clang-Wunsequenced-for-be32_to_cpu.patch | 59
-rw-r--r--  patches.fixes/0001-p54-drop-device-reference-count-if-fails-to-enable-d.patch | 45
-rw-r--r--  patches.fixes/0001-xenbus-drop-useless-LIST_HEAD-in-xenbus_write_watch-.patch | 45
-rw-r--r--  patches.fixes/0002-btrfs-qgroup-Check-bg-while-resuming-relocation-to-a.patch | 93
-rw-r--r--  patches.fixes/KVM-s390-fix-memory-overwrites-when-not-using-SCA-en.patch | 40
-rw-r--r--  patches.fixes/KVM-s390-provide-io-interrupt-kvm_stat.patch | 29
-rw-r--r--  patches.fixes/KVM-s390-use-created_vcpus-in-more-places.patch | 44
-rw-r--r--  patches.fixes/KVM-s390-vsie-fix-8k-check-for-the-itdba.patch | 41
-rw-r--r--  patches.fixes/RDMA-rxe-Consider-skb-reserve-space-based-on-netdev-.patch | 32
-rw-r--r--  patches.fixes/bpf-add-map_lookup_elem_sys_only-for-lookups-from-sy.patch | 51
-rw-r--r--  patches.fixes/bpf-lru-avoid-messing-with-eviction-heuristics-upon-.patch | 104
-rw-r--r--  patches.fixes/configfs-Fix-use-after-free-when-accessing-sd-s_dent.patch | 60
-rw-r--r--  patches.fixes/ext4-avoid-panic-during-forced-reboot-due-to-aborted.patch | 39
-rw-r--r--  patches.fixes/ext4-fix-data-corruption-caused-by-overlapping-unali.patch | 54
-rw-r--r--  patches.fixes/ext4-make-sanity-check-in-mballoc-more-strict.patch | 39
-rw-r--r--  patches.fixes/ext4-wait-for-outstanding-dio-during-truncate-in-noj.patch | 62
-rw-r--r--  patches.fixes/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch | 161
-rw-r--r--  patches.fixes/fs-sync.c-sync_file_range-2-may-use-WB_SYNC_ALL-writ.patch | 87
-rw-r--r--  patches.fixes/fs-writeback.c-use-rcu_barrier-to-wait-for-inflight-.patch | 75
-rw-r--r--  patches.fixes/indirect-call-wrappers-helpers-to-speed-up-indirect-.patch | 85
-rw-r--r--  patches.fixes/jbd2-check-superblock-mapped-prior-to-committing.patch | 53
-rw-r--r--  patches.fixes/mm-add-try_get_page-helper-function.patch | 53
-rw-r--r--  patches.fixes/mm-fix-__gup_device_huge-vs-unmap.patch | 6
-rw-r--r--  patches.fixes/mm-gup-ensure-real-head-page-is-ref-counted-when-using-hugepages.patch | 101
-rw-r--r--  patches.fixes/mm-gup-remove-broken-vm_bug_on_page-compound-check-for-hugepages.patch | 67
-rw-r--r--  patches.fixes/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch | 49
-rw-r--r--  patches.fixes/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch | 234
-rw-r--r--  patches.fixes/mount-copy-the-port-field-into-the-cloned-nfs_server.patch | 31
-rw-r--r--  patches.fixes/net-unbreak-CONFIG_RETPOLINE-n-builds.patch | 37
-rw-r--r--  patches.fixes/net-use-indirect-call-wrappers-at-GRO-network-layer.patch | 111
-rw-r--r--  patches.fixes/net-use-indirect-call-wrappers-at-GRO-transport-laye.patch | 290
-rw-r--r--  patches.fixes/ocfs2-fix-ocfs2-read-inode-data-panic-in-ocfs2_iget.patch | 184
-rw-r--r--  patches.fixes/scsi-qla2xxx-fix-driver-unload-by-shutting-down-chip.patch | 83
-rw-r--r--  patches.fixes/udp-use-indirect-call-wrappers-for-GRO-socket-lookup.patch | 52
-rw-r--r--  patches.fixes/xfs-serialize-unaligned-dio-writes-against-all-other.patch | 94
-rw-r--r--  patches.kabi/arch-arm64-acpi-KABI-ignore-includes.patch | 2
-rw-r--r--  patches.kabi/bpf-add-map_lookup_elem_sys_only-for-lookups-from-sy.patch | 94
-rw-r--r--  patches.kabi/fs-prevent-page-refcount-overflow-in-pipe_buf_get-kabi.patch | 58
-rw-r--r--  patches.kabi/kabi-arm64-cpuhotplug-Reuse-other-arch-s-cpuhp_state.patch | 46
-rw-r--r--  patches.kabi/kabi-i40e-ignore-include.patch | 26
-rw-r--r--  patches.kabi/kabi-protect-dma-mapping-h-include.patch | 26
-rw-r--r--  patches.kabi/kabi-protect-struct-pci_dev.patch | 29
-rw-r--r--  patches.kabi/kabi-protect-struct_iw_cm_id.patch | 29
-rw-r--r--  patches.kabi/kabi-protect-struct_vf_info.patch | 26
-rw-r--r--  patches.kabi/memcg-make-it-work-on-sparse-non-0-node-systems-kabi.patch | 46
-rw-r--r--  patches.kabi/qla2xxx-kABI-fixes-for-v10.00.00.14-k.patch | 127
-rw-r--r--  patches.kabi/qla2xxx-kABI-fixes-for-v10.01.00.15-k.patch | 282
-rw-r--r--  patches.kabi/scsi-hisi-kabi-ignore-new-symbols | 52
-rw-r--r--  patches.kernel.org/4.4.164-131-mm-thp-relax-__GFP_THISNODE-for-MADV_HUGEPAGE.patch | 233
-rw-r--r--  patches.suse/0001-btrfs-extent-tree-Fix-a-bug-that-btrfs-is-unable-to-.patch | 86
-rw-r--r--  patches.suse/PCI-Factor-out-pcie_retrain_link-function.patch | 85
-rw-r--r--  patches.suse/PCI-Work-around-Pericom-PCIe-to-PCI-bridge-Retrain-L.patch | 100
-rw-r--r--  patches.suse/PCI-endpoint-Use-EPC-s-device-in-dma_alloc_coherent-.patch | 82
-rw-r--r--  patches.suse/bonding-fix-event-handling-for-stacked-bonds.patch | 45
-rw-r--r--  patches.suse/btrfs-don-t-double-unlock-on-error-in-btrfs_punch_ho.patch | 42
-rw-r--r--  patches.suse/btrfs-fix-fsync-not-persisting-changed-attributes-of.patch | 99
-rw-r--r--  patches.suse/btrfs-fix-race-between-ranged-fsync-and-writeback-of.patch | 245
-rw-r--r--  patches.suse/btrfs-fix-race-updating-log-root-item-during-fsync.patch | 126
-rw-r--r--  patches.suse/btrfs-fix-wrong-ctime-and-mtime-of-a-directory-after.patch | 85
-rw-r--r--  patches.suse/btrfs-reloc-also-queue-orphan-reloc-tree-for-cleanup-to-avoid-bug_on.patch | 137
-rw-r--r--  patches.suse/btrfs-tree-checker-detect-file-extent-items-with-ove.patch | 114
-rw-r--r--  patches.suse/ftrace-x86_64-emulate-call-function-while-updating-in-breakpoint-handler.patch | 150
-rw-r--r--  patches.suse/ipv4-add-sanity-checks-in-ipv4_link_failure.patch | 152
-rw-r--r--  patches.suse/ipv4-ensure-rcu_read_lock-in-ipv4_link_failure.patch | 86
-rw-r--r--  patches.suse/ipv4-ip_do_fragment-Preserve-skb_iif-during-fragment.patch | 40
-rw-r--r--  patches.suse/ipv4-recompile-ip-options-in-ipv4_link_failure.patch | 40
-rw-r--r--  patches.suse/ipv4-set-the-tcp_min_rtt_wlen-range-from-0-to-one-da.patch | 88
-rw-r--r--  patches.suse/kernel-sys.c-prctl-fix-false-positive-in-validate_pr.patch | 49
-rw-r--r--  patches.suse/livepatch-convert-error-about-unsupported-reliable-stacktrace-into-a-warning.patch | 47
-rw-r--r--  patches.suse/livepatch-remove-custom-kobject-state-handling.patch | 215
-rw-r--r--  patches.suse/livepatch-remove-duplicated-code-for-early-initialization.patch | 127
-rw-r--r--  patches.suse/memcg-make-it-work-on-sparse-non-0-node-systems.patch | 93
-rw-r--r--  patches.suse/mlxsw-spectrum-Fix-autoneg-status-in-ethtool.patch | 42
-rw-r--r--  patches.suse/net-atm-Fix-potential-Spectre-v1-vulnerabilities.patch | 51
-rw-r--r--  patches.suse/net-dsa-bcm_sf2-fix-buffer-overflow-doing-set_rxnfc.patch | 41
-rw-r--r--  patches.suse/net-dsa-mv88e6xxx-fix-handling-of-upper-half-of-STAT.patch | 31
-rw-r--r--  patches.suse/net-fou-do-not-use-guehdr-after-iptunnel_pull_offloa.patch | 47
-rw-r--r--  patches.suse/net-mlx5e-ethtool-Remove-unsupported-SFP-EEPROM-high.patch | 47
-rw-r--r--  patches.suse/net-phy-marvell-Fix-buffer-overrun-with-stats-counte.patch | 49
-rw-r--r--  patches.suse/net-rds-exchange-of-8K-and-1M-pool.patch | 78
-rw-r--r--  patches.suse/net-rose-fix-unbound-loop-in-rose_loopback_timer.patch | 161
-rw-r--r--  patches.suse/net-stmmac-move-stmmac_check_ether_addr-to-driver-pr.patch | 44
-rw-r--r--  patches.suse/net-thunderx-don-t-allow-jumbo-frames-with-XDP.patch | 39
-rw-r--r--  patches.suse/net-thunderx-raise-XDP-MTU-to-1508.patch | 53
-rw-r--r--  patches.suse/nvme-flush-scan_work-when-resetting-controller.patch | 38
-rw-r--r--  patches.suse/objtool-fix-function-fallthrough-detection.patch | 59
-rw-r--r--  patches.suse/ptrace-take-into-account-saved_sigmask-in-PTRACE-GET.patch | 127
-rw-r--r--  patches.suse/qla2xxx-allow-irqbalance-control-in-non-MQ-mode.patch | 34
-rw-r--r--  patches.suse/scsi-zfcp-make-dix-experimental-disabled-and-independent-of-dif | 81
-rw-r--r--  patches.suse/sctp-avoid-running-the-sctp-state-machine-recursivel.patch | 163
-rw-r--r--  patches.suse/signal-Always-notice-exiting-tasks.patch | 62
-rw-r--r--  patches.suse/signal-Better-detection-of-synchronous-signals.patch | 115
-rw-r--r--  patches.suse/signal-Restore-the-stop-PTRACE_EVENT_EXIT.patch | 55
-rw-r--r--  patches.suse/stmmac-pci-Adjust-IOT2000-matching.patch | 50
-rw-r--r--  patches.suse/switchtec-Fix-unintended-mask-of-MRPC-event.patch | 43
-rw-r--r--  patches.suse/tcp-tcp_grow_window-needs-to-respect-tcp_space.patch | 61
-rw-r--r--  patches.suse/team-fix-possible-recursive-locking-when-add-slaves.patch | 52
-rw-r--r--  patches.suse/tipc-missing-entries-in-name-table-of-publications.patch | 41
-rw-r--r--  patches.suse/tracing-fix-buffer_ref-pipe-ops.patch | 27
-rw-r--r--  patches.suse/tracing-fix-partial-reading-of-trace-event-s-id-file.patch | 77
-rw-r--r--  patches.suse/userfaultfd-use-RCU-to-free-the-task-struct-when-for.patch | 132
-rw-r--r--  patches.suse/vhost-reject-zero-size-iova-range.patch | 36
-rw-r--r--  patches.suse/x86_64-add-gap-to-int3-to-allow-for-call-emulation.patch | 73
-rw-r--r--  patches.suse/x86_64-allow-breakpoints-to-emulate-call-instructions.patch | 91
-rwxr-xr-x  scripts/bugzilla-create | 1
-rw-r--r--  scripts/git_sort/README.md | 13
-rwxr-xr-x  scripts/git_sort/git_sort.py | 15
-rw-r--r--  scripts/git_sort/lib.py | 12
-rwxr-xr-x  scripts/git_sort/merge_tool.py | 3
-rw-r--r--  scripts/git_sort/pygit2_wrapper.py | 30
-rwxr-xr-x  scripts/git_sort/qcp.py | 8
-rwxr-xr-x  scripts/git_sort/qdupcheck.py | 3
-rwxr-xr-x  scripts/git_sort/series_insert.py | 3
-rwxr-xr-x  scripts/git_sort/series_sort.py | 9
-rw-r--r--  scripts/git_sort/tests/sle12-sp2/Dockerfile | 2
-rw-r--r--  scripts/git_sort/tests/support.py | 3
-rwxr-xr-x  scripts/git_sort/tests/test_git_sort.py | 3
-rwxr-xr-x  scripts/git_sort/tests/test_quilt_mode.py | 3
-rwxr-xr-x  scripts/git_sort/tests/test_series_insert.py | 3
-rwxr-xr-x  scripts/git_sort/tests/test_series_sort.py | 3
-rwxr-xr-x  scripts/git_sort/update_clone.py | 3
-rw-r--r--  series.conf | 690
-rw-r--r--  supported.conf | 1
737 files changed, 90907 insertions, 456 deletions
diff --git a/blacklist.conf b/blacklist.conf
index 097ac8f80c..4f63f0258a 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -1041,3 +1041,22 @@ c8ea3663f7a8e6996d44500ee818c9330ac4fd88 # virt/fsl: no supported platform
6a024330650e24556b8a18cc654ad00cfecf6c6c # virt/fsl: no supported platform
92ff42645028fa6f9b8aa767718457b9264316b4 # ipvlan: reverted in below
918150cbd6103199fe326e8b1462a7f0d81475e4 # ipvlan: reverting the above
+2100e3ca3676e894fa48b8f6f01d01733387fe81 # Kconfig only and our kernels compile
+e5d01196c0428a206f307e9ee5f6842964098ff0 # bug requires e50e5129f384 "ext4: xattr-in-inode support"
+08fc98a4d6424af66eb3ac4e2cedd2fc927ed436 # bug requires e08ac99fa2a2 "ext4: add largedir feature"
+8ea58f1e8b11cca3087b294779bf5959bf89cc10 # not needed. We can happily use just AR.
+0294e6f4a0006856e1f36b8cd8fa088d9e499e98 # kbuild: not a bugfix
+954b4b752a4c4e963b017ed8cef4c453c5ed308d # ARCH_RENESAS = n
+be20bbcb0a8cb5597cc62b3e28d275919f3431df # ARCH_RENESAS = n
+b80a2bfce85e1051056d98d04ecb2d0b55cbbc1c # fixes 2610e8894663 which we don't have
+d8f945bf8096375f458683b5718722a2d5dda2f0 # Temporarily disabling until qla2xxx has been updated
+43a0541e312f7136e081e6bf58f6c8a2e9672688 # We don't build the tegra-smmu driver
+8069053880e0ee3a75fd6d7e0a30293265fe3de4 # sm712fb driver not enabled: fbdev: sm712fb: fix white screen of death on reboot, don't set CR3B-CR3F
+5481115e25e42b9215f2619452aa99c95f08492f # sm712fb driver not enabled: fbdev: sm712fb: fix brightness control on reboot, don't set SR30
+dcf9070595e100942c539e229dde4770aaeaa4e9 # sm712fb driver not enabled: fbdev: sm712fb: fix VRAM detection, don't set SR70/71/74/75
+ec1587d5073f29820e358f3a383850d61601d981 # sm712fb driver not enabled: fbdev: sm712fb: fix boot screen glitch when sm712fb replaces VGA
+9e0e59993df0601cddb95c4f6c61aa3d5e753c00 # sm712fb driver not enabled: fbdev: sm712fb: fix crashes during framebuffer writes by correctly mapping VRAM
+f627caf55b8e735dcec8fa6538e9668632b55276 # sm712fb driver not enabled: fbdev: sm712fb: fix crashes and garbled display during DPMS modesetting
+6053d3a4793e5bde6299ac5388e76a3bf679ff65 # sm712fb driver not enabled: fbdev: sm712fb: fix support for 1024x768-16 mode
+4ed7d2ccb7684510ec5f7a8f7ef534bc6a3d55b2 # sm712fb driver not enabled: fbdev: sm712fb: use 1024x768 by default on non-MIPS, fix garbled display
+2f0799a0ffc033bf3cc82d5032acc3ec633464c2 # this reverts ac5b2c18911f and there is a disagreement over this policy. We want to have ac5b2c18911f applied
diff --git a/config/arm64/default b/config/arm64/default
index 51e15d020a..2401bdc425 100644
--- a/config/arm64/default
+++ b/config/arm64/default
@@ -7971,6 +7971,7 @@ CONFIG_ARM_DSU_PMU=m
CONFIG_HISI_PMU=y
CONFIG_QCOM_L2_PMU=y
CONFIG_QCOM_L3_PMU=y
+CONFIG_THUNDERX2_PMU=m
CONFIG_XGENE_PMU=y
CONFIG_ARM_SPE_PMU=m
CONFIG_RAS=y
diff --git a/kabi/severities b/kabi/severities
index 5948a9282a..ed91247c4c 100644
--- a/kabi/severities
+++ b/kabi/severities
@@ -33,3 +33,16 @@ fs/ceph PASS
# intermodule syms shared between cxgb4 and cxgb4vf
drivers/net/ethernet/chelsio/cxgb4/* PASS
drivers/net/ethernet/chelsio/cxgb4vf/* PASS
+
+# inter-module symbols for qed/qede/qedf/qedi/qedr
+drivers/net/ethernet/qlogic/qed/* PASS
+drivers/net/ethernet/qlogic/qede/* PASS
+drivers/scsi/qedf/* PASS
+drivers/scsi/qedi/* PASS
+drivers/infiniband/hw/qedr/* PASS
+
+# inter-module symbols for hns3
+drivers/net/ethernet/hisilicon/hns3/* PASS
+drivers/net/ethernet/hisilicon/hns3/hns3pf/* PASS
+drivers/net/ethernet/hisilicon/hns3/hns3vf/* PASS
+drivers/infiniband/hw/hns/* PASS
diff --git a/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch b/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch
index 15fa23008c..96adb9660e 100644
--- a/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch
+++ b/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch
@@ -3,7 +3,7 @@ Date: Fri, 21 Sep 2018 09:32:45 -0700
Subject: efi/arm: libstub: add a root memreserve config table
Git-commit: b844470f22061e8cd646cb355e85d2f518b2c913
Patch-mainline: v4.20-rc1
-References: bsc#1111147
+References: bsc#1111147 bsc#1117158 bsc#1134671
Installing UEFI configuration tables can only be done before calling
ExitBootServices(), so if we want to use the new MEMRESERVE config table
diff --git a/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch b/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch
index e78dc375aa..1ee43dc177 100644
--- a/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch
+++ b/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch
@@ -3,7 +3,7 @@ Date: Fri, 21 Sep 2018 09:32:46 -0700
Subject: efi: add API to reserve memory persistently across kexec reboot
Git-commit: a23d3bb05ccbd815c79293d2207fedede0b3515d
Patch-mainline: v4.20-rc1
-References: bsc#1111147
+References: bsc#1111147 bsc#1117158 bsc#1134671
Add kernel plumbing to reserve memory regions persistently on an EFI
system by adding entries to the MEMRESERVE linked list.
diff --git a/patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch b/patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch
new file mode 100644
index 0000000000..c20b3b865b
--- /dev/null
+++ b/patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch
@@ -0,0 +1,154 @@
+From 2125801ccce19249708ca3245d48998e70569ab8 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 25 Mar 2019 16:50:43 +0100
+Subject: [PATCH] ARM: iop: don't use using 64-bit DMA masks
+Git-commit: 2125801ccce19249708ca3245d48998e70569ab8
+Patch-mainline: v5.1-rc4
+References: bsc#1051510
+
+clang warns about statically defined DMA masks from the DMA_BIT_MASK
+macro with length 64:
+
+ arch/arm/mach-iop13xx/setup.c:303:35: error: shift count >= width of type [-Werror,-Wshift-count-overflow]
+ static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+ ^~~~~~~~~~~~~~~~
+ include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK'
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+ ^ ~~~
+
+The ones in iop shouldn't really be 64 bit masks, so changing them
+to what the driver can support avoids the warning.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ arch/arm/mach-iop13xx/setup.c | 8 ++++----
+ arch/arm/mach-iop13xx/tpmi.c | 10 +++++-----
+ arch/arm/plat-iop/adma.c | 6 +++---
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
+index 53c316f7301e..fe4932fda01d 100644
+--- a/arch/arm/mach-iop13xx/setup.c
++++ b/arch/arm/mach-iop13xx/setup.c
+@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
+ }
+ };
+
+-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
++static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
+ static struct iop_adma_platform_data iop13xx_adma_0_data = {
+ .hw_id = 0,
+ .pool_size = PAGE_SIZE,
+@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
+ .resource = iop13xx_adma_0_resources,
+ .dev = {
+ .dma_mask = &iop13xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop13xx_adma_0_data,
+ },
+ };
+@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
+ .resource = iop13xx_adma_1_resources,
+ .dev = {
+ .dma_mask = &iop13xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop13xx_adma_1_data,
+ },
+ };
+@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
+ .resource = iop13xx_adma_2_resources,
+ .dev = {
+ .dma_mask = &iop13xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop13xx_adma_2_data,
+ },
+ };
+diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
+index db511ec2b1df..116feb6b261e 100644
+--- a/arch/arm/mach-iop13xx/tpmi.c
++++ b/arch/arm/mach-iop13xx/tpmi.c
+@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
+ }
+ };
+
+-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
++u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
+ static struct platform_device iop13xx_tpmi_0_device = {
+ .name = "iop-tpmi",
+ .id = 0,
+@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
+ .resource = iop13xx_tpmi_0_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
+ .resource = iop13xx_tpmi_1_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
+ .resource = iop13xx_tpmi_2_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
+ .resource = iop13xx_tpmi_3_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
+index a4d1f8de3b5b..d9612221e484 100644
+--- a/arch/arm/plat-iop/adma.c
++++ b/arch/arm/plat-iop/adma.c
+@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
+ .resource = iop3xx_dma_0_resources,
+ .dev = {
+ .dma_mask = &iop3xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop3xx_dma_0_data,
+ },
+ };
+@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
+ .resource = iop3xx_dma_1_resources,
+ .dev = {
+ .dma_mask = &iop3xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop3xx_dma_1_data,
+ },
+ };
+@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
+ .resource = iop3xx_aau_resources,
+ .dev = {
+ .dma_mask = &iop3xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop3xx_aau_data,
+ },
+ };
+--
+2.16.4
+
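
An aside on the warning this patch (and the orion counterpart just below) silences: clang type-checks the shift arm of the DMA_BIT_MASK() ternary even when n == 64 selects the ~0ULL arm, so a statically initialized 64-bit mask trips -Wshift-count-overflow. A minimal userspace sketch (standard C only; not taken from the patches) reproduces the effect:

#include <stdint.h>

/* Same shape as the kernel macro. For n == 64 the ternary selects
 * ~0ULL, yet clang still diagnoses the (1ULL << (n)) arm when the
 * expression appears in a static initializer. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static uint64_t ok_mask = DMA_BIT_MASK(32);  /* 0x00000000ffffffff */
/* static uint64_t bad_mask = DMA_BIT_MASK(64);
 *     clang: shift count >= width of type [-Wshift-count-overflow] */

Capping the masks at 32 bits matches what these devices can actually address, so the change costs nothing.
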
diff --git a/patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch b/patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch
new file mode 100644
index 0000000000..68460d2c69
--- /dev/null
+++ b/patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch
@@ -0,0 +1,53 @@
+From cd92d74d67c811dc22544430b9ac3029f5bd64c5 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 25 Mar 2019 16:50:42 +0100
+Subject: [PATCH] ARM: orion: don't use using 64-bit DMA masks
+Git-commit: cd92d74d67c811dc22544430b9ac3029f5bd64c5
+Patch-mainline: v5.1-rc4
+References: bsc#1051510
+
+clang warns about statically defined DMA masks from the DMA_BIT_MASK
+macro with length 64:
+
+arch/arm/plat-orion/common.c:625:29: error: shift count >= width of type [-Werror,-Wshift-count-overflow]
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ ^~~~~~~~~~~~~~~~
+include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK'
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+The ones in orion shouldn't really be 64 bit masks, so changing them
+to what the driver can support avoids the warning.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ arch/arm/plat-orion/common.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index a6c81ce00f52..8647cb80a93b 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
+ .resource = orion_xor0_shared_resources,
+ .dev = {
+ .dma_mask = &orion_xor_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &orion_xor0_pdata,
+ },
+ };
+@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
+ .resource = orion_xor1_shared_resources,
+ .dev = {
+ .dma_mask = &orion_xor_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &orion_xor1_pdata,
+ },
+ };
+--
+2.16.4
+
diff --git a/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch b/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch
index f6cadadeb0..45dd19c57b 100644
--- a/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch
+++ b/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch
@@ -3,7 +3,7 @@ Date: Mon, 23 Jul 2018 10:57:32 +0900
Subject: arm64: acpi: fix alignment fault in accessing ACPI
Git-commit: 09ffcb0d718a0b100f0bed029b830987ecf53fab
Patch-mainline: v4.19-rc1
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
This is a fix against the issue that crash dump kernel may hang up
during booting, which can happen on any ACPI-based system with "ACPI
diff --git a/patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch b/patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch
new file mode 100644
index 0000000000..026e011e2b
--- /dev/null
+++ b/patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch
@@ -0,0 +1,312 @@
+From 357d065a44cdd77ed5ff35155a989f2a763e96ef Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 17 May 2019 01:40:02 +1000
+Subject: [PATCH] crypto: vmx - ghash: do nosimd fallback manually
+
+References: bsc#1135661, bsc#1137162
+Patch-mainline: v5.2-rc2
+Git-commit: 357d065a44cdd77ed5ff35155a989f2a763e96ef
+
+VMX ghash was using a fallback that did not support interleaving simd
+and nosimd operations, leading to failures in the extended test suite.
+
+If I understood correctly, Eric's suggestion was to use the same
+data format that the generic code uses, allowing us to call into it
+with the same contexts. I wasn't able to get that to work - I think
+there's a very different key structure and data layout being used.
+
+So instead steal the arm64 approach and perform the fallback
+operations directly if required.
+
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Cc: stable@vger.kernel.org # v4.1+
+Reported-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/crypto/vmx/ghash.c | 211 +++++++++++++++----------------------
+ 1 file changed, 86 insertions(+), 125 deletions(-)
+
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -1,22 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * GHASH routines supporting VMX instructions on the Power 8
+ *
+- * Copyright (C) 2015 International Business Machines Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; version 2 only.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ * Copyright (C) 2015, 2019 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
++ *
++ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
++ * mechanism. The new approach is based on arm64 code, which is:
++ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+ #include <linux/types.h>
+@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128
+ const u8 *in, size_t len);
+
+ struct p8_ghash_ctx {
++ /* key used by vector asm */
+ u128 htable[16];
+- struct crypto_shash *fallback;
++ /* key used by software fallback */
++ be128 key;
+ };
+
+ struct p8_ghash_desc_ctx {
+ u64 shash[2];
+ u8 buffer[GHASH_DIGEST_SIZE];
+ int bytes;
+- struct shash_desc fallback_desc;
+ };
+
+-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+-{
+- const char *alg = "ghash-generic";
+- struct crypto_shash *fallback;
+- struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+- fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+- if (IS_ERR(fallback)) {
+- printk(KERN_ERR
+- "Failed to allocate transformation for '%s': %ld\n",
+- alg, PTR_ERR(fallback));
+- return PTR_ERR(fallback);
+- }
+-
+- crypto_shash_set_flags(fallback,
+- crypto_shash_get_flags((struct crypto_shash
+- *) tfm));
+-
+- /* Check if the descsize defined in the algorithm is still enough. */
+- if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+- + crypto_shash_descsize(fallback)) {
+- printk(KERN_ERR
+- "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+- alg,
+- shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+- crypto_shash_descsize(fallback));
+- return -EINVAL;
+- }
+- ctx->fallback = fallback;
+-
+- return 0;
+-}
+-
+-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+-{
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+- if (ctx->fallback) {
+- crypto_free_shash(ctx->fallback);
+- ctx->fallback = NULL;
+- }
+-}
+-
+ static int p8_ghash_init(struct shash_desc *desc)
+ {
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->bytes = 0;
+ memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+- dctx->fallback_desc.tfm = ctx->fallback;
+- dctx->fallback_desc.flags = desc->flags;
+- return crypto_shash_init(&dctx->fallback_desc);
++ return 0;
+ }
+
+ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+- return crypto_shash_setkey(ctx->fallback, key, keylen);
++
++ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
++
++ return 0;
++}
++
++static inline void __ghash_block(struct p8_ghash_ctx *ctx,
++ struct p8_ghash_desc_ctx *dctx)
++{
++ if (!IN_INTERRUPT) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
++ gcm_ghash_p8(dctx->shash, ctx->htable,
++ dctx->buffer, GHASH_DIGEST_SIZE);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++ } else {
++ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
++ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++ }
++}
++
++static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
++ struct p8_ghash_desc_ctx *dctx,
++ const u8 *src, unsigned int srclen)
++{
++ if (!IN_INTERRUPT) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
++ gcm_ghash_p8(dctx->shash, ctx->htable,
++ src, srclen);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++ } else {
++ while (srclen >= GHASH_BLOCK_SIZE) {
++ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
++ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++ srclen -= GHASH_BLOCK_SIZE;
++ src += GHASH_BLOCK_SIZE;
++ }
++ }
+ }
+
+ static int p8_ghash_update(struct shash_desc *desc,
+@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+- if (IN_INTERRUPT) {
+- return crypto_shash_update(&dctx->fallback_desc, src,
+- srclen);
+- } else {
+- if (dctx->bytes) {
+- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+- memcpy(dctx->buffer + dctx->bytes, src,
+- srclen);
+- dctx->bytes += srclen;
+- return 0;
+- }
++ if (dctx->bytes) {
++ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+ memcpy(dctx->buffer + dctx->bytes, src,
+- GHASH_DIGEST_SIZE - dctx->bytes);
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable,
+- dctx->buffer, GHASH_DIGEST_SIZE);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- src += GHASH_DIGEST_SIZE - dctx->bytes;
+- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+- dctx->bytes = 0;
++ srclen);
++ dctx->bytes += srclen;
++ return 0;
+ }
+- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+- if (len) {
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- src += len;
+- srclen -= len;
+- }
+- if (srclen) {
+- memcpy(dctx->buffer, src, srclen);
+- dctx->bytes = srclen;
+- }
+- return 0;
++ memcpy(dctx->buffer + dctx->bytes, src,
++ GHASH_DIGEST_SIZE - dctx->bytes);
++
++ __ghash_block(ctx, dctx);
++
++ src += GHASH_DIGEST_SIZE - dctx->bytes;
++ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
++ dctx->bytes = 0;
++ }
++ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
++ if (len) {
++ __ghash_blocks(ctx, dctx, src, len);
++ src += len;
++ srclen -= len;
+ }
++ if (srclen) {
++ memcpy(dctx->buffer, src, srclen);
++ dctx->bytes = srclen;
++ }
++ return 0;
+ }
+
+ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_d
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+- if (IN_INTERRUPT) {
+- return crypto_shash_final(&dctx->fallback_desc, out);
+- } else {
+- if (dctx->bytes) {
+- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+- dctx->buffer[i] = 0;
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable,
+- dctx->buffer, GHASH_DIGEST_SIZE);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- dctx->bytes = 0;
+- }
+- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+- return 0;
++ if (dctx->bytes) {
++ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
++ dctx->buffer[i] = 0;
++ __ghash_block(ctx, dctx);
++ dctx->bytes = 0;
+ }
++ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
++ return 0;
+ }
+
+ struct shash_alg p8_ghash_alg = {
+@@ -215,11 +178,9 @@ struct shash_alg p8_ghash_alg = {
+ .cra_name = "ghash",
+ .cra_driver_name = "p8_ghash",
+ .cra_priority = 1000,
+- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+ .cra_module = THIS_MODULE,
+- .cra_init = p8_ghash_init_tfm,
+- .cra_exit = p8_ghash_exit_tfm,
+ },
+ };
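
Restated from the hunk above for readability (same logic as the patch's __ghash_block(), with comments added): the new design keeps a single digest state and picks the SIMD or scalar path per call, which is what lets simd and nosimd operations interleave.

static inline void __ghash_block(struct p8_ghash_ctx *ctx,
                                 struct p8_ghash_desc_ctx *dctx)
{
    if (!IN_INTERRUPT) {
        /* VSX path: fence off preemption and page faults while
         * the vector unit is live, then run the P8 assembly. */
        preempt_disable();
        pagefault_disable();
        enable_kernel_vsx();
        gcm_ghash_p8(dctx->shash, ctx->htable,
                     dctx->buffer, GHASH_DIGEST_SIZE);
        disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
    } else {
        /* Scalar fallback: identical math on the same state,
         * using the raw be128 key saved at setkey() time. */
        crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
        gf128mul_lle((be128 *)dctx->shash, &ctx->key);
    }
}

Because both paths mutate the same dctx->shash, an update from task context and one from interrupt context now compose correctly, which the old per-tfm crypto_shash fallback could not guarantee.
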
diff --git a/patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch b/patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch
new file mode 100644
index 0000000000..8ea5c7af21
--- /dev/null
+++ b/patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch
@@ -0,0 +1,112 @@
+From 5749f687b62ea74a42aaf0723da49a18247649db Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 9 Apr 2019 23:46:35 -0700
+Subject: [PATCH] crypto: vmx - return correct error code on failed setkey
+
+References: bsc#1135661, bsc#1137162
+Patch-mainline: v5.2-rc1
+Git-commit: 694e0db6600c12f8172efb51cd4b4bbade958562
+
+In the VMX implementations of AES and AES modes, return -EINVAL when an
+invalid key length is provided, rather than some unusual error code
+determined via a series of additions. This makes the behavior match the
+other AES implementations in the kernel's crypto API.
+
+Cc: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/crypto/vmx/aes.c | 7 ++++---
+ drivers/crypto/vmx/aes_cbc.c | 7 ++++---
+ drivers/crypto/vmx/aes_ctr.c | 5 +++--
+ drivers/crypto/vmx/aes_xts.c | 9 +++++----
+ 4 files changed, 16 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index b0cd5aff3822..5e85dfca8242 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -83,13 +83,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
++ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 668e285f1a64..bb01e62700af 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -86,13 +86,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
++ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index 386943e65a20..a9bac01ba2fb 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -88,8 +88,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
+index 16f6c0cef4ac..f9c224192802 100644
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -91,14 +91,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
+- ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+- ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
++ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
++ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+--
+2.20.1
+
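
A toy illustration of the switch from '+=' to '|=' (step functions invented for the sketch): summed errnos stop being errnos, while OR-ing keeps only the failed-or-not information, which is then reported uniformly.

#include <errno.h>

/* Each step returns 0 on success or a negative errno on failure. */
static int step_one(void)   { return 0; }
static int step_two(void)   { return -EINVAL; }
static int step_three(void) { return -EINVAL; }

int setkey_combined(void)
{
    int ret;

    ret  = step_one();
    ret |= step_two();
    ret |= step_three();  /* with '+=', two -EINVALs would sum to -44 */

    /* the OR of two different errnos is not meaningful either, so any
     * failure collapses to a single well-defined code */
    return ret ? -EINVAL : 0;
}
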
diff --git a/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch b/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch
index 6767501ce6..44b7c2efaf 100644
--- a/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch
+++ b/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch
@@ -3,7 +3,7 @@ Date: Fri, 18 Aug 2017 20:49:34 +0100
Subject: efi/arm: Don't mark ACPI reclaim memory as MEMBLOCK_NOMAP
Git-commit: f56ab9a5b73ca2aee777ccdf2d355ae2dd31db5a
Patch-mainline: v4.14-rc1
-References: bsc#1117158 bsc#1115688 bsc#1120566
+References: bsc#1117158 bsc#1115688 bsc#1120566 bsc#1134671
On ARM, regions of memory that are described by UEFI as having special
significance to the firmware itself are omitted from the linear mapping.
diff --git a/patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch b/patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch
new file mode 100644
index 0000000000..8fcb7cb394
--- /dev/null
+++ b/patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch
@@ -0,0 +1,122 @@
+From: Pu Wen <puwen@hygon.cn>
+Date: Sat, 23 Mar 2019 23:42:20 +0800
+Subject: x86/CPU/hygon: Fix phys_proc_id calculation logic for multi-die processors
+Git-commit: e0ceeae708cebf22c990c3d703a4ca187dc837f5
+Patch-mainline: v5.2-rc1
+References: fate#327735
+
+The Hygon family 18h multi-die processor platform supports 1, 2 or
+4-Dies per socket. The topology looks like this:
+
+ System View (with 1-Die 2-Socket):
+ |------------|
+ ------ -----
+ SOCKET0 | D0 | | D1 | SOCKET1
+ ------ -----
+
+ System View (with 2-Die 2-socket):
+ --------------------
+ | -------------|------
+ | | | |
+ ------------ ------------
+ SOCKET0 | D1 -- D0 | | D3 -- D2 | SOCKET1
+ ------------ ------------
+
+ System View (with 4-Die 2-Socket) :
+ --------------------
+ | -------------|------
+ | | | |
+ ------------ ------------
+ | D1 -- D0 | | D7 -- D6 |
+ | | \/ | | | | \/ | |
+ SOCKET0 | | /\ | | | | /\ | | SOCKET1
+ | D2 -- D3 | | D4 -- D5 |
+ ------------ ------------
+ | | | |
+ ------|------------| |
+ --------------------
+
+Currently
+
+ phys_proc_id = initial_apicid >> bits
+
+calculates the physical processor ID from the initial_apicid by shifting
+*bits*.
+
+However, this does not work for 1-Die and 2-Die 2-socket systems.
+
+According to document [1] section 2.1.11.1, the bits is the value of
+CPUID_Fn80000008_ECX[12:15]. The possible values are 4, 5 or 6 which
+mean:
+
+ 4 - 1 die
+ 5 - 2 dies
+ 6 - 3/4 dies.
+
+Hygon programs the initial ApicId the same way as AMD. The ApicId is
+read from CPUID_Fn00000001_EBX (see section 2.1.11.1 of reference [1])
+and the definition is as below (see section 2.1.10.2.1.3 of [1]):
+
+ -------------------------------------------------
+ Bit | 6 | 5 4 | 3 | 2 1 0 |
+ |-----------|---------|--------|----------------|
+ IDs | Socket ID | Node ID | CCX ID | Core/Thread ID |
+ -------------------------------------------------
+
+So for 3/4-Die configurations, the bits variable is 6, which is the same
+as the ApicID definition field.
+
+For 1-Die and 2-Die configurations, bits is 4 or 5, which will cause the
+right shifted result to not be exactly the value of socket ID.
+
+However, the socket ID should be obtained from ApicId[6]. To fix the
+problem and match the ApicID field definition, set the shift bits to 6
+for all Hygon family 18h multi-die CPUs.
+
+Because AMD doesn't have 2-Socket systems with 1-Die/2-Die processors
+(see reference [2]), this doesn't need to be changed on the AMD side but
+only for Hygon.
+
+Refs:
+[1] https://www.amd.com/system/files/TechDocs/54945_PPR_Family_17h_Models_00h-0Fh.pdf
+[2] https://www.amd.com/en/products/specifications/processors
+
+ [bp: heavily massage commit message. ]
+
+Signed-off-by: Pu Wen <puwen@hygon.cn>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
+Cc: Yazen Ghannam <yazen.ghannam@amd.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/1553355740-19999-1-git-send-email-puwen@hygon.cn
+---
+ arch/x86/kernel/cpu/hygon.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index cf25405444ab..415621ddb8a2 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -19,6 +19,8 @@
+
+ #include "cpu.h"
+
++#define APICID_SOCKET_ID_BIT 6
++
+ /*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
+@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
++ /* Socket ID is ApicId[6] for these processors. */
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++
+ cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+
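
The ApicId layout quoted above makes the fix mechanical: whatever CPUID_Fn80000008_ECX reports for the die count, the socket is always ApicId[6]. A standalone sketch (plain C; the sample value is invented):

#include <stdio.h>

#define APICID_SOCKET_ID_BIT 6  /* Socket ID is ApicId[6] */

int main(void)
{
    unsigned int apicid = 0x45;  /* 0b1000101: socket 1, CCX 0, core 5 */

    /* shifting by the fixed socket bit works for 1-, 2- and 4-die
     * parts alike; shifting by the old 'bits' value of 4 or 5 would
     * fold die bits into the socket number */
    printf("socket %u\n", apicid >> APICID_SOCKET_ID_BIT);  /* -> 1 */
    return 0;
}
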
diff --git a/patches.drivers/0001-crypto-qat-Remove-VLA-usage.patch b/patches.drivers/0001-crypto-qat-Remove-VLA-usage.patch
new file mode 100644
index 0000000000..4edf987e48
--- /dev/null
+++ b/patches.drivers/0001-crypto-qat-Remove-VLA-usage.patch
@@ -0,0 +1,49 @@
+From 1299c9cfae6dccd79e4e035cad44f99fdb828593 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 7 Aug 2018 14:18:41 -0700
+Subject: [PATCH] crypto: qat - Remove VLA usage
+Git-commit: 1299c9cfae6dccd79e4e035cad44f99fdb828593
+Patch-mainline: v4.20
+References: jsc#SLE-4818
+
+In the quest to remove all stack VLA usage from the kernel[1], this uses
+the new upper bound for the stack buffer. Also adds a sanity check.
+
+[1] https://lkml.kernel.org/r/CA+55aFzCG-zNmZwX4A2FQpadafLfEzK6CC=qPXydAacU1RqZWA@mail.gmail.com
+
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/crypto/qat/qat_common/qat_algs.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index 1138e41d6805..a28edf7b792f 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -153,8 +153,8 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+ struct sha512_state sha512;
+ int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+ int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+- char ipad[block_size];
+- char opad[block_size];
++ char ipad[MAX_ALGAPI_BLOCKSIZE];
++ char opad[MAX_ALGAPI_BLOCKSIZE];
+ __be32 *hash_state_out;
+ __be64 *hash512_state_out;
+ int i, offset;
+@@ -164,6 +164,10 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+ shash->tfm = ctx->hash_tfm;
+ shash->flags = 0x0;
+
++ if (WARN_ON(block_size > sizeof(ipad) ||
++ sizeof(ipad) != sizeof(opad)))
++ return -EINVAL;
++
+ if (auth_keylen > block_size) {
+ int ret = crypto_shash_digest(shash, auth_key,
+ auth_keylen, ipad);
+--
+2.16.4
+
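
The VLA-removal pattern itself, as a self-contained sketch (function name invented; the 160-byte bound and the HMAC pad constants match the kernel's): size the buffers to the compile-time maximum and reject anything larger at runtime.

#include <string.h>

#define MAX_ALGAPI_BLOCKSIZE 160  /* worst-case crypto API block size */

int prepare_hmac_pads(size_t block_size)
{
    char ipad[MAX_ALGAPI_BLOCKSIZE];  /* fixed size: no VLA on the stack */
    char opad[MAX_ALGAPI_BLOCKSIZE];

    /* the sanity check the patch adds (minus the WARN_ON splat) */
    if (block_size > sizeof(ipad))
        return -22;                   /* -EINVAL */

    memset(ipad, 0x36, block_size);   /* HMAC_IPAD_VALUE */
    memset(opad, 0x5c, block_size);   /* HMAC_OPAD_VALUE */
    /* ... hash over the pads as qat_alg_do_precomputes() does ... */
    return 0;
}
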
diff --git a/patches.drivers/0001-crypto-qat-Remove-unused-goto-label.patch b/patches.drivers/0001-crypto-qat-Remove-unused-goto-label.patch
new file mode 100644
index 0000000000..4e60b75306
--- /dev/null
+++ b/patches.drivers/0001-crypto-qat-Remove-unused-goto-label.patch
@@ -0,0 +1,33 @@
+From 920d7f7215d87005beb4aa2b90b9cb0b74b36947 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Fri, 8 Feb 2019 15:04:56 +0800
+Subject: [PATCH] crypto: qat - Remove unused goto label
+Git-commit: 920d7f7215d87005beb4aa2b90b9cb0b74b36947
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-4818
+
+This patch removes an unused label.
+
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Fixes: f0fcf9ade46a ("crypto: qat - no need to check return...")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/crypto/qat/qat_common/adf_transport.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
+index ac658ce46836..2136cbe4bf6c 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport.c
++++ b/drivers/crypto/qat/qat_common/adf_transport.c
+@@ -498,7 +498,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+
+ err_bank_all:
+ debugfs_remove(etr_data->debug);
+-err_bank_debug:
+ kfree(etr_data->banks);
+ err_bank:
+ kfree(etr_data);
+--
+2.16.4
+
diff --git a/patches.drivers/0001-crypto-qat-move-temp-buffers-off-the-stack.patch b/patches.drivers/0001-crypto-qat-move-temp-buffers-off-the-stack.patch
new file mode 100644
index 0000000000..2a83debe85
--- /dev/null
+++ b/patches.drivers/0001-crypto-qat-move-temp-buffers-off-the-stack.patch
@@ -0,0 +1,192 @@
+From cfa1d74495aa3cf240fd2b1fb45d43cc2a754a46 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Wed, 26 Sep 2018 11:51:59 +0200
+Subject: [PATCH] crypto: qat - move temp buffers off the stack
+Git-commit: cfa1d74495aa3cf240fd2b1fb45d43cc2a754a46
+Patch-mainline: v4.20
+References: jsc#SLE-4818
+
+Arnd reports that with Kees's latest VLA patches applied, the HMAC
+handling in the QAT driver uses a worst case estimate of 160 bytes
+for the SHA blocksize, allowing the compiler to determine the size
+of the stack frame at compile time and throw a warning:
+
+ drivers/crypto/qat/qat_common/qat_algs.c: In function 'qat_alg_do_precomputes':
+ drivers/crypto/qat/qat_common/qat_algs.c:257:1: error: the frame size
+ of 1112 bytes is larger than 1024 bytes [-Werror=frame-larger-than=]
+
+Given that this worst case estimate is only 32 bytes larger than the
+actual block size of SHA-512, the use of a VLA here was hiding the
+excessive size of the stack frame from the compiler, and so we should
+try to move these buffers off the stack.
+
+So move the ipad/opad buffers and the various SHA state descriptors
+into the tfm context struct. Since qat_alg_do_precomputes() is only
+called in the context of a setkey() operation, this should be safe.
+Using SHA512_BLOCK_SIZE for the size of the ipad/opad buffers allows
+them to be used by SHA-1/SHA-256 as well.
+
+Reported-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/crypto/qat/qat_common/qat_algs.c | 64 ++++++++++++++++----------------
+ 1 file changed, 31 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
+index a28edf7b792f..d2698299896f 100644
+--- a/drivers/crypto/qat/qat_common/qat_algs.c
++++ b/drivers/crypto/qat/qat_common/qat_algs.c
+@@ -113,6 +113,13 @@ struct qat_alg_aead_ctx {
+ struct crypto_shash *hash_tfm;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ struct qat_crypto_instance *inst;
++ union {
++ struct sha1_state sha1;
++ struct sha256_state sha256;
++ struct sha512_state sha512;
++ };
++ char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
++ char opad[SHA512_BLOCK_SIZE];
+ };
+
+ struct qat_alg_ablkcipher_ctx {
+@@ -148,41 +155,32 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+ unsigned int auth_keylen)
+ {
+ SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
+- struct sha1_state sha1;
+- struct sha256_state sha256;
+- struct sha512_state sha512;
+ int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+ int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+- char ipad[MAX_ALGAPI_BLOCKSIZE];
+- char opad[MAX_ALGAPI_BLOCKSIZE];
+ __be32 *hash_state_out;
+ __be64 *hash512_state_out;
+ int i, offset;
+
+- memset(ipad, 0, block_size);
+- memset(opad, 0, block_size);
++ memset(ctx->ipad, 0, block_size);
++ memset(ctx->opad, 0, block_size);
+ shash->tfm = ctx->hash_tfm;
+ shash->flags = 0x0;
+
+- if (WARN_ON(block_size > sizeof(ipad) ||
+- sizeof(ipad) != sizeof(opad)))
+- return -EINVAL;
+-
+ if (auth_keylen > block_size) {
+ int ret = crypto_shash_digest(shash, auth_key,
+- auth_keylen, ipad);
++ auth_keylen, ctx->ipad);
+ if (ret)
+ return ret;
+
+- memcpy(opad, ipad, digest_size);
++ memcpy(ctx->opad, ctx->ipad, digest_size);
+ } else {
+- memcpy(ipad, auth_key, auth_keylen);
+- memcpy(opad, auth_key, auth_keylen);
++ memcpy(ctx->ipad, auth_key, auth_keylen);
++ memcpy(ctx->opad, auth_key, auth_keylen);
+ }
+
+ for (i = 0; i < block_size; i++) {
+- char *ipad_ptr = ipad + i;
+- char *opad_ptr = opad + i;
++ char *ipad_ptr = ctx->ipad + i;
++ char *opad_ptr = ctx->opad + i;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
+ }
+@@ -190,7 +188,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+ if (crypto_shash_init(shash))
+ return -EFAULT;
+
+- if (crypto_shash_update(shash, ipad, block_size))
++ if (crypto_shash_update(shash, ctx->ipad, block_size))
+ return -EFAULT;
+
+ hash_state_out = (__be32 *)hash->sha.state1;
+@@ -198,22 +196,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+- if (crypto_shash_export(shash, &sha1))
++ if (crypto_shash_export(shash, &ctx->sha1))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+- *hash_state_out = cpu_to_be32(*(sha1.state + i));
++ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+- if (crypto_shash_export(shash, &sha256))
++ if (crypto_shash_export(shash, &ctx->sha256))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+- *hash_state_out = cpu_to_be32(*(sha256.state + i));
++ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+- if (crypto_shash_export(shash, &sha512))
++ if (crypto_shash_export(shash, &ctx->sha512))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
++ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+ break;
+ default:
+ return -EFAULT;
+@@ -222,7 +220,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+ if (crypto_shash_init(shash))
+ return -EFAULT;
+
+- if (crypto_shash_update(shash, opad, block_size))
++ if (crypto_shash_update(shash, ctx->opad, block_size))
+ return -EFAULT;
+
+ offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+@@ -231,28 +229,28 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+
+ switch (ctx->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+- if (crypto_shash_export(shash, &sha1))
++ if (crypto_shash_export(shash, &ctx->sha1))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+- *hash_state_out = cpu_to_be32(*(sha1.state + i));
++ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+- if (crypto_shash_export(shash, &sha256))
++ if (crypto_shash_export(shash, &ctx->sha256))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+- *hash_state_out = cpu_to_be32(*(sha256.state + i));
++ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+- if (crypto_shash_export(shash, &sha512))
++ if (crypto_shash_export(shash, &ctx->sha512))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
++ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
+ break;
+ default:
+ return -EFAULT;
+ }
+- memzero_explicit(ipad, block_size);
+- memzero_explicit(opad, block_size);
++ memzero_explicit(ctx->ipad, block_size);
++ memzero_explicit(ctx->opad, block_size);
+ return 0;
+ }
+
+--
+2.16.4
+
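
A design note on the layout above: the three export states can share storage because qat_alg_do_precomputes() uses at most one of them per call, and because it runs only from setkey() the per-tfm scratch is never touched concurrently. Condensed view of the resulting context (fields as in the hunk):

struct qat_alg_aead_ctx {
    /* ... tfm pointers and instance fields elided ... */
    union {                        /* sized by sha512_state, the largest */
        struct sha1_state   sha1;
        struct sha256_state sha256;
        struct sha512_state sha512;
    };
    char ipad[SHA512_BLOCK_SIZE];  /* big enough for SHA-1/SHA-256 too */
    char opad[SHA512_BLOCK_SIZE];
};
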
diff --git a/patches.drivers/0001-crypto-qat-no-need-to-check-return-value-of-debugfs_.patch b/patches.drivers/0001-crypto-qat-no-need-to-check-return-value-of-debugfs_.patch
new file mode 100644
index 0000000000..d7aa81f924
--- /dev/null
+++ b/patches.drivers/0001-crypto-qat-no-need-to-check-return-value-of-debugfs_.patch
@@ -0,0 +1,205 @@
+From f0fcf9ade46abd64091fab8891ff5f690f7c8d7d Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Tue, 22 Jan 2019 16:14:16 +0100
+Subject: [PATCH] crypto: qat - no need to check return value of debugfs_create
+ functions
+Git-commit: f0fcf9ade46abd64091fab8891ff5f690f7c8d7d
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-4818
+
+When calling debugfs functions, there is no need to ever check the
+return value. The function can work or not, but the code logic should
+never do something different based on this.
+
+Cc: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Conor McLoughlin <conor.mcloughlin@intel.com>
+Cc: Waiman Long <longman@redhat.com>
+Cc: qat-linux@intel.com
+Cc: linux-crypto@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Oliver Neukum <oneukum@suse.com>
+---
+ drivers/crypto/qat/qat_c3xxx/adf_drv.c | 5 -----
+ drivers/crypto/qat/qat_c3xxxvf/adf_drv.c | 5 -----
+ drivers/crypto/qat/qat_c62x/adf_drv.c | 5 -----
+ drivers/crypto/qat/qat_c62xvf/adf_drv.c | 5 -----
+ drivers/crypto/qat/qat_common/adf_cfg.c | 7 -------
+ drivers/crypto/qat/qat_common/adf_transport.c | 6 ------
+ drivers/crypto/qat/qat_common/adf_transport_debug.c | 15 ---------------
+ drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 5 -----
+ drivers/crypto/qat/qat_dh895xccvf/adf_drv.c | 5 -----
+ 9 files changed, 58 deletions(-)
+
+diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+index 763c2166ee0e..d937cc7248a5 100644
+--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
++++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+- if (!accel_dev->debugfs_dir) {
+- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+- ret = -EINVAL;
+- goto out_err;
+- }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+index 613c7d5644ce..1dc5ac859f7b 100644
+--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+- if (!accel_dev->debugfs_dir) {
+- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+- ret = -EINVAL;
+- goto out_err;
+- }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
+index 9cb832963357..2bc06c89d2fe 100644
+--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
++++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
+@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+- if (!accel_dev->debugfs_dir) {
+- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+- ret = -EINVAL;
+- goto out_err;
+- }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+index 278452b8ef81..a68358b31292 100644
+--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+- if (!accel_dev->debugfs_dir) {
+- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+- ret = -EINVAL;
+- goto out_err;
+- }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
+index d0879790561f..5c7fdb0fc53d 100644
+--- a/drivers/crypto/qat/qat_common/adf_cfg.c
++++ b/drivers/crypto/qat/qat_common/adf_cfg.c
+@@ -141,13 +141,6 @@ int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+ accel_dev->debugfs_dir,
+ dev_cfg_data,
+ &qat_dev_cfg_fops);
+- if (!dev_cfg_data->debug) {
+- dev_err(&GET_DEV(accel_dev),
+- "Failed to create qat cfg debugfs entry.\n");
+- kfree(dev_cfg_data);
+- accel_dev->cfg = NULL;
+- return -EFAULT;
+- }
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
+index 57d2622728a5..ac658ce46836 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport.c
++++ b/drivers/crypto/qat/qat_common/adf_transport.c
+@@ -486,12 +486,6 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+ /* accel_dev->debugfs_dir should always be non-NULL here */
+ etr_data->debug = debugfs_create_dir("transport",
+ accel_dev->debugfs_dir);
+- if (!etr_data->debug) {
+- dev_err(&GET_DEV(accel_dev),
+- "Unable to create transport debugfs entry\n");
+- ret = -ENOENT;
+- goto err_bank_debug;
+- }
+
+ for (i = 0; i < num_banks; i++) {
+ ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
+index 52340b9bb387..e794e9d97b2c 100644
+--- a/drivers/crypto/qat/qat_common/adf_transport_debug.c
++++ b/drivers/crypto/qat/qat_common/adf_transport_debug.c
+@@ -163,11 +163,6 @@ int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+ ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+ ring->bank->bank_debug_dir,
+ ring, &adf_ring_debug_fops);
+- if (!ring_debug->debug) {
+- pr_err("QAT: Failed to create ring debug entry.\n");
+- kfree(ring_debug);
+- return -EFAULT;
+- }
+ ring->ring_debug = ring_debug;
+ return 0;
+ }
+@@ -271,19 +266,9 @@ int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+
+ snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+ bank->bank_debug_dir = debugfs_create_dir(name, parent);
+- if (!bank->bank_debug_dir) {
+- pr_err("QAT: Failed to create bank debug dir.\n");
+- return -EFAULT;
+- }
+-
+ bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+ bank->bank_debug_dir, bank,
+ &adf_bank_debug_fops);
+- if (!bank->bank_debug_cfg) {
+- pr_err("QAT: Failed to create bank debug entry.\n");
+- debugfs_remove(bank->bank_debug_dir);
+- return -EFAULT;
+- }
+ return 0;
+ }
+
+diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+index 3a9708ef4ce2..b11bf8c0e683 100644
+--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
++++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+@@ -193,11 +193,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+- if (!accel_dev->debugfs_dir) {
+- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+- ret = -EINVAL;
+- goto out_err;
+- }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+index 3da0f951cb59..1b762eefc6c1 100644
+--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+@@ -177,11 +177,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ PCI_FUNC(pdev->devfn));
+
+ accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+- if (!accel_dev->debugfs_dir) {
+- dev_err(&pdev->dev, "Could not create debugfs dir %s\n", name);
+- ret = -EINVAL;
+- goto out_err;
+- }
+
+ /* Create device configuration table */
+ ret = adf_cfg_dev_add(accel_dev);
+--
+2.16.4
+
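
The pattern the patch leaves behind, as an illustrative fragment (kernel debugfs API; not compilable stand-alone): creation results are deliberately unchecked, since a missing debugfs entry costs diagnostics rather than functionality, and removal is safe regardless.

accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
/* no NULL/IS_ERR check: if creation failed, the driver still works,
 * it merely exposes no debugfs entries */

/* ... device lifetime ... */

debugfs_remove(accel_dev->debugfs_dir);  /* tolerates NULL and ERR_PTR */
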
diff --git a/patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch b/patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch
new file mode 100644
index 0000000000..13b705d15b
--- /dev/null
+++ b/patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch
@@ -0,0 +1,928 @@
+From: "Kulkarni, Ganapatrao" <Ganapatrao.Kulkarni@cavium.com>
+Date: Thu, 6 Dec 2018 11:51:31 +0000
+Subject: drivers/perf: Add Cavium ThunderX2 SoC UNCORE PMU driver
+
+Git-commit: 69c32972d59388c041268e8206e8eb1acff29b9a
+Patch-mainline: v5.0-rc1
+References: fate#323052,bsc#1117114
+
+This patch adds a perf driver for the PMU UNCORE devices DDR4 Memory
+Controller (DMC) and Level 3 Cache (L3C). Each PMU supports up to 4
+counters. All counters lack overflow interrupt and are
+sampled periodically.
+
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
+[will: consistent enum cpuhp_state naming]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ drivers/perf/Kconfig | 9
+ drivers/perf/Makefile | 1
+ drivers/perf/thunderx2_pmu.c | 861 +++++++++++++++++++++++++++++++++++++++++++
+ include/linux/cpuhotplug.h | 1
+ 4 files changed, 872 insertions(+)
+ create mode 100644 drivers/perf/thunderx2_pmu.c
+
+--- a/drivers/perf/Kconfig
++++ b/drivers/perf/Kconfig
+@@ -87,6 +87,15 @@ config QCOM_L3_PMU
+ Adds the L3 cache PMU into the perf events subsystem for
+ monitoring L3 cache events.
+
++config THUNDERX2_PMU
++ tristate "Cavium ThunderX2 SoC PMU UNCORE"
++ depends on ARCH_THUNDER2 && ARM64 && ACPI && NUMA
++ default m
++ help
++ Provides support for ThunderX2 UNCORE events.
++ The SoC has PMU support in its L3 cache controller (L3C) and
++ in the DDR4 Memory Controller (DMC).
++
+ config XGENE_PMU
+ depends on ARCH_XGENE
+ bool "APM X-Gene SoC PMU"
+--- a/drivers/perf/Makefile
++++ b/drivers/perf/Makefile
+@@ -6,5 +6,6 @@ obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_ac
+ obj-$(CONFIG_HISI_PMU) += hisilicon/
+ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
+ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
++obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
+ obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
+ obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+--- /dev/null
++++ b/drivers/perf/thunderx2_pmu.c
+@@ -0,0 +1,861 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * CAVIUM THUNDERX2 SoC PMU UNCORE
++ * Copyright (C) 2018 Cavium Inc.
++ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/cpuhotplug.h>
++#include <linux/perf_event.h>
++#include <linux/platform_device.h>
++
++/* Each ThunderX2 (TX2) socket has an L3C and a DMC UNCORE PMU device.
++ * Each UNCORE PMU device consists of 4 independent programmable counters.
++ * Counters are 32-bit and do not support an overflow interrupt;
++ * they need to be sampled before they overflow (i.e. every 2 seconds).
++ */
++
++#define TX2_PMU_MAX_COUNTERS 4
++#define TX2_PMU_DMC_CHANNELS 8
++#define TX2_PMU_L3_TILES 16
++
++#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC)
++#define GET_EVENTID(ev) ((ev->hw.config) & 0x1f)
++#define GET_COUNTERID(ev) ((ev->hw.idx) & 0x3)
++ /* 1 byte per counter (4 counters).
++ * The event id is encoded in bits [5:1] of each byte.
++ */
++#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1))
++
++#define L3C_COUNTER_CTL 0xA8
++#define L3C_COUNTER_DATA 0xAC
++#define DMC_COUNTER_CTL 0x234
++#define DMC_COUNTER_DATA 0x240
++
++/* L3C event IDs */
++#define L3_EVENT_READ_REQ 0xD
++#define L3_EVENT_WRITEBACK_REQ 0xE
++#define L3_EVENT_INV_N_WRITE_REQ 0xF
++#define L3_EVENT_INV_REQ 0x10
++#define L3_EVENT_EVICT_REQ 0x13
++#define L3_EVENT_INV_N_WRITE_HIT 0x14
++#define L3_EVENT_INV_HIT 0x15
++#define L3_EVENT_READ_HIT 0x17
++#define L3_EVENT_MAX 0x18
++
++/* DMC event IDs */
++#define DMC_EVENT_COUNT_CYCLES 0x1
++#define DMC_EVENT_WRITE_TXNS 0xB
++#define DMC_EVENT_DATA_TRANSFERS 0xD
++#define DMC_EVENT_READ_TXNS 0xF
++#define DMC_EVENT_MAX 0x10
++
++enum tx2_uncore_type {
++ PMU_TYPE_L3C,
++ PMU_TYPE_DMC,
++ PMU_TYPE_INVALID,
++};
++
++/*
++ * Each socket has 2 uncore PMU devices (DMC and L3C);
++ * each device has 4 counters.
++ */
++struct tx2_uncore_pmu {
++ struct hlist_node hpnode;
++ struct list_head entry;
++ struct pmu pmu;
++ char *name;
++ int node;
++ int cpu;
++ u32 max_counters;
++ u32 prorate_factor;
++ u32 max_events;
++ u64 hrtimer_interval;
++ void __iomem *base;
++ DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
++ struct perf_event *events[TX2_PMU_MAX_COUNTERS];
++ struct device *dev;
++ struct hrtimer hrtimer;
++ const struct attribute_group **attr_groups;
++ enum tx2_uncore_type type;
++ void (*init_cntr_base)(struct perf_event *event,
++ struct tx2_uncore_pmu *tx2_pmu);
++ void (*stop_event)(struct perf_event *event);
++ void (*start_event)(struct perf_event *event, int flags);
++};
++
++static LIST_HEAD(tx2_pmus);
++
++static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
++{
++ return container_of(pmu, struct tx2_uncore_pmu, pmu);
++}
++
++PMU_FORMAT_ATTR(event, "config:0-4");
++
++static struct attribute *l3c_pmu_format_attrs[] = {
++ &format_attr_event.attr,
++ NULL,
++};
++
++static struct attribute *dmc_pmu_format_attrs[] = {
++ &format_attr_event.attr,
++ NULL,
++};
++
++static const struct attribute_group l3c_pmu_format_attr_group = {
++ .name = "format",
++ .attrs = l3c_pmu_format_attrs,
++};
++
++static const struct attribute_group dmc_pmu_format_attr_group = {
++ .name = "format",
++ .attrs = dmc_pmu_format_attrs,
++};
++
++/*
++ * sysfs event attributes
++ */
++static ssize_t tx2_pmu_event_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dev_ext_attribute *eattr;
++
++ eattr = container_of(attr, struct dev_ext_attribute, attr);
++ return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
++}
++
++#define TX2_EVENT_ATTR(name, config) \
++ PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
++ config, tx2_pmu_event_show)
++
++TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
++TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
++TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
++TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
++TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
++TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
++TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
++TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);
++
++static struct attribute *l3c_pmu_events_attrs[] = {
++ &tx2_pmu_event_attr_read_request.attr.attr,
++ &tx2_pmu_event_attr_writeback_request.attr.attr,
++ &tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
++ &tx2_pmu_event_attr_inv_request.attr.attr,
++ &tx2_pmu_event_attr_evict_request.attr.attr,
++ &tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
++ &tx2_pmu_event_attr_inv_hit.attr.attr,
++ &tx2_pmu_event_attr_read_hit.attr.attr,
++ NULL,
++};
++
++TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
++TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
++TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
++TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);
++
++static struct attribute *dmc_pmu_events_attrs[] = {
++ &tx2_pmu_event_attr_cnt_cycles.attr.attr,
++ &tx2_pmu_event_attr_write_txns.attr.attr,
++ &tx2_pmu_event_attr_data_transfers.attr.attr,
++ &tx2_pmu_event_attr_read_txns.attr.attr,
++ NULL,
++};
++
++static const struct attribute_group l3c_pmu_events_attr_group = {
++ .name = "events",
++ .attrs = l3c_pmu_events_attrs,
++};
++
++static const struct attribute_group dmc_pmu_events_attr_group = {
++ .name = "events",
++ .attrs = dmc_pmu_events_attrs,
++};
++
++/*
++ * sysfs cpumask attributes
++ */
++static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
++ return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
++}
++static DEVICE_ATTR_RO(cpumask);
++
++static struct attribute *tx2_pmu_cpumask_attrs[] = {
++ &dev_attr_cpumask.attr,
++ NULL,
++};
++
++static const struct attribute_group pmu_cpumask_attr_group = {
++ .attrs = tx2_pmu_cpumask_attrs,
++};
++
++/*
++ * Per PMU device attribute groups
++ */
++static const struct attribute_group *l3c_pmu_attr_groups[] = {
++ &l3c_pmu_format_attr_group,
++ &pmu_cpumask_attr_group,
++ &l3c_pmu_events_attr_group,
++ NULL
++};
++
++static const struct attribute_group *dmc_pmu_attr_groups[] = {
++ &dmc_pmu_format_attr_group,
++ &pmu_cpumask_attr_group,
++ &dmc_pmu_events_attr_group,
++ NULL
++};
++
++static inline u32 reg_readl(unsigned long addr)
++{
++ return readl((void __iomem *)addr);
++}
++
++static inline void reg_writel(u32 val, unsigned long addr)
++{
++ writel(val, (void __iomem *)addr);
++}
++
++static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
++{
++ int counter;
++
++ counter = find_first_zero_bit(tx2_pmu->active_counters,
++ tx2_pmu->max_counters);
++ if (counter == tx2_pmu->max_counters)
++ return -ENOSPC;
++
++ set_bit(counter, tx2_pmu->active_counters);
++ return counter;
++}
++
++static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
++{
++ clear_bit(counter, tx2_pmu->active_counters);
++}
++
++static void init_cntr_base_l3c(struct perf_event *event,
++ struct tx2_uncore_pmu *tx2_pmu)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ /* counter ctrl/data registers are laid out at a stride of 8 */
++ hwc->config_base = (unsigned long)tx2_pmu->base
++ + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
++ hwc->event_base = (unsigned long)tx2_pmu->base
++ + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
++}
++
++static void init_cntr_base_dmc(struct perf_event *event,
++ struct tx2_uncore_pmu *tx2_pmu)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ hwc->config_base = (unsigned long)tx2_pmu->base
++ + DMC_COUNTER_CTL;
++ /* counter data registers are laid out at a stride of 0xc */
++ hwc->event_base = (unsigned long)tx2_pmu->base
++ + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
++}
++
++static void uncore_start_event_l3c(struct perf_event *event, int flags)
++{
++ u32 val;
++ struct hw_perf_event *hwc = &event->hw;
++
++ /* event id encoded in bits [07:03] */
++ val = GET_EVENTID(event) << 3;
++ reg_writel(val, hwc->config_base);
++ local64_set(&hwc->prev_count, 0);
++ reg_writel(0, hwc->event_base);
++}
++
++static inline void uncore_stop_event_l3c(struct perf_event *event)
++{
++ reg_writel(0, event->hw.config_base);
++}
++
++static void uncore_start_event_dmc(struct perf_event *event, int flags)
++{
++ u32 val;
++ struct hw_perf_event *hwc = &event->hw;
++ int idx = GET_COUNTERID(event);
++ int event_id = GET_EVENTID(event);
++
++ /* Enable and start the counter.
++ * Each counter uses 8 bits; bits [05:01] select the event type.
++ */
++ val = reg_readl(hwc->config_base);
++ val &= ~DMC_EVENT_CFG(idx, 0x1f);
++ val |= DMC_EVENT_CFG(idx, event_id);
++ reg_writel(val, hwc->config_base);
++ local64_set(&hwc->prev_count, 0);
++ reg_writel(0, hwc->event_base);
++}
++
++static void uncore_stop_event_dmc(struct perf_event *event)
++{
++ u32 val;
++ struct hw_perf_event *hwc = &event->hw;
++ int idx = GET_COUNTERID(event);
++
++ /* clear event type(bits[05:01]) to stop counter */
++ val = reg_readl(hwc->config_base);
++ val &= ~DMC_EVENT_CFG(idx, 0x1f);
++ reg_writel(val, hwc->config_base);
++}
++
++static void tx2_uncore_event_update(struct perf_event *event)
++{
++ s64 prev, delta, new = 0;
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++ enum tx2_uncore_type type;
++ u32 prorate_factor;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ type = tx2_pmu->type;
++ prorate_factor = tx2_pmu->prorate_factor;
++
++ new = reg_readl(hwc->event_base);
++ prev = local64_xchg(&hwc->prev_count, new);
++
++ /* handles rollover of 32 bit counter */
++ delta = (u32)(((1UL << 32) - prev) + new);
++
++ /* DMC event data_transfers granularity is 16 bytes; convert to 64-byte units */
++ if (type == PMU_TYPE_DMC &&
++ GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
++ delta = delta/4;
++
++ /* The L3C and DMC have 16 and 8 interleaved channels respectively.
++ * The sampled value is for channel 0 and is multiplied by
++ * prorate_factor to get the count for the whole device.
++ */
++ local64_add(delta * prorate_factor, &event->count);
++}
++
++static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
++{
++ int i = 0;
++ struct acpi_tx2_pmu_device {
++ __u8 id[ACPI_ID_LEN];
++ enum tx2_uncore_type type;
++ } devices[] = {
++ {"CAV901D", PMU_TYPE_L3C},
++ {"CAV901F", PMU_TYPE_DMC},
++ {"", PMU_TYPE_INVALID}
++ };
++
++ while (devices[i].type != PMU_TYPE_INVALID) {
++ if (!strcmp(acpi_device_hid(adev), devices[i].id))
++ break;
++ i++;
++ }
++
++ return devices[i].type;
++}
++
++static bool tx2_uncore_validate_event(struct pmu *pmu,
++ struct perf_event *event, int *counters)
++{
++ if (is_software_event(event))
++ return true;
++ /* Reject groups spanning multiple HW PMUs. */
++ if (event->pmu != pmu)
++ return false;
++
++ *counters = *counters + 1;
++ return true;
++}
++
++/*
++ * Make sure the group of events can be scheduled at once
++ * on the PMU.
++ */
++static bool tx2_uncore_validate_event_group(struct perf_event *event)
++{
++ struct perf_event *sibling, *leader = event->group_leader;
++ int counters = 0;
++
++ if (event->group_leader == event)
++ return true;
++
++ if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
++ return false;
++
++ for_each_sibling_event(sibling, leader) {
++ if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
++ return false;
++ }
++
++ if (!tx2_uncore_validate_event(event->pmu, event, &counters))
++ return false;
++
++ /*
++ * If the group requires more counters than the HW has,
++ * it cannot ever be scheduled.
++ */
++ return counters <= TX2_PMU_MAX_COUNTERS;
++}
++
++
++static int tx2_uncore_event_init(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ /* Check the event attr type against this PMU's dynamically allocated type */
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
++ /*
++ * SOC PMU counters are shared across all cores.
++ * Therefore, it does not support per-process mode.
++ * Also, it does not support event sampling mode.
++ */
++ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
++ return -EINVAL;
++
++ /* We have no filtering of any kind */
++ if (event->attr.exclude_user ||
++ event->attr.exclude_kernel ||
++ event->attr.exclude_hv ||
++ event->attr.exclude_idle ||
++ event->attr.exclude_host ||
++ event->attr.exclude_guest)
++ return -EINVAL;
++
++ if (event->cpu < 0)
++ return -EINVAL;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ if (tx2_pmu->cpu >= nr_cpu_ids)
++ return -EINVAL;
++ event->cpu = tx2_pmu->cpu;
++
++ if (event->attr.config >= tx2_pmu->max_events)
++ return -EINVAL;
++
++ /* store event id */
++ hwc->config = event->attr.config;
++
++ /* Validate the group */
++ if (!tx2_uncore_validate_event_group(event))
++ return -EINVAL;
++
++ return 0;
++}
++
++static void tx2_uncore_event_start(struct perf_event *event, int flags)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ hwc->state = 0;
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++
++ tx2_pmu->start_event(event, flags);
++ perf_event_update_userpage(event);
++
++ /* Start timer for first event */
++ if (bitmap_weight(tx2_pmu->active_counters,
++ tx2_pmu->max_counters) == 1) {
++ hrtimer_start(&tx2_pmu->hrtimer,
++ ns_to_ktime(tx2_pmu->hrtimer_interval),
++ HRTIMER_MODE_REL_PINNED);
++ }
++}
++
++static void tx2_uncore_event_stop(struct perf_event *event, int flags)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ if (hwc->state & PERF_HES_UPTODATE)
++ return;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ tx2_pmu->stop_event(event);
++ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
++ hwc->state |= PERF_HES_STOPPED;
++ if (flags & PERF_EF_UPDATE) {
++ tx2_uncore_event_update(event);
++ hwc->state |= PERF_HES_UPTODATE;
++ }
++}
++
++static int tx2_uncore_event_add(struct perf_event *event, int flags)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++
++ /* Allocate a free counter */
++ hwc->idx = alloc_counter(tx2_pmu);
++ if (hwc->idx < 0)
++ return -EAGAIN;
++
++ tx2_pmu->events[hwc->idx] = event;
++ /* set counter control and data registers base address */
++ tx2_pmu->init_cntr_base(event, tx2_pmu);
++
++ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
++ if (flags & PERF_EF_START)
++ tx2_uncore_event_start(event, flags);
++
++ return 0;
++}
++
++static void tx2_uncore_event_del(struct perf_event *event, int flags)
++{
++ struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ struct hw_perf_event *hwc = &event->hw;
++
++ tx2_uncore_event_stop(event, PERF_EF_UPDATE);
++
++ /* clear the assigned counter */
++ free_counter(tx2_pmu, GET_COUNTERID(event));
++
++ perf_event_update_userpage(event);
++ tx2_pmu->events[hwc->idx] = NULL;
++ hwc->idx = -1;
++}
++
++static void tx2_uncore_event_read(struct perf_event *event)
++{
++ tx2_uncore_event_update(event);
++}
++
++static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++ int max_counters, idx;
++
++ tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
++ max_counters = tx2_pmu->max_counters;
++
++ if (bitmap_empty(tx2_pmu->active_counters, max_counters))
++ return HRTIMER_NORESTART;
++
++ for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
++ struct perf_event *event = tx2_pmu->events[idx];
++
++ tx2_uncore_event_update(event);
++ }
++ hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
++ return HRTIMER_RESTART;
++}
++
++static int tx2_uncore_pmu_register(
++ struct tx2_uncore_pmu *tx2_pmu)
++{
++ struct device *dev = tx2_pmu->dev;
++ char *name = tx2_pmu->name;
++
++ /* Perf event registration */
++ tx2_pmu->pmu = (struct pmu) {
++ .module = THIS_MODULE,
++ .attr_groups = tx2_pmu->attr_groups,
++ .task_ctx_nr = perf_invalid_context,
++ .event_init = tx2_uncore_event_init,
++ .add = tx2_uncore_event_add,
++ .del = tx2_uncore_event_del,
++ .start = tx2_uncore_event_start,
++ .stop = tx2_uncore_event_stop,
++ .read = tx2_uncore_event_read,
++ };
++
++ tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
++ "%s", name);
++
++ return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
++}
++
++static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
++{
++ int ret, cpu;
++
++ cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
++ cpu_online_mask);
++
++ tx2_pmu->cpu = cpu;
++ hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ tx2_pmu->hrtimer.function = tx2_hrtimer_callback;
++
++ ret = tx2_uncore_pmu_register(tx2_pmu);
++ if (ret) {
++ dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
++ tx2_pmu->name);
++ return -ENODEV;
++ }
++
++ /* register hotplug callback for the pmu */
++ ret = cpuhp_state_add_instance(
++ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
++ &tx2_pmu->hpnode);
++ if (ret) {
++ dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
++ return ret;
++ }
++
++ /* Add to list */
++ list_add(&tx2_pmu->entry, &tx2_pmus);
++
++ dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
++ tx2_pmu->pmu.name);
++ return ret;
++}
++
++static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
++ acpi_handle handle, struct acpi_device *adev, u32 type)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++ void __iomem *base;
++ struct resource res;
++ struct resource_entry *rentry;
++ struct list_head list;
++ int ret;
++
++ INIT_LIST_HEAD(&list);
++ ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
++ if (ret <= 0) {
++ dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
++ return NULL;
++ }
++
++ list_for_each_entry(rentry, &list, node) {
++ if (resource_type(rentry->res) == IORESOURCE_MEM) {
++ res = *rentry->res;
++ break;
++ }
++ }
++
++ if (!rentry->res)
++ return NULL;
++
++ acpi_dev_free_resource_list(&list);
++ base = devm_ioremap_resource(dev, &res);
++ if (IS_ERR(base)) {
++ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
++ return NULL;
++ }
++
++ tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
++ if (!tx2_pmu)
++ return NULL;
++
++ tx2_pmu->dev = dev;
++ tx2_pmu->type = type;
++ tx2_pmu->base = base;
++ tx2_pmu->node = dev_to_node(dev);
++ INIT_LIST_HEAD(&tx2_pmu->entry);
++
++ switch (tx2_pmu->type) {
++ case PMU_TYPE_L3C:
++ tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
++ tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
++ tx2_pmu->max_events = L3_EVENT_MAX;
++ tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
++ tx2_pmu->attr_groups = l3c_pmu_attr_groups;
++ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
++ "uncore_l3c_%d", tx2_pmu->node);
++ tx2_pmu->init_cntr_base = init_cntr_base_l3c;
++ tx2_pmu->start_event = uncore_start_event_l3c;
++ tx2_pmu->stop_event = uncore_stop_event_l3c;
++ break;
++ case PMU_TYPE_DMC:
++ tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
++ tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
++ tx2_pmu->max_events = DMC_EVENT_MAX;
++ tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
++ tx2_pmu->attr_groups = dmc_pmu_attr_groups;
++ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
++ "uncore_dmc_%d", tx2_pmu->node);
++ tx2_pmu->init_cntr_base = init_cntr_base_dmc;
++ tx2_pmu->start_event = uncore_start_event_dmc;
++ tx2_pmu->stop_event = uncore_stop_event_dmc;
++ break;
++ case PMU_TYPE_INVALID:
++ devm_kfree(dev, tx2_pmu);
++ return NULL;
++ }
++
++ return tx2_pmu;
++}
++
++static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
++ void *data, void **return_value)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++ struct acpi_device *adev;
++ enum tx2_uncore_type type;
++
++ if (acpi_bus_get_device(handle, &adev))
++ return AE_OK;
++ if (acpi_bus_get_status(adev) || !adev->status.present)
++ return AE_OK;
++
++ type = get_tx2_pmu_type(adev);
++ if (type == PMU_TYPE_INVALID)
++ return AE_OK;
++
++ tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
++ handle, adev, type);
++
++ if (!tx2_pmu)
++ return AE_ERROR;
++
++ if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
++ /* Can't add the PMU device, abort */
++ return AE_ERROR;
++ }
++ return AE_OK;
++}
++
++static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
++ struct hlist_node *hpnode)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ tx2_pmu = hlist_entry_safe(hpnode,
++ struct tx2_uncore_pmu, hpnode);
++
++ /* Pick this CPU if there is no CPU/PMU association yet and both
++ * are from the same node.
++ */
++ if ((tx2_pmu->cpu >= nr_cpu_ids) &&
++ (tx2_pmu->node == cpu_to_node(cpu)))
++ tx2_pmu->cpu = cpu;
++
++ return 0;
++}
++
++static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
++ struct hlist_node *hpnode)
++{
++ int new_cpu;
++ struct tx2_uncore_pmu *tx2_pmu;
++ struct cpumask cpu_online_mask_temp;
++
++ tx2_pmu = hlist_entry_safe(hpnode,
++ struct tx2_uncore_pmu, hpnode);
++
++ if (cpu != tx2_pmu->cpu)
++ return 0;
++
++ hrtimer_cancel(&tx2_pmu->hrtimer);
++ cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
++ cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
++ new_cpu = cpumask_any_and(
++ cpumask_of_node(tx2_pmu->node),
++ &cpu_online_mask_temp);
++
++ tx2_pmu->cpu = new_cpu;
++ if (new_cpu >= nr_cpu_ids)
++ return 0;
++ perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
++
++ return 0;
++}
++
++static const struct acpi_device_id tx2_uncore_acpi_match[] = {
++ {"CAV901C", 0},
++ {},
++};
++MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);
++
++static int tx2_uncore_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ acpi_handle handle;
++ acpi_status status;
++
++ set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));
++
++ if (!has_acpi_companion(dev))
++ return -ENODEV;
++
++ handle = ACPI_HANDLE(dev);
++ if (!handle)
++ return -EINVAL;
++
++ /* Walk through the tree for all PMU UNCORE devices */
++ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
++ tx2_uncore_pmu_add,
++ NULL, dev, NULL);
++ if (ACPI_FAILURE(status)) {
++ dev_err(dev, "failed to probe PMU devices\n");
++ return_ACPI_STATUS(status);
++ }
++
++ dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
++ return 0;
++}
++
++static int tx2_uncore_remove(struct platform_device *pdev)
++{
++ struct tx2_uncore_pmu *tx2_pmu, *temp;
++ struct device *dev = &pdev->dev;
++
++ if (!list_empty(&tx2_pmus)) {
++ list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
++ if (tx2_pmu->node == dev_to_node(dev)) {
++ cpuhp_state_remove_instance_nocalls(
++ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
++ &tx2_pmu->hpnode);
++ perf_pmu_unregister(&tx2_pmu->pmu);
++ list_del(&tx2_pmu->entry);
++ }
++ }
++ }
++ return 0;
++}
++
++static struct platform_driver tx2_uncore_driver = {
++ .driver = {
++ .name = "tx2-uncore-pmu",
++ .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
++ },
++ .probe = tx2_uncore_probe,
++ .remove = tx2_uncore_remove,
++};
++
++static int __init tx2_uncore_driver_init(void)
++{
++ int ret;
++
++ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
++ "perf/tx2/uncore:online",
++ tx2_uncore_pmu_online_cpu,
++ tx2_uncore_pmu_offline_cpu);
++ if (ret) {
++ pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret);
++ return ret;
++ }
++ ret = platform_driver_register(&tx2_uncore_driver);
++ if (ret)
++ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
++
++ return ret;
++}
++module_init(tx2_uncore_driver_init);
++
++static void __exit tx2_uncore_driver_exit(void)
++{
++ platform_driver_unregister(&tx2_uncore_driver);
++ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
++}
++module_exit(tx2_uncore_driver_exit);
++
++MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -145,6 +145,7 @@ enum cpuhp_state {
+ CPUHP_AP_PERF_ARM_L2X0_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
++ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
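tx2_uncore_event_update() above folds at most one 32-bit wraparound into the delta and then prorates the channel-0 sample to the whole device. A standalone user-space model of that arithmetic (illustrative only; tx2_delta() is a hypothetical helper, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    /* 32-bit unsigned subtraction absorbs a single rollover; it is
     * equivalent to the driver's (u32)(((1UL << 32) - prev) + new).
     */
    static uint64_t tx2_delta(uint32_t prev, uint32_t cur)
    {
            return (uint32_t)(cur - prev);
    }

    int main(void)
    {
            uint32_t prev = 0xfffffff0, cur = 0x10;   /* wrapped once */
            uint64_t delta = tx2_delta(prev, cur);    /* 0x20 */

            /* DMC data_transfers counts 16-byte units, so /4 converts
             * to 64-byte units; the result is then prorated across the
             * 8 interleaved DMC channels.
             */
            uint64_t count = (delta / 4) * 8;

            printf("delta=%llu count=%llu\n",
                   (unsigned long long)delta, (unsigned long long)count);
            return 0;
    }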
diff --git a/patches.arch/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch b/patches.drivers/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch
index 4dcac36f8c..bda4242c73 100644
--- a/patches.arch/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch
+++ b/patches.drivers/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch
@@ -4,7 +4,7 @@ Subject: efi: honour memory reservations passed via a linux specific config
table
Git-commit: 71e0940d52e107748b270213a01d3b1546657d74
Patch-mainline: v4.20-rc1
-References: bsc#1111147
+References: bsc#1111147 bsc#1117158 bsc#1134671
In order to allow the OS to reserve memory persistently across a
kexec, introduce a Linux-specific UEFI configuration table that
@@ -34,7 +34,7 @@ Signed-off-by: Matthias Brugger <mbrugger@suse.com>
};
EXPORT_SYMBOL(efi);
-@@ -463,6 +464,7 @@ static __initdata efi_config_table_type_
+@@ -462,6 +463,7 @@ static __initdata efi_config_table_type_
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
@@ -42,7 +42,7 @@ Signed-off-by: Matthias Brugger <mbrugger@suse.com>
{NULL_GUID, NULL, NULL},
};
-@@ -567,6 +569,29 @@ int __init efi_config_parse_tables(void
+@@ -566,6 +568,29 @@ int __init efi_config_parse_tables(void
early_memunmap(tbl, sizeof(*tbl));
}
diff --git a/patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch b/patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch
new file mode 100644
index 0000000000..5603f3cef6
--- /dev/null
+++ b/patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch
@@ -0,0 +1,72 @@
+From: "Kulkarni, Ganapatrao" <Ganapatrao.Kulkarni@cavium.com>
+Date: Thu, 6 Dec 2018 11:51:27 +0000
+Subject: Documentation: perf: Add documentation for ThunderX2 PMU uncore
+ driver
+
+Git-commit: d6310a3f3396e004bdb7a76787a2a3bbc643d0b7
+Patch-mainline: v5.0-rc1
+References: fate#323052,bsc#1117114
+
+The SoC has PMU support in its L3 cache controller (L3C) and in the
+DDR4 Memory Controller (DMC).
+
+Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
+[will: minor spelling and format fixes, dropped events list]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ Documentation/perf/thunderx2-pmu.txt | 41 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 41 insertions(+)
+ create mode 100644 Documentation/perf/thunderx2-pmu.txt
+
+diff --git a/Documentation/perf/thunderx2-pmu.txt b/Documentation/perf/thunderx2-pmu.txt
+new file mode 100644
+index 000000000000..dffc57143736
+--- /dev/null
++++ b/Documentation/perf/thunderx2-pmu.txt
+@@ -0,0 +1,41 @@
++Cavium ThunderX2 SoC Performance Monitoring Unit (PMU UNCORE)
++=============================================================
++
++The ThunderX2 SoC PMU consists of independent, system-wide, per-socket
++PMUs such as the Level 3 Cache (L3C) and DDR4 Memory Controller (DMC).
++
++The DMC has 8 interleaved channels and the L3C has 16 interleaved tiles.
++Events are counted for the default channel (i.e. channel 0) and prorated
++to the total number of channels/tiles.
++
++The DMC and L3C support up to 4 counters. Counters are independently
++programmable and can be started and stopped individually. Each counter
++can be set to a different event. Counters are 32-bit and do not support
++an overflow interrupt; they are read every 2 seconds.
++
++PMU UNCORE (perf) driver:
++
++The thunderx2_pmu driver registers per-socket perf PMUs for the DMC and
++L3C devices. Each PMU can be used to count up to 4 events
++simultaneously. The PMUs provide a description of their available events
++and configuration options under sysfs, see
++/sys/devices/uncore_<l3c_S/dmc_S/>; S is the socket id.
++
++The driver does not support sampling; therefore, "perf record" will not
++work. Per-task perf sessions are also not supported.
++
++Examples:
++
++# perf stat -a -e uncore_dmc_0/cnt_cycles/ sleep 1
++
++# perf stat -a -e \
++uncore_dmc_0/cnt_cycles/,\
++uncore_dmc_0/data_transfers/,\
++uncore_dmc_0/read_txns/,\
++uncore_dmc_0/write_txns/ sleep 1
++
++# perf stat -a -e \
++uncore_l3c_0/read_request/,\
++uncore_l3c_0/read_hit/,\
++uncore_l3c_0/inv_request/,\
++uncore_l3c_0/inv_hit/ sleep 1
+--
+2.11.0
+
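The sysfs layout described in this document is also enough to program the PMU directly, without the perf tool. A hedged user-space sketch (assumes a ThunderX2 system, sufficient privileges, and that CPU 0 belongs to socket 0; the encoding 0x1 is cnt_cycles as exported under events/):

    #include <linux/perf_event.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int type, fd;
            FILE *f = fopen("/sys/devices/uncore_dmc_0/type", "r");

            if (!f || fscanf(f, "%d", &type) != 1)
                    return 1;
            fclose(f);

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = type;           /* dynamic PMU type from sysfs */
            attr.config = 0x1;          /* events/cnt_cycles */

            /* Uncore events are system-wide: pid = -1, one CPU from
             * the socket being measured.
             */
            fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
            if (fd < 0)
                    return 1;

            sleep(1);
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("cnt_cycles: %lld\n", count);
            close(fd);
            return 0;
    }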
diff --git a/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch b/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch
index 9f726c1d09..40a12351c1 100644
--- a/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch
+++ b/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch
@@ -3,7 +3,7 @@ Date: Tue, 21 Aug 2018 22:37:33 +0200
Subject: ACPI: fix menuconfig presentation of ACPI submenu
Git-commit: f5d707ede37a962bc3cb9b3f8531a870dae29e46
Patch-mainline: v4.19-rc1
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
My fix for a recursive Kconfig dependency caused another issue where the
ACPI specific options end up in the top-level menu in 'menuconfig'. This
diff --git a/patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch b/patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch
new file mode 100644
index 0000000000..99a74ace30
--- /dev/null
+++ b/patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch
@@ -0,0 +1,66 @@
+From 0fbf21c3b36a9921467aa7525d2768b07f9f8fbb Mon Sep 17 00:00:00 2001
+From: Ayman Bagabas <ayman.bagabas@gmail.com>
+Date: Thu, 23 May 2019 05:30:11 -0400
+Subject: [PATCH] ALSA: hda/realtek - Enable micmute LED for Huawei laptops
+Git-commit: 0fbf21c3b36a9921467aa7525d2768b07f9f8fbb
+Patch-mainline: v5.2-rc3
+References: bsc#1051510
+
+Since this LED is found on all Huawei laptops, we can hook it to the
+huawei-wmi platform driver to control it.
+
+Also, some renames have been made to use the product name instead of the
+common name to avoid confusion.
+
+Fixes: 8ac51bbc4cfe ("ALSA: hda: fix front speakers on Huawei MBXP")
+Signed-off-by: Ayman Bagabas <ayman.bagabas@gmail.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1ca2a83b65cd..f1bac03e954b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5752,7 +5752,7 @@ enum {
+ ALC298_FIXUP_TPT470_DOCK,
+ ALC255_FIXUP_DUMMY_LINEOUT_VERB,
+ ALC255_FIXUP_DELL_HEADSET_MIC,
+- ALC256_FIXUP_HUAWEI_MBXP_PINS,
++ ALC256_FIXUP_HUAWEI_MACH_WX9_PINS,
+ ALC295_FIXUP_HP_X360,
+ ALC221_FIXUP_HP_HEADSET_MIC,
+ ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+@@ -6043,7 +6043,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
+- [ALC256_FIXUP_HUAWEI_MBXP_PINS] = {
++ [ALC256_FIXUP_HUAWEI_MACH_WX9_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ {0x12, 0x90a60130},
+@@ -7068,9 +7068,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+- SND_PCI_QUIRK(0x19e5, 0x3200, "Huawei MBX", ALC255_FIXUP_MIC_MUTE_LED),
+- SND_PCI_QUIRK(0x19e5, 0x3201, "Huawei MBX", ALC255_FIXUP_MIC_MUTE_LED),
+- SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
++ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+
+ #if 0
+@@ -7129,6 +7127,7 @@ static const struct snd_pci_quirk alc269_fixup_vendor_tbl[] = {
+ SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI),
++ SND_PCI_QUIRK_VENDOR(0x19e5, "Huawei Matebook", ALC255_FIXUP_MIC_MUTE_LED),
+ {}
+ };
+
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch b/patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch
new file mode 100644
index 0000000000..8ccf8c2537
--- /dev/null
+++ b/patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch
@@ -0,0 +1,72 @@
+From 9cb40eb184c4220d244a532bd940c6345ad9dbd9 Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Wed, 29 May 2019 12:41:38 +0800
+Subject: [PATCH] ALSA: hda/realtek - Improve the headset mic for Acer Aspire laptops
+Git-commit: 9cb40eb184c4220d244a532bd940c6345ad9dbd9
+Patch-mainline: v5.2-rc3
+References: bsc#1051510
+
+We met another Acer Aspire laptop with a problem on the headset mic:
+pin 0x19 is not set to the correct configuration for a mic, and the
+pin presence can't be detected after plugging in a headset. Kailang
+suggested that we set the coeff to enable the mic and apply
+ALC269_FIXUP_LIFEBOOK_EXTMIC. After doing that, both the headset-mic
+presence detection and the headset mic itself work well.
+
+The existing ALC255_FIXUP_ACER_MIC_NO_PRESENCE set the headset-mic
+jack to be a phantom jack. Now that the jack can support presence
+unsol events, improve it to make the jack a normal jack.
+
+https://bugs.launchpad.net/bugs/1821269
+
+Fixes: 5824ce8de7b1c ("ALSA: hda/realtek - Add support for Acer Aspire E5-475 headset mic")
+Cc: Chris Chiu <chiu@endlessm.com>
+Cc: Daniel Drake <drake@endlessm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f1bac03e954b..18cb48054e54 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6223,13 +6223,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ },
+ [ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = {
+- .type = HDA_FIXUP_PINS,
+- .v.pins = (const struct hda_pintbl[]) {
+- { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+- { }
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* Enable the Mic */
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++ {}
+ },
+ .chained = true,
+- .chain_id = ALC255_FIXUP_HEADSET_MODE
++ .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC
+ },
+ [ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+@@ -7273,6 +7275,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x18, 0x02a11030},
+ {0x19, 0x0181303F},
+ {0x21, 0x0221102f}),
++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
++ {0x12, 0x90a60140},
++ {0x14, 0x90170120},
++ {0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ {0x12, 0x90a601c0},
+ {0x14, 0x90171120},
+--
+2.16.4
+
diff --git a/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072 b/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072
index 35b4c452b4..2ca1565c25 100644
--- a/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072
+++ b/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072
@@ -1,23 +1,34 @@
-From 5a62f24d17554da9cf1c292aa31329237bd982f1 Mon Sep 17 00:00:00 2001
-From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
-Date: Fri, 9 Dec 2016 07:49:28 -0600
-Subject: [PATCH] ASoC: Intel: Add machine driver for Cherrytrail-CX2072X
+From 3917da94f787e6c907e440653ead0c666a71379e Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 21 May 2019 08:26:53 +0200
+Subject: [PATCH] ASoC: Intel: Add machine driver for CX2072X on BYT/CHT
+ platforms
+Git-commit: 3917da94f787e6c907e440653ead0c666a71379e
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+Patch-mainline: Queued in subsystem maintainer repository
References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-Machine driver needed for Conexant CX2072X codec.
+This is an implementation of a machine driver needed for Conexant
+CX2072X codec on Intel Baytrail and Cherrytrail platforms. The
+current patch is based on the initial work by Pierre-Louis Bossart and
+the other Intel machine drivers.
-A couple of fixmes related to PLL
-Jack detection needs to be re-added
+The jack detection support (driven via the standard GPIO) was added on
+top of the original work.
-Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Tested with ASUS E200HA laptop.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=115531
+Acked-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
---
sound/soc/intel/Kconfig | 13 +
+ sound/soc/intel/atom/sst/sst_acpi.c | 4
sound/soc/intel/boards/Makefile | 2
- sound/soc/intel/boards/cht_cx2072x.c | 273 +++++++++++++++++++++++++++++++++++
- 3 files changed, 288 insertions(+)
+ sound/soc/intel/boards/cht_cx2072x.c | 337 +++++++++++++++++++++++++++++++++++
+ 4 files changed, 356 insertions(+)
create mode 100644 sound/soc/intel/boards/cht_cx2072x.c
--- a/sound/soc/intel/Kconfig
@@ -62,7 +73,7 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH) += snd-soc-sst-cht-bsw-max98090_ti.o
--- /dev/null
+++ b/sound/soc/intel/boards/cht_cx2072x.c
-@@ -0,0 +1,273 @@
+@@ -0,0 +1,337 @@
+/*
+ * cht_cx207x.c - ASoc DPCM Machine driver for CherryTrail w/ CX2072x
+ *
@@ -135,10 +146,49 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ return 0;
+}
+
++static struct snd_soc_jack cht_cx_headset;
++
++/* Headset jack detection DAPM pins */
++static struct snd_soc_jack_pin cht_cx_headset_pins[] = {
++ {
++ .pin = "Headset Mic",
++ .mask = SND_JACK_MICROPHONE,
++ },
++ {
++ .pin = "Headphone",
++ .mask = SND_JACK_HEADPHONE,
++ },
++};
++
++static const struct acpi_gpio_params headset_gpios = { 0, 0, false };
++
++static const struct acpi_gpio_mapping acpi_cht_cx2072x_gpios[] = {
++ { "headset-gpios", &headset_gpios, 1 },
++ {},
++};
++
++static int cht_cx_jack_status_check(void *data)
++{
++ return cx2072x_get_jack_state(data);
++}
++
++static struct snd_soc_jack_gpio cht_cx_gpio = {
++ .name = "headset",
++ .report = SND_JACK_HEADSET | SND_JACK_BTN_0,
++ .debounce_time = 150,
++ .wake = true,
++ .jack_status_check = cht_cx_jack_status_check,
++};
++
+static int cht_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_card *card = rtd->card;
++ struct snd_soc_codec *codec = rtd->codec;
++
++ if (devm_acpi_dev_add_driver_gpios(codec->dev,
++ acpi_cht_cx2072x_gpios))
++ dev_warn(rtd->dev, "Unable to add GPIO mapping table\n");
+
+ card->dapm.idle_bias_off = true;
+
@@ -150,6 +200,24 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ return ret;
+ }
+
++ ret = snd_soc_card_jack_new(card, "Headset",
++ SND_JACK_HEADSET | SND_JACK_BTN_0,
++ &cht_cx_headset,
++ cht_cx_headset_pins,
++ ARRAY_SIZE(cht_cx_headset_pins));
++ if (ret)
++ return ret;
++
++ cht_cx_gpio.gpiod_dev = codec->dev;
++ cht_cx_gpio.data = codec;
++ ret = snd_soc_jack_add_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
++ if (ret) {
++ dev_err(rtd->dev, "Adding jack GPIO failed\n");
++ return ret;
++ }
++
++ cx2072x_enable_detect(codec);
++
+ return ret;
+}
+
@@ -324,11 +392,18 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ return devm_snd_soc_register_card(&pdev->dev, &chtcx2072x_card);
+}
+
++static int snd_cht_mc_remove(struct platform_device *pdev)
++{
++ snd_soc_jack_free_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
++ return 0;
++}
++
+static struct platform_driver snd_cht_mc_driver = {
+ .driver = {
+ .name = "cht-cx2072x",
+ },
+ .probe = snd_cht_mc_probe,
++ .remove = snd_cht_mc_remove,
+};
+module_platform_driver(snd_cht_mc_driver);
+
@@ -336,3 +411,23 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+MODULE_AUTHOR("Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cht-cx2072x");
+--- a/sound/soc/intel/atom/sst/sst_acpi.c
++++ b/sound/soc/intel/atom/sst/sst_acpi.c
+@@ -503,6 +503,8 @@ static struct sst_acpi_mach sst_acpi_byt
+ &byt_rvp_platform_data },
+ {"10EC5648", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
+ &byt_rvp_platform_data },
++ {"14F10720", "cht-cx2072x", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
++ &byt_rvp_platform_data },
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+ * This is always last in the table so that it is selected only when
+@@ -541,6 +543,8 @@ static struct sst_acpi_mach sst_acpi_chv
+ /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
+ {"10EC5651", "bytcr_rt5651", "intel/fw_sst_22a8.bin", "bytcr_rt5651", NULL,
+ &chv_platform_data },
++ {"14F10720", "cht-cx2072x", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
++ &chv_platform_data },
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+ * This is always last in the table so that it is selected only when
diff --git a/patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver b/patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver
deleted file mode 100644
index 0b5c9c493e..0000000000
--- a/patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver
+++ /dev/null
@@ -1,36 +0,0 @@
-From f6737dd82168f5a378fd035882b8ec67b3f7dba8 Mon Sep 17 00:00:00 2001
-From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
-Date: Fri, 9 Dec 2016 09:10:39 -0600
-Subject: [PATCH] ASoC: Intel: add support for CX2072x machine driver
-References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-
-Add ACPI reference to load machine driver
-
-Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-
----
- sound/soc/intel/atom/sst/sst_acpi.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/sound/soc/intel/atom/sst/sst_acpi.c
-+++ b/sound/soc/intel/atom/sst/sst_acpi.c
-@@ -503,6 +503,8 @@ static struct sst_acpi_mach sst_acpi_byt
- &byt_rvp_platform_data },
- {"10EC5648", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
- &byt_rvp_platform_data },
-+ {"14F10720", "cht-cx2072x", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
-+ &byt_rvp_platform_data },
- #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
- /*
- * This is always last in the table so that it is selected only when
-@@ -541,6 +543,8 @@ static struct sst_acpi_mach sst_acpi_chv
- /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
- {"10EC5651", "bytcr_rt5651", "intel/fw_sst_22a8.bin", "bytcr_rt5651", NULL,
- &chv_platform_data },
-+ {"14F10720", "cht-cx2072x", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
-+ &chv_platform_data },
- #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
- /*
- * This is always last in the table so that it is selected only when
diff --git a/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC b/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC
index 4908964b43..84a7521746 100644
--- a/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC
+++ b/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC
@@ -1,9 +1,11 @@
-From e376dc97ffd2dbbc40174b08818cec8d6b6b30aa Mon Sep 17 00:00:00 2001
+From a497a4363706b3eb208c64e66e5b485bb3b186ac Mon Sep 17 00:00:00 2001
From: Simon Ho <simon.ho@conexant.com>
-Date: Wed, 5 Apr 2017 17:07:14 +0800
-Subject: [PATCH] ASoC: add support for Conexant CX2072X CODEC
+Date: Tue, 21 May 2019 08:26:52 +0200
+Subject: [PATCH] ASoC: Add support for Conexant CX2072X CODEC
+Git-commit: a497a4363706b3eb208c64e66e5b485bb3b186ac
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
+Patch-mainline: Queued in subsystem maintainer repository
References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
Initial commit of the Conexant CX2072X CODEC driver. Some features are
not present.
 Features of CX2072X codec:
-TDM stream supports up to 4 channels.
* AEC loopback support.
-[Fixed by tiwai:
- * missing declarations of jack detection helpers
- * missing DAPM entry definitions
- * missing power hooks
- * Workaround for the jack detection during cache-only]
+Further fixes by tiwai:
+ * Rebase to 5.2+
+ * Missing DAPM entry definitions
+ * Missing power hooks
+ * Fix uninitialized variable warning
+ * Rewrite jack detection stuff to use set_jack callback
+ * Plumbing jack detection code for Intel ASoC
+ * Move clk management into runtime PM
+ * Drop incorrect regcache usages
+ * Drop untested stuff: OF table, EQ/DRC handling
+ * Lots of code cleanups and minor refactoring
+The OF code was dropped due to the lack of testability.
+It should be easy to re-add once someone can test it.
+
+v1->v2: No change
+v2->v3: Move register tables to appropriate place
+ Remove some confusing codes
+ Set snd_ctl_boolean_* helpers directly
+ Fix EQ put callback
+ Rename to "DAC1 Switch" from "DAC1 Mute Switch"
+ Drop superfluous regmap calls at shutdown
+ Avoid regmap_register_patch()
+ Add missing register definitions
+ Fix register access on big-endian machine
+ Remove regcache messes
+v3->v4: Fix the wrong endianess conversion in reg write
+ Minor code cleanups
+v4->v5: Move clk management to runtime PM
+ Sparse warning fixes
+ Some more code simplification
+ Drop tricky regcache fiddling
+ Apply mutex locks around possible racy sequences
+ Move exported jack detection stuff into set_jack callback
+v5->v6: Drop buggy&untested EQ and DRC codes
+ Lots of code reduction/cleanup
+ Add more comments about platform-specific stuff
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=115531
Signed-off-by: Simon Ho <simon.ho@conexant.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
---
sound/soc/codecs/Kconfig | 5
sound/soc/codecs/Makefile | 2
- sound/soc/codecs/cx2072x.c | 2266 +++++++++++++++++++++++++++++++++++++++++++++
+ sound/soc/codecs/cx2072x.c | 2254 +++++++++++++++++++++++++++++++++++++++++++++
sound/soc/codecs/cx2072x.h | 320 ++++++
- 4 files changed, 2593 insertions(+)
+ 4 files changed, 2581 insertions(+)
create mode 100644 sound/soc/codecs/cx2072x.c
create mode 100644 sound/soc/codecs/cx2072x.h
@@ -88,7 +124,7 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
obj-$(CONFIG_SND_SOC_DA7218) += snd-soc-da7218.o
--- /dev/null
+++ b/sound/soc/codecs/cx2072x.c
-@@ -0,0 +1,2266 @@
+@@ -0,0 +1,2254 @@
+/*
+ * ALSA SoC CX20721/CX20723 codec driver
+ *
@@ -108,13 +144,10 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
-+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
-+#include <linux/firmware.h>
+#include <linux/regmap.h>
-+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
@@ -2323,14 +2356,6 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+};
+MODULE_DEVICE_TABLE(i2c, cx2072x_i2c_id);
+
-+static const struct of_device_id cx2072x_of_match[] = {
-+ { .compatible = "cnxt,cx20721", },
-+ { .compatible = "cnxt,cx20723", },
-+ { .compatible = "cnxt,cx7601", },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, cx2072x_of_match);
-+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id cx2072x_acpi_match[] = {
+ { "14F10720", 0 },
@@ -2345,7 +2370,6 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ .id_table = cx2072x_i2c_id,
+ .driver = {
+ .name = "cx2072x",
-+ .of_match_table = cx2072x_of_match,
+ .acpi_match_table = ACPI_PTR(cx2072x_acpi_match),
+ },
+};
diff --git a/patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX207 b/patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX207
deleted file mode 100644
index ca10c65ae4..0000000000
--- a/patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX207
+++ /dev/null
@@ -1,57 +0,0 @@
-From a1cf13479084b988fa9ef536b8256d9908fc6e30 Mon Sep 17 00:00:00 2001
-From: Simon Ho <simon.ho@conexant.com>
-Date: Wed, 5 Apr 2017 17:07:13 +0800
-Subject: [PATCH] ASoC: cx2072x: Add DT bingings documentation for CX2072X
- CODEC
-References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-
-Initial version of CX2072X device tree bindings document.
-
-Signed-off-by: Simon Ho <simon.ho@conexant.com>
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-
----
- Documentation/devicetree/bindings/sound/cx2072x.txt | 36 ++++++++++++++++++++
- 1 file changed, 36 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/sound/cx2072x.txt
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/sound/cx2072x.txt
-@@ -0,0 +1,36 @@
-+Conexant CX20721/CX20723/CX7601 audio CODEC
-+
-+The devices support I2C only.
-+
-+Required properties:
-+
-+ - compatible : One of "cnxt,cx20721", "cnxt,cx20723", "cnxt,cx7601".
-+
-+ - reg : the I2C address of the device for I2C, it should be <0x33>
-+
-+Optional properties:
-+
-+ - clocks : phandle and clock specifier for codec MCLK.
-+ - clock-names : Clock name string for 'clocks' attribute, should be "mclk".
-+
-+CODEC output pins:
-+ "PORTA" - Headphone
-+ "PORTG" - Class-D output
-+ "PORTE" - Line out
-+
-+CODEC output pins for Conexant DSP chip:
-+ "AEC REF" - AEC reference signal
-+
-+CODEC input pins:
-+ "PORTB" - Analog mic
-+ "PORTC" - Digital mic
-+ "PORTD" - Headset mic
-+
-+Example:
-+
-+codec: cx20721@33 {
-+ compatible = "cnxt,cx20721";
-+ reg = <0x33>;
-+ clocks = <&sco>;
-+ clock-names = "mclk";
-+};
diff --git a/patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x b/patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x
deleted file mode 100644
index a35989a676..0000000000
--- a/patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x
+++ /dev/null
@@ -1,111 +0,0 @@
-From 8f5df7e07f68efe5ee5da4b95f6596138e3ff736 Mon Sep 17 00:00:00 2001
-From: Takashi Iwai <tiwai@suse.de>
-Date: Tue, 11 Apr 2017 15:51:02 +0200
-Subject: [PATCH] ASoC: intel: Add headset jack support to cht-cx2072x
-References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-
-This patch adds plumbing up the jack detection via the standard gpio.
-
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-
----
- sound/soc/intel/boards/cht_cx2072x.c | 64 +++++++++++++++++++++++++++++++++++
- 1 file changed, 64 insertions(+)
-
---- a/sound/soc/intel/boards/cht_cx2072x.c
-+++ b/sound/soc/intel/boards/cht_cx2072x.c
-@@ -70,10 +70,49 @@ static int cht_aif1_hw_params(struct snd
- return 0;
- }
-
-+static struct snd_soc_jack cht_cx_headset;
-+
-+/* Headset jack detection DAPM pins */
-+static struct snd_soc_jack_pin cht_cx_headset_pins[] = {
-+ {
-+ .pin = "Headset Mic",
-+ .mask = SND_JACK_MICROPHONE,
-+ },
-+ {
-+ .pin = "Headphone",
-+ .mask = SND_JACK_HEADPHONE,
-+ },
-+};
-+
-+static const struct acpi_gpio_params headset_gpios = { 0, 0, false };
-+
-+static const struct acpi_gpio_mapping acpi_cht_cx2072x_gpios[] = {
-+ { "headset-gpios", &headset_gpios, 1 },
-+ {},
-+};
-+
-+static int cht_cx_jack_status_check(void *data)
-+{
-+ return cx2072x_get_jack_state(data);
-+}
-+
-+static struct snd_soc_jack_gpio cht_cx_gpio = {
-+ .name = "headset",
-+ .report = SND_JACK_HEADSET | SND_JACK_BTN_0,
-+ .debounce_time = 150,
-+ .wake = true,
-+ .jack_status_check = cht_cx_jack_status_check,
-+};
-+
- static int cht_codec_init(struct snd_soc_pcm_runtime *rtd)
- {
- int ret;
- struct snd_soc_card *card = rtd->card;
-+ struct snd_soc_codec *codec = rtd->codec;
-+
-+ if (devm_acpi_dev_add_driver_gpios(codec->dev,
-+ acpi_cht_cx2072x_gpios))
-+ dev_warn(rtd->dev, "Unable to add GPIO mapping table\n");
-
- card->dapm.idle_bias_off = true;
-
-@@ -85,6 +124,24 @@ static int cht_codec_init(struct snd_soc
- return ret;
- }
-
-+ ret = snd_soc_card_jack_new(card, "Headset",
-+ SND_JACK_HEADSET | SND_JACK_BTN_0,
-+ &cht_cx_headset,
-+ cht_cx_headset_pins,
-+ ARRAY_SIZE(cht_cx_headset_pins));
-+ if (ret)
-+ return ret;
-+
-+ cht_cx_gpio.gpiod_dev = codec->dev;
-+ cht_cx_gpio.data = codec;
-+ ret = snd_soc_jack_add_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
-+ if (ret) {
-+ dev_err(rtd->dev, "Adding jack GPIO failed\n");
-+ return ret;
-+ }
-+
-+ cx2072x_enable_detect(codec);
-+
- return ret;
- }
-
-@@ -259,11 +316,18 @@ static int snd_cht_mc_probe(struct platf
- return devm_snd_soc_register_card(&pdev->dev, &chtcx2072x_card);
- }
-
-+static int snd_cht_mc_remove(struct platform_device *pdev)
-+{
-+ snd_soc_jack_free_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
-+ return 0;
-+}
-+
- static struct platform_driver snd_cht_mc_driver = {
- .driver = {
- .name = "cht-cx2072x",
- },
- .probe = snd_cht_mc_probe,
-+ .remove = snd_cht_mc_remove,
- };
- module_platform_driver(snd_cht_mc_driver);
-
diff --git a/patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch b/patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch
new file mode 100644
index 0000000000..a922436067
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch
@@ -0,0 +1,118 @@
+From: Josh Collier <josh.d.collier@intel.com>
+Date: Thu, 11 Apr 2019 07:07:42 -0700
+Subject: IB/hfi1: Add debugfs to control expansion ROM write protect
+Patch-mainline: v5.2-rc1
+Git-commit: 07c5ba912401b2ae3f13e3ce214158aec723c3fd
+References: jsc#SLE-4925
+
+Some kernels now enable CONFIG_IO_STRICT_DEVMEM which prevents multiple
+handles to PCI resource0. In order to continue to support expansion ROM
+updates while the driver is loaded, the driver must now provide an
+interface to control the expansion ROM write protection.
+
+This patch adds an exprom_wp debugfs interface that allows the hfi1_eprom
+user tool to disable the expansion ROM write protection by opening the
+file and writing a '1'. The write protection is released when writing a
+'0' or automatically re-enabled when the file handle is closed. The
+current implementation will only allow one handle to be opened at a time
+across all hfi1 devices.
+
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Josh Collier <josh.d.collier@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/debugfs.c | 74 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 74 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/debugfs.c
++++ b/drivers/infiniband/hw/hfi1/debugfs.c
+@@ -1080,6 +1080,77 @@ static int qsfp2_debugfs_release(struct
+ return __qsfp_debugfs_release(in, fp, 1);
+ }
+
++#define EXPROM_WRITE_ENABLE BIT_ULL(14)
++
++static bool exprom_wp_disabled;
++
++static int exprom_wp_set(struct hfi1_devdata *dd, bool disable)
++{
++ u64 gpio_val = 0;
++
++ if (disable) {
++ gpio_val = EXPROM_WRITE_ENABLE;
++ exprom_wp_disabled = true;
++ dd_dev_info(dd, "Disable Expansion ROM Write Protection\n");
++ } else {
++ exprom_wp_disabled = false;
++ dd_dev_info(dd, "Enable Expansion ROM Write Protection\n");
++ }
++
++ write_csr(dd, ASIC_GPIO_OUT, gpio_val);
++ write_csr(dd, ASIC_GPIO_OE, gpio_val);
++
++ return 0;
++}
++
++static ssize_t exprom_wp_debugfs_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ return 0;
++}
++
++static ssize_t exprom_wp_debugfs_write(struct file *file,
++ const char __user *buf, size_t count,
++ loff_t *ppos)
++{
++ struct hfi1_pportdata *ppd = private2ppd(file);
++ char cdata;
++
++ if (count != 1)
++ return -EINVAL;
++ if (get_user(cdata, buf))
++ return -EFAULT;
++ if (cdata == '0')
++ exprom_wp_set(ppd->dd, false);
++ else if (cdata == '1')
++ exprom_wp_set(ppd->dd, true);
++ else
++ return -EINVAL;
++
++ return 1;
++}
++
++static unsigned long exprom_in_use;
++
++static int exprom_wp_debugfs_open(struct inode *in, struct file *fp)
++{
++ if (test_and_set_bit(0, &exprom_in_use))
++ return -EBUSY;
++
++ return 0;
++}
++
++static int exprom_wp_debugfs_release(struct inode *in, struct file *fp)
++{
++ struct hfi1_pportdata *ppd = private2ppd(fp);
++
++ if (exprom_wp_disabled)
++ exprom_wp_set(ppd->dd, false);
++ clear_bit(0, &exprom_in_use);
++
++ return 0;
++}
++
+ #define DEBUGFS_OPS(nm, readroutine, writeroutine) \
+ { \
+ .name = nm, \
+@@ -1119,6 +1190,9 @@ static const struct counter_info port_cn
+ qsfp1_debugfs_open, qsfp1_debugfs_release),
+ DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
+ qsfp2_debugfs_open, qsfp2_debugfs_release),
++ DEBUGFS_XOPS("exprom_wp", exprom_wp_debugfs_read,
++ exprom_wp_debugfs_write, exprom_wp_debugfs_open,
++ exprom_wp_debugfs_release),
+ DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
+ DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
+ DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
diff --git a/patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch b/patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch
new file mode 100644
index 0000000000..11a83a8b86
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch
@@ -0,0 +1,56 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Thu, 11 Apr 2019 07:17:10 -0700
+Subject: IB/hfi1: Add selected Rcv counters
+Patch-mainline: v5.2-rc1
+Git-commit: a9c62e007878ba88b703369c1cd9e26682453665
+References: jsc#SLE-4925
+
+These counters are required for error analysis and debug.
+
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip.c | 3 +++
+ drivers/infiniband/hw/hfi1/chip.h | 3 +++
+ drivers/infiniband/hw/hfi1/chip_registers.h | 3 +++
+ 3 files changed, 9 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -4104,6 +4104,9 @@ def_access_ibp_counter(seq_naks);
+
+ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
+ [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
++[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
++[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
++[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
+ [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
+ CNTR_NORMAL),
+ [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
+--- a/drivers/infiniband/hw/hfi1/chip.h
++++ b/drivers/infiniband/hw/hfi1/chip.h
+@@ -858,6 +858,9 @@ static inline int idx_from_vl(int vl)
+ /* Per device counter indexes */
+ enum {
+ C_RCV_OVF = 0,
++ C_RX_LEN_ERR,
++ C_RX_ICRC_ERR,
++ C_RX_EBP,
+ C_RX_TID_FULL,
+ C_RX_TID_INVALID,
+ C_RX_TID_FLGMS,
+--- a/drivers/infiniband/hw/hfi1/chip_registers.h
++++ b/drivers/infiniband/hw/hfi1/chip_registers.h
+@@ -380,6 +380,9 @@
+ #define DC_LCB_PRF_TX_FLIT_CNT (DC_LCB_CSRS + 0x000000000418)
+ #define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
+ #define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
++#define RCV_LENGTH_ERR_CNT 0
++#define RCV_ICRC_ERR_CNT 6
++#define RCV_EBP_CNT 9
+ #define RCV_BUF_OVFL_CNT 10
+ #define RCV_CONTEXT_EGR_STALL 22
+ #define RCV_DATA_PKT_CNT 0
diff --git a/patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch b/patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch
new file mode 100644
index 0000000000..738a24107a
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch
@@ -0,0 +1,83 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Wed, 28 Nov 2018 10:32:48 -0800
+Subject: IB/hfi1: Close VNIC sdma_progress sleep window
+Patch-mainline: v5.0-rc1
+Git-commit: 18912c4524385dd6532c682cb9d4f6aa39ba8d47
+References: jsc#SLE-4925
+
+sdma_progress() is called outside the wait lock.
+
+In this case, there is a race condition where sdma_progress() can return
+false and the sdma_engine can idle. If that happens, there will be no
+more sdma interrupts to cause the wakeup and the vnic_sdma xmit will hang.
+
+Fix by moving the lock to enclose the sdma_progress() call.
+
+Also, delete the tx_retry. The need for this was removed by:
+commit bcad29137a97 ("IB/hfi1: Serve the most starved iowait entry first")
+
+Fixes: 64551ede6cd1 ("IB/hfi1: VNIC SDMA support")
+Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/vnic_sdma.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -57,7 +57,6 @@
+
+ #define HFI1_VNIC_TXREQ_NAME_LEN 32
+ #define HFI1_VNIC_SDMA_DESC_WTRMRK 64
+-#define HFI1_VNIC_SDMA_RETRY_COUNT 1
+
+ /*
+ * struct vnic_txreq - VNIC transmit descriptor
+@@ -67,7 +66,6 @@
+ * @pad: pad buffer
+ * @plen: pad length
+ * @pbc_val: pbc value
+- * @retry_count: tx retry count
+ */
+ struct vnic_txreq {
+ struct sdma_txreq txreq;
+@@ -77,8 +75,6 @@ struct vnic_txreq {
+ unsigned char pad[HFI1_VNIC_MAX_PAD];
+ u16 plen;
+ __le64 pbc_val;
+-
+- u32 retry_count;
+ };
+
+ static void vnic_sdma_complete(struct sdma_txreq *txreq,
+@@ -196,7 +192,6 @@ int hfi1_vnic_send_dma(struct hfi1_devda
+ ret = build_vnic_tx_desc(sde, tx, pbc);
+ if (unlikely(ret))
+ goto free_desc;
+- tx->retry_count = 0;
+
+ ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
+ &tx->txreq, vnic_sdma->pkts_sent);
+@@ -238,14 +233,14 @@ static int hfi1_vnic_sdma_sleep(struct s
+ struct hfi1_vnic_sdma *vnic_sdma =
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
+ struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
+- struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
+
+- if (sdma_progress(sde, seq, txreq))
+- if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT)
+- return -EAGAIN;
++ write_seqlock(&dev->iowait_lock);
++ if (sdma_progress(sde, seq, txreq)) {
++ write_sequnlock(&dev->iowait_lock);
++ return -EAGAIN;
++ }
+
+ vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
+- write_seqlock(&dev->iowait_lock);
+ if (list_empty(&vnic_sdma->wait.list))
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ write_sequnlock(&dev->iowait_lock);
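
The pattern at issue is the classic lost-wakeup race: testing the wakeup
condition outside the lock that serializes against the waker. A compilable
userspace sketch of both orderings, with a pthread mutex standing in for
the driver's iowait seqlock (build with -pthread; names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static bool engine_made_progress;    /* set by the engine under wait_lock */

/* Broken: the engine can make progress and scan an empty wait list in
 * the window between the unlocked check and the enqueue below. */
static int sleep_racy(void (*enqueue)(void))
{
    if (engine_made_progress)
        return -1;                   /* -EAGAIN: retry the send */
    /* <-- engine idles here; the wakeup is lost */
    pthread_mutex_lock(&wait_lock);
    enqueue();                       /* queued, but nothing wakes us */
    pthread_mutex_unlock(&wait_lock);
    return 0;
}

/* Fixed: progress check and enqueue are atomic with respect to the
 * engine's wakeup scan, mirroring the hunk above. */
static int sleep_fixed(void (*enqueue)(void))
{
    pthread_mutex_lock(&wait_lock);
    if (engine_made_progress) {
        pthread_mutex_unlock(&wait_lock);
        return -1;                   /* -EAGAIN: retry the send */
    }
    enqueue();
    pthread_mutex_unlock(&wait_lock);
    return 0;
}

static void enqueue_noop(void) { }

int main(void)
{
    sleep_racy(enqueue_noop);
    return sleep_fixed(enqueue_noop);
}
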
diff --git a/patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch b/patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch
new file mode 100644
index 0000000000..3c688e3372
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch
@@ -0,0 +1,31 @@
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Date: Wed, 28 Nov 2018 10:19:47 -0800
+Subject: IB/hfi1: Consider LMC in 16B/bypass ingress packet check
+Patch-mainline: v5.0-rc1
+Git-commit: ff8b67fccdb65402df78a1695c38be805252cf8e
+References: jsc#SLE-4925
+
+Ingress packet check for 16B/bypass packets should consider the port
+LMC. Not doing this will result in packets sent to the LMC LIDs getting
+dropped. The check is implemented in HW for 9B packets.
+
+Reviewed-by: Mike Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/driver.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -1427,7 +1427,7 @@ static int hfi1_bypass_ingress_pkt_check
+ if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
+ (packet->dlid !=
+ opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
+- if (packet->dlid != ppd->lid)
++ if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
+ return -EINVAL;
+ }
+
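
With a nonzero LMC a port owns 2^lmc consecutive LIDs starting at its base
LID, so the low lmc bits of the incoming DLID must be masked off before
comparing against ppd->lid. A small standalone demonstration of the
corrected check, with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Returns 1 when dlid falls inside the port's 2^lmc LID window. */
static int dlid_matches(uint32_t dlid, uint32_t base_lid, unsigned int lmc)
{
    return (dlid & ~((1u << lmc) - 1)) == base_lid;
}

int main(void)
{
    uint32_t base = 0x1000;    /* example base LID, low lmc bits clear */

    for (uint32_t dlid = 0x0fff; dlid <= 0x1004; dlid++)
        printf("dlid 0x%04x -> %s\n", dlid,
               dlid_matches(dlid, base, 2) ? "accept" : "drop");
    /* With lmc = 2, 0x1000..0x1003 are accepted; the old check
     * (packet->dlid != ppd->lid) would have dropped 0x1001..0x1003. */
    return 0;
}
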
diff --git a/patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch b/patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch
new file mode 100644
index 0000000000..b79b8a91bf
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch
@@ -0,0 +1,459 @@
+From: Mitko Haralanov <mitko.haralanov@intel.com>
+Date: Wed, 28 Nov 2018 10:19:15 -0800
+Subject: IB/hfi1: Correctly process FECN and BECN in packets
+Patch-mainline: v5.0-rc1
+Git-commit: fe4dd4239277486ca3a468e7bbeafd7ef3a5634e
+References: jsc#SLE-4925
+
+A CA is supposed to ignore FECN bits in multicast, ACK, and CNP
+packets. This patch corrects the behavior of the HFI1 driver in this
+regard by ignoring FECNs in those packet types.
+
+While fixing the above behavior, fix the extraction of the FECN and BECN
+bits from the packet headers for both 9B and 16B packets.
+
+Furthermore, this patch corrects the driver's response to a FECN in RDMA
+READ RESPONSE packets. Instead of sending an "empty" ACK, the driver now
+sends a CNP packet. While editing that code path, add the missing trace
+for CNP packets.
+
+Fixes: 88733e3b8450 ("IB/hfi1: Add 16B UD support")
+Fixes: f59fb9e05109 ("IB/hfi1: Fix handling of FECN marked multicast packet")
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/driver.c | 70 ++++++++++++++++++++++++------------
+ drivers/infiniband/hw/hfi1/hfi.h | 35 ++++++++++++------
+ drivers/infiniband/hw/hfi1/rc.c | 30 +++++----------
+ drivers/infiniband/hw/hfi1/uc.c | 2 -
+ drivers/infiniband/hw/hfi1/ud.c | 33 ++++++++++------
+ 5 files changed, 104 insertions(+), 66 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -430,40 +430,60 @@ static const hfi1_handle_cnp hfi1_handle
+ [HFI1_PKT_TYPE_16B] = &return_cnp_16B
+ };
+
+-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp)
++/**
++ * hfi1_process_ecn_slowpath - Process FECN or BECN bits
++ * @qp: The packet's destination QP
++ * @pkt: The packet itself.
++ * @prescan: Is the caller the RXQ prescan
++ *
++ * Process the packet's FECN or BECN bits. By now, the packet
++ * has already been evaluated whether processing of those bits should
++ * be done.
++ * The significance of the @prescan argument is that if the caller
++ * is the RXQ prescan, a CNP will be sent out instead of waiting for the
++ * normal packet processing to send an ACK with BECN set (or a CNP).
++ */
++bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
++ bool prescan)
+ {
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_other_headers *ohdr = pkt->ohdr;
+ struct ib_grh *grh = pkt->grh;
+- u32 rqpn = 0, bth1;
++ u32 rqpn = 0;
+ u16 pkey;
+ u32 rlid, slid, dlid = 0;
+- u8 hdr_type, sc, svc_type;
+- bool is_mcast = false;
++ u8 hdr_type, sc, svc_type, opcode;
++ bool is_mcast = false, ignore_fecn = false, do_cnp = false,
++ fecn, becn;
+
+ /* can be called from prescan */
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+- is_mcast = hfi1_is_16B_mcast(dlid);
+ pkey = hfi1_16B_get_pkey(pkt->hdr);
+ sc = hfi1_16B_get_sc(pkt->hdr);
+ dlid = hfi1_16B_get_dlid(pkt->hdr);
+ slid = hfi1_16B_get_slid(pkt->hdr);
++ is_mcast = hfi1_is_16B_mcast(dlid);
++ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_16B;
++ fecn = hfi1_16B_get_fecn(pkt->hdr);
++ becn = hfi1_16B_get_becn(pkt->hdr);
+ } else {
+- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
+ pkey = ib_bth_get_pkey(ohdr);
+ sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+- dlid = ib_get_dlid(pkt->hdr);
++ dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
++ ppd->lid;
+ slid = ib_get_slid(pkt->hdr);
++ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
++ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
++ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_9B;
++ fecn = ib_bth_get_fecn(ohdr);
++ becn = ib_bth_get_becn(ohdr);
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+- dlid = ppd->lid;
+ rlid = slid;
+ rqpn = ib_get_sqpn(pkt->ohdr);
+ svc_type = IB_CC_SVCTYPE_UD;
+@@ -485,22 +505,31 @@ void hfi1_process_ecn_slowpath(struct rv
+ svc_type = IB_CC_SVCTYPE_RC;
+ break;
+ default:
+- return;
++ return false;
+ }
+
+- bth1 = be32_to_cpu(ohdr->bth[1]);
++ ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
++ (opcode == IB_OPCODE_RC_ACKNOWLEDGE);
++ /*
++ * ACKNOWLEDGE packets do not get a CNP but this will be
++ * guarded by ignore_fecn above.
++ */
++ do_cnp = prescan ||
++ (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
++ opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE);
++
+ /* Call appropriate CNP handler */
+- if (do_cnp && (bth1 & IB_FECN_SMASK))
++ if (!ignore_fecn && do_cnp && fecn)
+ hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
+ dlid, rlid, sc, grh);
+
+- if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
+- u32 lqpn = bth1 & RVT_QPN_MASK;
++ if (becn) {
++ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ u8 sl = ibp->sc_to_sl[sc];
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
+ }
+-
++ return !ignore_fecn && fecn;
+ }
+
+ struct ps_mdata {
+@@ -599,7 +628,6 @@ static void __prescan_rxq(struct hfi1_pa
+ struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
+ u64 rhf = rhf_to_cpu(rhf_addr);
+ u32 etype = rhf_rcv_type(rhf), qpn, bth1;
+- int is_ecn = 0;
+ u8 lnh;
+
+ if (ps_done(&mdata, rhf, rcd))
+@@ -625,12 +653,10 @@ static void __prescan_rxq(struct hfi1_pa
+ goto next; /* just in case */
+ }
+
+- bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+- is_ecn = !!(bth1 & (IB_FECN_SMASK | IB_BECN_SMASK));
+-
+- if (!is_ecn)
++ if (!hfi1_may_ecn(packet))
+ goto next;
+
++ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+ qpn = bth1 & RVT_QPN_MASK;
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
+@@ -640,7 +666,7 @@ static void __prescan_rxq(struct hfi1_pa
+ goto next;
+ }
+
+- process_ecn(qp, packet, true);
++ hfi1_process_ecn_slowpath(qp, packet, true);
+ rcu_read_unlock();
+
+ /* turn off BECN, FECN */
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1804,13 +1804,20 @@ static inline struct hfi1_ibport *rcd_to
+ return &rcd->ppd->ibport_data;
+ }
+
+-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp);
+-static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp)
++/**
++ * hfi1_may_ecn - Check whether FECN or BECN processing should be done
++ * @pkt: the packet to be evaluated
++ *
++ * Check whether the FECN or BECN bits in the packet's header are
++ * enabled, depending on packet type.
++ *
++ * This function only checks for FECN and BECN bits. Additional checks
++ * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
++ * ensure correct handling.
++ */
++static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
+ {
+- bool becn;
+- bool fecn;
++ bool fecn, becn;
+
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ fecn = hfi1_16B_get_fecn(pkt->hdr);
+@@ -1819,10 +1826,18 @@ static inline bool process_ecn(struct rv
+ fecn = ib_bth_get_fecn(pkt->ohdr);
+ becn = ib_bth_get_becn(pkt->ohdr);
+ }
+- if (unlikely(fecn || becn)) {
+- hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
+- return fecn;
+- }
++ return fecn || becn;
++}
++
++bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
++ bool prescan);
++static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
++{
++ bool do_work;
++
++ do_work = hfi1_may_ecn(pkt);
++ if (unlikely(do_work))
++ return hfi1_process_ecn_slowpath(qp, pkt, false);
+ return false;
+ }
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -2049,8 +2049,7 @@ void hfi1_rc_rcv(struct hfi1_packet *pac
+ struct ib_reth *reth;
+ unsigned long flags;
+ int ret;
+- bool is_fecn = false;
+- bool copy_last = false;
++ bool copy_last = false, fecn;
+ u32 rkey;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
+
+@@ -2059,7 +2058,7 @@ void hfi1_rc_rcv(struct hfi1_packet *pac
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+- is_fecn = process_ecn(qp, packet, false);
++ fecn = process_ecn(qp, packet);
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+@@ -2070,8 +2069,6 @@ void hfi1_rc_rcv(struct hfi1_packet *pac
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ rc_rcv_resp(packet);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2347,11 +2344,11 @@ send_last:
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
++ if (fecn)
++ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2413,11 +2410,11 @@ send_last:
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
++ if (fecn)
++ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2430,16 +2427,9 @@ send_last:
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
+- if (psn & IB_BTH_REQ_ACK) {
+- if (packet->numpkt == 0) {
+- rc_cancel_ack(qp);
+- goto send_ack;
+- }
+- if (qp->r_adefered >= HFI1_PSN_CREDIT) {
+- rc_cancel_ack(qp);
+- goto send_ack;
+- }
+- if (unlikely(is_fecn)) {
++ if (psn & IB_BTH_REQ_ACK || fecn) {
++ if (packet->numpkt == 0 || fecn ||
++ qp->r_adefered >= HFI1_PSN_CREDIT) {
+ rc_cancel_ack(qp);
+ goto send_ack;
+ }
+@@ -2480,7 +2470,7 @@ nack_acc:
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ send_ack:
+- hfi1_send_rc_ack(packet, is_fecn);
++ hfi1_send_rc_ack(packet, fecn);
+ }
+
+ void hfi1_rc_hdrerr(
+--- a/drivers/infiniband/hw/hfi1/uc.c
++++ b/drivers/infiniband/hw/hfi1/uc.c
+@@ -321,7 +321,7 @@ void hfi1_uc_rcv(struct hfi1_packet *pac
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+- process_ecn(qp, packet, true);
++ process_ecn(qp, packet);
+
+ psn = ib_bth_get_psn(ohdr);
+ /* Compare the PSN verses the expected PSN. */
+--- a/drivers/infiniband/hw/hfi1/ud.c
++++ b/drivers/infiniband/hw/hfi1/ud.c
+@@ -51,6 +51,7 @@
+ #include "hfi.h"
+ #include "mad.h"
+ #include "verbs_txreq.h"
++#include "trace_ibhdrs.h"
+ #include "qp.h"
+
+ /* We support only two types - 9B and 16B for now */
+@@ -656,18 +657,19 @@ void return_cnp_16B(struct hfi1_ibport *
+ u32 bth0, plen, vl, hwords = 7;
+ u16 len;
+ u8 l4;
+- struct hfi1_16b_header hdr;
++ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 nwords;
+
++ hdr.hdr_type = HFI1_PKT_TYPE_16B;
+ /* Populate length */
+ nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
+ SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
+ if (old_grh) {
+- struct ib_grh *grh = &hdr.u.l.grh;
++ struct ib_grh *grh = &hdr.opah.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+@@ -675,11 +677,11 @@ void return_cnp_16B(struct hfi1_ibport *
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+- ohdr = &hdr.u.l.oth;
++ ohdr = &hdr.opah.u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+- ohdr = &hdr.u.oth;
++ ohdr = &hdr.opah.u.oth;
+ l4 = OPA_16B_L4_IB_LOCAL;
+ }
+
+@@ -693,7 +695,7 @@ void return_cnp_16B(struct hfi1_ibport *
+
+ /* Convert dwords to flits */
+ len = (hwords + nwords) >> 1;
+- hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);
++ hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);
+
+ plen = 2 /* PBC */ + hwords + nwords;
+ pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+@@ -701,9 +703,11 @@ void return_cnp_16B(struct hfi1_ibport *
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+- if (pbuf)
++ if (pbuf) {
++ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
++ }
+ }
+ }
+
+@@ -715,14 +719,15 @@ void return_cnp(struct hfi1_ibport *ibp,
+ u32 bth0, plen, vl, hwords = 5;
+ u16 lrh0;
+ u8 sl = ibp->sc_to_sl[sc5];
+- struct ib_header hdr;
++ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
++ hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ if (old_grh) {
+- struct ib_grh *grh = &hdr.u.l.grh;
++ struct ib_grh *grh = &hdr.ibh.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+@@ -730,11 +735,11 @@ void return_cnp(struct hfi1_ibport *ibp,
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+- ohdr = &hdr.u.l.oth;
++ ohdr = &hdr.ibh.u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+- ohdr = &hdr.u.oth;
++ ohdr = &hdr.ibh.u.oth;
+ lrh0 = HFI1_LRH_BTH;
+ }
+
+@@ -746,16 +751,18 @@ void return_cnp(struct hfi1_ibport *ibp,
+ ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
+ ohdr->bth[2] = 0; /* PSN 0 */
+
+- hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
++ hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
+ plen = 2 /* PBC */ + hwords;
+ pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+- if (pbuf)
++ if (pbuf) {
++ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
++ }
+ }
+ }
+
+@@ -912,7 +919,7 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+ src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
+ }
+
+- process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
++ process_ecn(qp, packet);
+ /*
+ * Get the number of bytes the message was padded by
+ * and drop incomplete packets.
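
Condensed, the new slowpath decision reduces to the predicate below. The
opcode constants are stand-ins rather than the real IB_OPCODE_* encodings:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the relevant IB_OPCODE_* values (not real encodings). */
enum opcode { OP_RC_SEND, OP_RC_READ_RESP_FIRST, OP_RC_READ_RESP_LAST,
              OP_RC_ATOMIC_ACK, OP_RC_ACK, OP_CNP };

/* Returns whether the caller should react to the FECN at all; *send_cnp
 * says to answer with a CNP now (response opcodes and the prescan path). */
static bool handle_fecn(bool fecn, bool is_mcast, enum opcode op,
                        bool prescan, bool *send_cnp)
{
    /* a CA ignores FECN in multicast, ACK, and CNP packets */
    bool ignore = is_mcast || op == OP_CNP || op == OP_RC_ACK;
    /* responses carry no request ACK to piggyback a BECN on */
    bool do_cnp = prescan || (op >= OP_RC_READ_RESP_FIRST &&
                              op <= OP_RC_ATOMIC_ACK);

    *send_cnp = !ignore && do_cnp && fecn;
    return !ignore && fecn;
}

int main(void)
{
    bool cnp;

    /* FECN on an RDMA READ response: answered with a CNP */
    printf("%d", handle_fecn(true, false, OP_RC_READ_RESP_LAST,
                             false, &cnp) && cnp);
    /* FECN on an ACK: ignored entirely */
    printf("%d\n", handle_fecn(true, false, OP_RC_ACK, false, &cnp));
    return 0;
}
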
diff --git a/patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch b/patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch
new file mode 100644
index 0000000000..2cdcc39d3e
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch
@@ -0,0 +1,142 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Wed, 28 Nov 2018 10:14:32 -0800
+Subject: IB/hfi1: Dump pio info for non-user send contexts
+Patch-mainline: v5.0-rc1
+Git-commit: 937488a85986faa743d12456970a0cbe83e3b04e
+References: jsc#SLE-4925
+
+This patch dumps the pio info for non-user send contexts to assist
+debugging in the field.
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Mike Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip_registers.h | 4 ++
+ drivers/infiniband/hw/hfi1/debugfs.c | 49 ++++++++++++++++++++++++++++
+ drivers/infiniband/hw/hfi1/pio.c | 25 ++++++++++++++
+ drivers/infiniband/hw/hfi1/pio.h | 3 +
+ 4 files changed, 81 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/chip_registers.h
++++ b/drivers/infiniband/hw/hfi1/chip_registers.h
+@@ -935,6 +935,10 @@
+ #define SEND_CTXT_CREDIT_CTRL_THRESHOLD_MASK 0x7FFull
+ #define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SHIFT 0
+ #define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SMASK 0x7FFull
++#define SEND_CTXT_CREDIT_STATUS (TXE + 0x000000100018)
++#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK 0x7FFull
++#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT 32
++#define SEND_CTXT_CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK 0x7FFull
+ #define SEND_CTXT_CREDIT_FORCE (TXE + 0x000000100028)
+ #define SEND_CTXT_CREDIT_FORCE_FORCE_RETURN_SMASK 0x1ull
+ #define SEND_CTXT_CREDIT_RETURN_ADDR (TXE + 0x000000100020)
+--- a/drivers/infiniband/hw/hfi1/debugfs.c
++++ b/drivers/infiniband/hw/hfi1/debugfs.c
+@@ -407,6 +407,54 @@ DEBUGFS_SEQ_FILE_OPS(rcds);
+ DEBUGFS_SEQ_FILE_OPEN(rcds)
+ DEBUGFS_FILE_OPS(rcds);
+
++static void *_pios_seq_start(struct seq_file *s, loff_t *pos)
++{
++ struct hfi1_ibdev *ibd;
++ struct hfi1_devdata *dd;
++
++ ibd = (struct hfi1_ibdev *)s->private;
++ dd = dd_from_dev(ibd);
++ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
++ return NULL;
++ return pos;
++}
++
++static void *_pios_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
++ struct hfi1_devdata *dd = dd_from_dev(ibd);
++
++ ++*pos;
++ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
++ return NULL;
++ return pos;
++}
++
++static void _pios_seq_stop(struct seq_file *s, void *v)
++{
++}
++
++static int _pios_seq_show(struct seq_file *s, void *v)
++{
++ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
++ struct hfi1_devdata *dd = dd_from_dev(ibd);
++ struct send_context_info *sci;
++ loff_t *spos = v;
++ loff_t i = *spos;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dd->sc_lock, flags);
++ sci = &dd->send_contexts[i];
++ if (sci && sci->type != SC_USER && sci->allocated && sci->sc)
++ seqfile_dump_sci(s, i, sci);
++ spin_unlock_irqrestore(&dd->sc_lock, flags);
++ return 0;
++}
++
++DEBUGFS_SEQ_FILE_OPS(pios);
++DEBUGFS_SEQ_FILE_OPEN(pios)
++DEBUGFS_FILE_OPS(pios);
++
+ /* read the per-device counters */
+ static ssize_t dev_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+@@ -1143,6 +1191,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibd
+ DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(rcds, ibd->hfi1_ibdev_dbg, ibd);
++ DEBUGFS_SEQ_FILE_CREATE(pios, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(sdma_cpu_list, ibd->hfi1_ibdev_dbg, ibd);
+ /* dev counter files */
+ for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -2137,3 +2137,28 @@ void free_credit_return(struct hfi1_devd
+ kfree(dd->cr_base);
+ dd->cr_base = NULL;
+ }
++
++void seqfile_dump_sci(struct seq_file *s, u32 i,
++ struct send_context_info *sci)
++{
++ struct send_context *sc = sci->sc;
++ u64 reg;
++
++ seq_printf(s, "SCI %u: type %u base %u credits %u\n",
++ i, sci->type, sci->base, sci->credits);
++ seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
++ sc->flags, sc->sw_index, sc->hw_context, sc->group);
++ seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n",
++ sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
++ seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n",
++ sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
++ seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n",
++ sc->credit_intr_count, sc->credit_ctrl);
++ reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
++ seq_printf(s, " *hw_free %llu CurrentFree %llu LastReturned %llu\n",
++ (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
++ CR_COUNTER_SHIFT,
++ (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
++ SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
++ reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
++}
+--- a/drivers/infiniband/hw/hfi1/pio.h
++++ b/drivers/infiniband/hw/hfi1/pio.h
+@@ -329,4 +329,7 @@ void seg_pio_copy_start(struct pio_buf *
+ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
+ void seg_pio_copy_end(struct pio_buf *pbuf);
+
++void seqfile_dump_sci(struct seq_file *s, u32 i,
++ struct send_context_info *sci);
++
+ #endif /* _PIO_H */
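
The pios file follows the standard seq_file iterator contract: start()
validates *pos and returns a cursor, next() advances it, and show() emits
one element under the lock that protects it. A stripped-down sketch of the
same pattern over an arbitrary array (kernel code; the open handler calling
seq_open() and the debugfs registration are omitted):

#include <linux/kernel.h>
#include <linux/seq_file.h>

static int items[4] = { 10, 20, 30, 40 };

static void *ex_seq_start(struct seq_file *s, loff_t *pos)
{
    return *pos < (loff_t)ARRAY_SIZE(items) ? pos : NULL;
}

static void *ex_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
    ++*pos;
    return *pos < (loff_t)ARRAY_SIZE(items) ? pos : NULL;
}

static void ex_seq_stop(struct seq_file *s, void *v)
{
}

static int ex_seq_show(struct seq_file *s, void *v)
{
    loff_t i = *(loff_t *)v;

    seq_printf(s, "item %lld: %d\n", i, items[i]);
    return 0;
}

static const struct seq_operations ex_seq_ops = {
    .start = ex_seq_start,
    .next  = ex_seq_next,
    .stop  = ex_seq_stop,
    .show  = ex_seq_show,
};
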
diff --git a/patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch b/patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch
new file mode 100644
index 0000000000..441d3b9f70
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch
@@ -0,0 +1,47 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:55:39 -0700
+Subject: IB/hfi1: Eliminate opcode tests on mr deref
+Patch-mainline: v5.1-rc5
+Git-commit: a8639a79e85c18c16c10089edd589c7948f19bbd
+References: jsc#SLE-4925
+
+When an old ack_queue entry is used to store an incoming request, it may
+need to clean up the old entry if it is still referencing the
+MR. Originally only the RDMA READ request needed to reference an MR on
+the responder side, and therefore the opcode was tested when cleaning up
+the old entry. The introduction of tid rdma specific operations in the
+ack_queue makes the specific opcode tests wrong. Multiple opcodes (RDMA
+READ, TID RDMA READ, and TID RDMA WRITE) may need MR ref cleanup.
+
+Remove the opcode specific tests associated with the ack_queue.
+
+Fixes: f48ad614c100 ("IB/hfi1: Move driver out of staging")
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/rc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -2302,7 +2302,7 @@ send_last:
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
++ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+@@ -2376,7 +2376,7 @@ send_last:
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
++ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
diff --git a/patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch b/patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch
new file mode 100644
index 0000000000..16c34a14e2
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch
@@ -0,0 +1,58 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:55:19 -0700
+Subject: IB/hfi1: Failed to drain send queue when QP is put into error state
+Patch-mainline: v5.1-rc5
+Git-commit: 662d66466637862ef955f7f6e78a286d8cf0ebef
+References: jsc#SLE-4925
+
+When a QP is put into error state, all pending requests in the send work
+queue should be drained. The following sequence of events could lead to a
+failure, causing a request to hang:
+
+(1) The QP builds a packet and tries to send it through the SDMA engine.
+ However, the PIO engine is still busy. Consequently, this packet is put
+ on the QP's tx list and the QP is put on the PIO waiting list. The
+ field qp->s_flags is set with HFI1_S_WAIT_PIO_DRAIN;
+
+(2) The QP is put into error state by the user application and
+ notify_error_qp() is called, which removes the QP from the PIO waiting
+ list and the packet from the QP's tx list. In addition, qp->s_flags is
+ cleared of RVT_S_ANY_WAIT_IO bits, which does not include
+ HFI1_S_WAIT_PIO_DRAIN bit;
+
+(3) The hfi1_schedule_send() function is called to drain the QP's send
+ queue. Subsequently, hfi1_do_send() is called. Since the flag bit
+ HFI1_S_WAIT_PIO_DRAIN is set in qp->s_flags, hfi1_send_ok() fails. As
+ a result, hfi1_do_send() bails out without draining any request from
+ the send queue;
+
+(4) The PIO engine completes the sending and tries to wake up any QP on
+ its waiting list. But the QP has been removed from the PIO waiting
+ list and therefore sleeps forever.
+
+The fix is to clear qp->s_flags of HFI1_S_ANY_WAIT_IO bits in step (2).
+HFI1_S_ANY_WAIT_IO includes RVT_S_ANY_WAIT_IO and HFI1_S_WAIT_PIO_DRAIN.
+
+Fixes: 2e2ba09e48b7 ("IB/rdmavt, IB/hfi1: Create device dependent s_flags")
+Cc: <stable@vger.kernel.org> # 4.19.x+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Alex Estrin <alex.estrin@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/qp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/qp.c
++++ b/drivers/infiniband/hw/hfi1/qp.c
+@@ -833,7 +833,7 @@ void notify_error_qp(struct rvt_qp *qp)
+ write_seqlock(lock);
+ if (!list_empty(&priv->s_iowait.list) &&
+ !(qp->s_flags & RVT_S_BUSY)) {
+- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
++ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ rvt_put_qp(qp);
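
The essence of the fix is that the clearing mask must be a superset that
also covers the driver-private wait bit. In miniature, with made-up bit
values standing in for the real flag definitions:

#include <stdio.h>

/* Made-up values standing in for the real flag definitions. */
#define RVT_S_ANY_WAIT_IO      0x00ffu    /* core wait-IO bits */
#define HFI1_S_WAIT_PIO_DRAIN  0x0100u    /* driver-private wait bit */
#define HFI1_S_ANY_WAIT_IO     (RVT_S_ANY_WAIT_IO | HFI1_S_WAIT_PIO_DRAIN)

int main(void)
{
    unsigned int s_flags = HFI1_S_WAIT_PIO_DRAIN;    /* state after step (1) */

    s_flags &= ~RVT_S_ANY_WAIT_IO;     /* old code: private bit survives */
    printf("after old clear: 0x%04x\n", s_flags);    /* 0x0100 -> hang */

    s_flags &= ~HFI1_S_ANY_WAIT_IO;    /* fixed: superset mask */
    printf("after new clear: 0x%04x\n", s_flags);    /* 0x0000 */
    return 0;
}
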
diff --git a/patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch b/patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch
new file mode 100644
index 0000000000..b99e63edd1
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch
@@ -0,0 +1,57 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Mon, 18 Mar 2019 09:55:09 -0700
+Subject: IB/hfi1: Fix WQ_MEM_RECLAIM warning
+Patch-mainline: v5.2-rc1
+Git-commit: 4c4b1996b5db688e2dcb8242b0a3bf7b1e845e42
+References: jsc#SLE-4925
+
+The work_item cancels that occur when a QP is destroyed can elicit the
+following trace:
+
+ workqueue: WQ_MEM_RECLAIM ipoib_wq:ipoib_cm_tx_reap [ib_ipoib] is flushing !WQ_MEM_RECLAIM hfi0_0:_hfi1_do_send [hfi1]
+ WARNING: CPU: 7 PID: 1403 at kernel/workqueue.c:2486 check_flush_dependency+0xb1/0x100
+ Call Trace:
+ __flush_work.isra.29+0x8c/0x1a0
+ ? __switch_to_asm+0x40/0x70
+ __cancel_work_timer+0x103/0x190
+ ? schedule+0x32/0x80
+ iowait_cancel_work+0x15/0x30 [hfi1]
+ rvt_reset_qp+0x1f8/0x3e0 [rdmavt]
+ rvt_destroy_qp+0x65/0x1f0 [rdmavt]
+ ? _cond_resched+0x15/0x30
+ ib_destroy_qp+0xe9/0x230 [ib_core]
+ ipoib_cm_tx_reap+0x21c/0x560 [ib_ipoib]
+ process_one_work+0x171/0x370
+ worker_thread+0x49/0x3f0
+ kthread+0xf8/0x130
+ ? max_active_store+0x80/0x80
+ ? kthread_bind+0x10/0x10
+ ret_from_fork+0x35/0x40
+
+Since QP destruction frees memory, hfi1_wq should have WQ_MEM_RECLAIM set.
+
+The hfi1_wq does not allocate memory with GFP_KERNEL or otherwise become
+entangled with memory reclaim, so this flag is appropriate.
+
+Fixes: 0a226edd203f ("staging/rdma/hfi1: Use parallel workqueue for SDMA engines")
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/init.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -804,7 +804,8 @@ static int create_workqueues(struct hfi1
+ ppd->hfi1_wq =
+ alloc_workqueue(
+ "hfi%d_%d",
+- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
++ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
++ WQ_MEM_RECLAIM,
+ HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
+ dd->unit, pidx);
+ if (!ppd->hfi1_wq)
diff --git a/patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch b/patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch
new file mode 100644
index 0000000000..bf80d506ff
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch
@@ -0,0 +1,96 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:55:49 -0700
+Subject: IB/hfi1: Fix the allocation of RSM table
+Patch-mainline: v5.1-rc5
+Git-commit: d0294344470e6b52d097aa7369173f32d11f2f52
+References: jsc#SLE-4925
+
+The receive side mapping (RSM) on hfi1 hardware is a special
+matching mechanism to direct an incoming packet to a given
+hardware receive context. It has 4 instances of matching capabilities
+(RSM0 - RSM3) that share the same RSM table (RMT). The RMT has a total of
+256 entries, each of which points to a receive context.
+
+Currently, three instances of RSM have been used:
+1. RSM0 by QOS;
+2. RSM1 by PSM FECN;
+3. RSM2 by VNIC.
+
+Each RSM instance should reserve enough entries in RMT to function
+properly. Since both PSM and VNIC could allocate any receive context
+between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts, PSM FECN must
+reserve enough RMT entries to cover the entire receive context index
+range (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt) instead of only
+the user receive contexts allocated for PSM
+(dd->num_user_contexts). Consequently, the sizing of
+dd->num_user_contexts in set_up_context_variables is incorrect.
+
+Fixes: 2280740f01ae ("IB/hfi1: Virtual Network Interface Controller (VNIC) HW support")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -13222,7 +13222,7 @@ static int set_up_context_variables(stru
+ int total_contexts;
+ int ret;
+ unsigned ngroups;
+- int qos_rmt_count;
++ int rmt_count;
+ int user_rmt_reduced;
+ u32 n_usr_ctxts;
+ u32 send_contexts = chip_send_contexts(dd);
+@@ -13284,10 +13284,20 @@ static int set_up_context_variables(stru
+ n_usr_ctxts = rcv_contexts - total_contexts;
+ }
+
+- /* each user context requires an entry in the RMT */
+- qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
+- if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+- user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
++ /*
++ * The RMT entries are currently allocated as shown below:
++ * 1. QOS (0 to 128 entries);
++ * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
++ * 3. VNIC (num_vnic_contexts).
++ * It should be noted that PSM FECN oversubscribes num_vnic_contexts
++ * entries of RMT because both VNIC and PSM could allocate any receive
++ * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
++ * and PSM FECN must reserve an RMT entry for each possible PSM receive
++ * context.
++ */
++ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
++ if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
++ user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
+ dd_dev_err(dd,
+ "RMT size is reducing the number of user receive contexts from %u to %d\n",
+ n_usr_ctxts,
+@@ -14275,9 +14285,11 @@ static void init_user_fecn_handling(stru
+ u64 reg;
+ int i, idx, regoff, regidx;
+ u8 offset;
++ u32 total_cnt;
+
+ /* there needs to be enough room in the map table */
+- if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
++ total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
++ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
+ dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
+ return;
+ }
+@@ -14331,7 +14343,7 @@ static void init_user_fecn_handling(stru
+ /* add rule 1 */
+ add_rsm_rule(dd, RSM_INS_FECN, &rrd);
+
+- rmt->used += dd->num_user_contexts;
++ rmt->used += total_cnt;
+ }
+
+ /* Initialize RSM for VNIC */
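
The sizing rule is plain arithmetic over the 256-entry RMT. A standalone
sketch with representative, made-up context counts:

#include <stdio.h>

#define NUM_MAP_ENTRIES 256    /* total RMT entries */

int main(void)
{
    int qos_entries = 128;    /* example QOS allocation (up to 128) */
    int vnic_ctxts  = 16;     /* example number of VNIC contexts */
    int n_usr_ctxts = 112;    /* requested user (PSM) contexts */

    /* FECN for PSM must cover user + VNIC contexts, and VNIC needs its
     * own entries: qos + (user + vnic) + vnic = qos + user + 2 * vnic */
    int rmt_count = qos_entries + 2 * vnic_ctxts;

    if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
        n_usr_ctxts = NUM_MAP_ENTRIES - rmt_count;    /* 96 here */
        printf("user contexts reduced to %d\n", n_usr_ctxts);
    } else {
        printf("fits: %d of %d RMT entries used\n",
               rmt_count + n_usr_ctxts, NUM_MAP_ENTRIES);
    }
    return 0;
}
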
diff --git a/patches.drivers/IB-hfi1-Fix-two-format-strings.patch b/patches.drivers/IB-hfi1-Fix-two-format-strings.patch
new file mode 100644
index 0000000000..55a84637ad
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Fix-two-format-strings.patch
@@ -0,0 +1,57 @@
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Wed, 27 Mar 2019 16:50:50 -0700
+Subject: IB/hfi1: Fix two format strings
+Patch-mainline: v5.2-rc1
+Git-commit: 920d10e45844d1448d4d279d07fa91e5a7cee4f1
+References: jsc#SLE-4925
+
+Enable format string checking for hfi1_cdbg() and fix the resulting
+compiler warnings.
+
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/init.c | 4 ++--
+ drivers/infiniband/hw/hfi1/trace_dbg.h | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -468,7 +468,7 @@ int hfi1_create_ctxtdata(struct hfi1_ppo
+ if (rcd->egrbufs.size < hfi1_max_mtu) {
+ rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
+ hfi1_cdbg(PROC,
+- "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
++ "ctxt%u: eager bufs size too small. Adjusting to %u\n",
+ rcd->ctxt, rcd->egrbufs.size);
+ }
+ rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
+@@ -2069,7 +2069,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctx
+ rcd->egrbufs.size = alloced_bytes;
+
+ hfi1_cdbg(PROC,
+- "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
++ "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
+ rcd->ctxt, rcd->egrbufs.alloced,
+ rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
+
+--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
++++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
+@@ -86,14 +86,14 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
+ * actual function to work and can not be in a macro.
+ */
+ #define __hfi1_trace_def(lvl) \
+-void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
++void __printf(2, 3) __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+ \
+ DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
+ TP_PROTO(const char *function, struct va_format *vaf), \
+ TP_ARGS(function, vaf))
+
+ #define __hfi1_trace_fn(lvl) \
+-void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
++void __printf(2, 3) __hfi1_trace_##lvl(const char *func, char *fmt, ...)\
+ { \
+ struct va_format vaf = { \
+ .fmt = fmt, \
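
__printf(2, 3) expands to __attribute__((format(printf, 2, 3))), which
lets gcc/clang type-check the variadic arguments against the format string;
that is what surfaced the %zu/%u mismatches fixed in init.c above. The same
mechanism in a standalone program, compiled with -Wall:

#include <stdio.h>
#include <stdarg.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

static void __printf(2, 3) trace(const char *func, const char *fmt, ...)
{
    va_list ap;

    fprintf(stderr, "%s: ", func);
    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
}

int main(void)
{
    size_t size = 4096;

    trace(__func__, "size %zu\n", size);    /* OK */
    /* trace(__func__, "size %u\n", size);     would now warn: %u vs
     * size_t -- the class of mismatch fixed above */
    return 0;
}
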
diff --git a/patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch b/patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch
new file mode 100644
index 0000000000..ef9f7d53e5
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch
@@ -0,0 +1,115 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Wed, 28 Nov 2018 10:19:04 -0800
+Subject: IB/hfi1: Ignore LNI errors before DC8051 transitions to Polling state
+Patch-mainline: v5.0-rc1
+Git-commit: c1a797c0818e0122c7ec8422edd971cfec9b15ea
+References: jsc#SLE-4925
+
+When it is requested to change its physical state back to Offline while in
+the process to go up, DC8051 will set the ERROR field in the
+DC8051_DBG_ERR_INFO_SET_BY_8051 register. This ERROR field will remain
+until the next time when DC8051 transitions from Offline to Polling.
+Subsequently, when the host requests DC8051 to change its physical state
+to Polling again, it may receive a DC8051 interrupt with the stale ERROR
+field still in DC8051_DBG_ERR_INFO_SET_BY_8051. If the host link state has
+been changed to Polling, this stale ERROR will force the host to
+transition to Offline state, resulting in a vicious cycle of Polling
+->Offline->Polling->Offline. On the other hand, if the host link state is
+still Offline when the stale ERROR is received, the stale ERROR will be
+ignored, and the link will come up correctly. This patch implements the
+correct behavior by changing host link state to Polling only after DC8051
+changes its physical state to Polling.
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Krzysztof Goreczny <krzysztof.goreczny@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip.c | 47 +++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 46 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -1072,6 +1072,8 @@ static void log_state_transition(struct
+ static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
+ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs);
++static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
++ int msecs);
+ static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
+ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
+ static void handle_temp_err(struct hfi1_devdata *dd);
+@@ -10771,13 +10773,15 @@ int set_link_state(struct hfi1_pportdata
+ break;
+
+ ppd->port_error_action = 0;
+- ppd->host_link_state = HLS_DN_POLL;
+
+ if (quick_linkup) {
+ /* quick linkup does not go into polling */
+ ret = do_quick_linkup(dd);
+ } else {
+ ret1 = set_physical_link_state(dd, PLS_POLLING);
++ if (!ret1)
++ ret1 = wait_phys_link_out_of_offline(ppd,
++ 3000);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Polling link state, return 0x%x\n",
+@@ -10785,6 +10789,14 @@ int set_link_state(struct hfi1_pportdata
+ ret = -EINVAL;
+ }
+ }
++
++ /*
++ * Change the host link state after requesting DC8051 to
++ * change its physical state so that we can ignore any
++ * interrupt with stale LNI(XX) error, which will not be
++ * cleared until DC8051 transitions to Polling state.
++ */
++ ppd->host_link_state = HLS_DN_POLL;
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
+ /*
+@@ -12924,6 +12936,39 @@ static int wait_phys_link_offline_substa
+ read_state, msecs);
+ return -ETIMEDOUT;
+ }
++ usleep_range(1950, 2050); /* sleep 2ms-ish */
++ }
++
++ log_state_transition(ppd, read_state);
++ return read_state;
++}
++
++/*
++ * wait_phys_link_out_of_offline - wait for any out of offline state
++ * @ppd: port device
++ * @msecs: the number of milliseconds to wait
++ *
++ * Wait up to msecs milliseconds for any out of offline physical link
++ * state change to occur.
++ * Returns the new physical state on success, otherwise -ETIMEDOUT.
++ */
++static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
++ int msecs)
++{
++ u32 read_state;
++ unsigned long timeout;
++
++ timeout = jiffies + msecs_to_jiffies(msecs);
++ while (1) {
++ read_state = read_physical_state(ppd->dd);
++ if ((read_state & 0xF0) != PLS_OFFLINE)
++ break;
++ if (time_after(jiffies, timeout)) {
++ dd_dev_err(ppd->dd,
++ "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
++ read_state, msecs);
++ return -ETIMEDOUT;
++ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
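
wait_phys_link_out_of_offline() is the usual poll-with-deadline idiom. A
userspace rendering of the same loop, with a stand-in state reader and a
coarse second-granularity deadline:

#include <errno.h>
#include <stdint.h>
#include <time.h>

#define PLS_OFFLINE 0x90    /* stand-in for the offline major state */

static uint32_t read_physical_state(void)
{
    static int calls;    /* stand-in CSR read: leaves offline on call 4 */
    return ++calls > 3 ? 0x20 : PLS_OFFLINE;
}

/* Returns the first non-offline state, or -ETIMEDOUT. */
static int wait_out_of_offline(int msecs)
{
    struct timespec deadline, now, nap = { 0, 2 * 1000 * 1000 };  /* ~2 ms */

    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += msecs / 1000;    /* coarse, second granularity */

    for (;;) {
        uint32_t state = read_physical_state();

        if ((state & 0xF0) != PLS_OFFLINE)
            return state;    /* left the offline state machine */
        clock_gettime(CLOCK_MONOTONIC, &now);
        if (now.tv_sec > deadline.tv_sec)
            return -ETIMEDOUT;
        nanosleep(&nap, NULL);
    }
}

int main(void)
{
    return wait_out_of_offline(3000) < 0;
}
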
diff --git a/patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch b/patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch
new file mode 100644
index 0000000000..42271784b0
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch
@@ -0,0 +1,53 @@
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Wed, 28 Nov 2018 10:19:36 -0800
+Subject: IB/hfi1: Incorrect sizing of sge for PIO will OOPs
+Patch-mainline: v5.0-rc1
+Git-commit: dbc2970caef74e8ff41923d302aa6fb5a4812d0e
+References: jsc#SLE-4925
+
+An incorrect sge sizing in the HFI PIO path will cause an Oops similar to
+this:
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+IP: [] hfi1_verbs_send_pio+0x3d8/0x530 [hfi1]
+PGD 0
+Oops: 0000 1 SMP
+ Call Trace:
+ ? hfi1_verbs_send_dma+0xad0/0xad0 [hfi1]
+ hfi1_verbs_send+0xdf/0x250 [hfi1]
+ ? make_rc_ack+0xa80/0xa80 [hfi1]
+ hfi1_do_send+0x192/0x430 [hfi1]
+ hfi1_do_send_from_rvt+0x10/0x20 [hfi1]
+ rvt_post_send+0x369/0x820 [rdmavt]
+ ib_uverbs_post_send+0x317/0x570 [ib_uverbs]
+ ib_uverbs_write+0x26f/0x420 [ib_uverbs]
+ ? security_file_permission+0x21/0xa0
+ vfs_write+0xbd/0x1e0
+ ? mntput+0x24/0x40
+ SyS_write+0x7f/0xe0
+ system_call_fastpath+0x16/0x1b
+
+Fix by adding the missing sizing check to correctly determine the sge
+length.
+
+Fixes: 7724105686e7 ("IB/hfi1: add driver files")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/verbs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -919,6 +919,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *q
+
+ if (slen > len)
+ slen = len;
++ if (slen > ss->sge.sge_length)
++ slen = ss->sge.sge_length;
+ rvt_update_sge(ss, slen, false);
+ seg_pio_copy_mid(pbuf, addr, slen);
+ len -= slen;
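
The fix is simply a second clamp: a copy slice is bounded both by the bytes
left in the payload and by the bytes left in the current SGE. In miniature:

#include <stdio.h>

static unsigned int clamp_slice(unsigned int slen, unsigned int len,
                                unsigned int sge_length)
{
    if (slen > len)
        slen = len;           /* existing bound: bytes left in payload */
    if (slen > sge_length)
        slen = sge_length;    /* added bound: bytes left in this SGE */
    return slen;
}

int main(void)
{
    /* 8 KiB mapped, 6 KiB of payload left, only 4 KiB in this SGE */
    printf("copy %u bytes\n", clamp_slice(8192, 6144, 4096));    /* 4096 */
    return 0;
}
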
diff --git a/patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch b/patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch
new file mode 100644
index 0000000000..0ac9d39cb4
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch
@@ -0,0 +1,43 @@
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Wed, 28 Nov 2018 10:19:25 -0800
+Subject: IB/hfi1: Limit VNIC use of SDMA engines to the available count
+Patch-mainline: v5.0-rc1
+Git-commit: dd6c6a5a2e1e7be615c81ca6d44c2e89e22cb463
+References: jsc#SLE-4925
+
+VNIC assumes that all SDMA engines have been configured for use. This is
+not necessarily true (e.g. if the count was constrained by the module
+parameter).
+
+Update the VNIC usage to use the configured count, rather than the hardware
+count.
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Gary Leshner <gary.s.leshner@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/vnic_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/vnic_main.c
++++ b/drivers/infiniband/hw/hfi1/vnic_main.c
+@@ -816,14 +816,14 @@ struct net_device *hfi1_vnic_alloc_rn(st
+
+ size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
+ netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
+- chip_sdma_engines(dd), dd->num_vnic_contexts);
++ dd->num_sdma, dd->num_vnic_contexts);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+ rn = netdev_priv(netdev);
+ vinfo = opa_vnic_dev_priv(netdev);
+ vinfo->dd = dd;
+- vinfo->num_tx_q = chip_sdma_engines(dd);
++ vinfo->num_tx_q = dd->num_sdma;
+ vinfo->num_rx_q = dd->num_vnic_contexts;
+ vinfo->netdev = netdev;
+ rn->free_rdma_netdev = hfi1_vnic_free_rn;
diff --git a/patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch b/patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch
new file mode 100644
index 0000000000..afa2636d6c
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch
@@ -0,0 +1,287 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Wed, 28 Nov 2018 10:33:00 -0800
+Subject: IB/hfi1: Reduce lock contention on iowait_lock for sdma and pio
+Patch-mainline: v5.0-rc1
+Git-commit: 9aefcabe579bca06325ad9e577a36816f57386ff
+References: jsc#SLE-4925
+
+Commit 4e045572e2c2 ("IB/hfi1: Add unique txwait_lock for txreq events")
+laid the groundwork to support per-resource wait locking.
+
+This patch adds that support with a lock unique to each sdma engine and
+pio sendcontext and makes the necessary changes for verbs, PSM, and vnic
+to use the new locks.
+
+This is particularly beneficial for smaller messages that will exhaust
+resources at a faster rate.
+
+Fixes: 7724105686e7 ("IB/hfi1: add driver files")
+Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/pio.c | 6 +++---
+ drivers/infiniband/hw/hfi1/pio.h | 2 ++
+ drivers/infiniband/hw/hfi1/qp.c | 20 ++++++++------------
+ drivers/infiniband/hw/hfi1/sdma.c | 10 +++++-----
+ drivers/infiniband/hw/hfi1/sdma.h | 1 +
+ drivers/infiniband/hw/hfi1/user_sdma.c | 5 ++---
+ drivers/infiniband/hw/hfi1/verbs.c | 7 +++----
+ drivers/infiniband/hw/hfi1/vnic_sdma.c | 7 +++----
+ 8 files changed, 27 insertions(+), 31 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -742,6 +742,7 @@ struct send_context *sc_alloc(struct hfi
+ spin_lock_init(&sc->alloc_lock);
+ spin_lock_init(&sc->release_lock);
+ spin_lock_init(&sc->credit_ctrl_lock);
++ seqlock_init(&sc->waitlock);
+ INIT_LIST_HEAD(&sc->piowait);
+ INIT_WORK(&sc->halt_work, sc_halted);
+ init_waitqueue_head(&sc->halt_wait);
+@@ -1593,7 +1594,6 @@ void hfi1_sc_wantpiobuf_intr(struct send
+ static void sc_piobufavail(struct send_context *sc)
+ {
+ struct hfi1_devdata *dd = sc->dd;
+- struct hfi1_ibdev *dev = &dd->verbs_dev;
+ struct list_head *list;
+ struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
+ struct rvt_qp *qp;
+@@ -1612,7 +1612,7 @@ static void sc_piobufavail(struct send_c
+ * could end up with QPs on the wait list with the interrupt
+ * disabled.
+ */
+- write_seqlock_irqsave(&dev->iowait_lock, flags);
++ write_seqlock_irqsave(&sc->waitlock, flags);
+ while (!list_empty(list)) {
+ struct iowait *wait;
+
+@@ -1636,7 +1636,7 @@ static void sc_piobufavail(struct send_c
+ if (!list_empty(list))
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
+- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
++ write_sequnlock_irqrestore(&sc->waitlock, flags);
+
+ /* Wake up the most starved one first */
+ if (n)
+--- a/drivers/infiniband/hw/hfi1/pio.h
++++ b/drivers/infiniband/hw/hfi1/pio.h
+@@ -127,6 +127,8 @@ struct send_context {
+ volatile __le64 *hw_free; /* HW free counter */
+ /* list for PIO waiters */
+ struct list_head piowait ____cacheline_aligned_in_smp;
++ seqlock_t waitlock;
++
+ spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
+ u32 credit_intr_count; /* count of credit intr users */
+ u64 credit_ctrl; /* cache for credit control */
+--- a/drivers/infiniband/hw/hfi1/qp.c
++++ b/drivers/infiniband/hw/hfi1/qp.c
+@@ -375,20 +375,18 @@ bool _hfi1_schedule_send(struct rvt_qp *
+
+ static void qp_pio_drain(struct rvt_qp *qp)
+ {
+- struct hfi1_ibdev *dev;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (!priv->s_sendcontext)
+ return;
+- dev = to_idev(qp->ibqp.device);
+ while (iowait_pio_pending(&priv->s_iowait)) {
+- write_seqlock_irq(&dev->iowait_lock);
++ write_seqlock_irq(&priv->s_sendcontext->waitlock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
+- write_sequnlock_irq(&dev->iowait_lock);
++ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
+ iowait_pio_drain(&priv->s_iowait);
+- write_seqlock_irq(&dev->iowait_lock);
++ write_seqlock_irq(&priv->s_sendcontext->waitlock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
+- write_sequnlock_irq(&dev->iowait_lock);
++ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
+ }
+ }
+
+@@ -459,7 +457,6 @@ static int iowait_sleep(
+ struct hfi1_qp_priv *priv;
+ unsigned long flags;
+ int ret = 0;
+- struct hfi1_ibdev *dev;
+
+ qp = tx->qp;
+ priv = qp->priv;
+@@ -472,9 +469,8 @@ static int iowait_sleep(
+ * buffer and undoing the side effects of the copy.
+ */
+ /* Make a common routine? */
+- dev = &sde->dd->verbs_dev;
+ list_add_tail(&stx->list, &wait->tx_head);
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ if (sdma_progress(sde, seq, stx))
+ goto eagain;
+ if (list_empty(&priv->s_iowait.list)) {
+@@ -485,11 +481,11 @@ static int iowait_sleep(
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ iowait_queue(pkts_sent, &priv->s_iowait,
+ &sde->dmawait);
+- priv->s_iowait.lock = &dev->iowait_lock;
++ priv->s_iowait.lock = &sde->waitlock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
+ rvt_get_qp(qp);
+ }
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ hfi1_qp_unbusy(qp, wait);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ret = -EBUSY;
+@@ -499,7 +495,7 @@ static int iowait_sleep(
+ }
+ return ret;
+ eagain:
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ list_del_init(&stx->list);
+ return -EAGAIN;
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -1424,6 +1424,7 @@ int sdma_init(struct hfi1_devdata *dd, u
+ seqlock_init(&sde->head_lock);
+ spin_lock_init(&sde->senddmactrl_lock);
+ spin_lock_init(&sde->flushlist_lock);
++ seqlock_init(&sde->waitlock);
+ /* insure there is always a zero bit */
+ sde->ahg_bits = 0xfffffffe00000000ULL;
+
+@@ -1757,7 +1758,6 @@ static void sdma_desc_avail(struct sdma_
+ struct iowait *wait, *nw;
+ struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
+ uint i, n = 0, seq, max_idx = 0;
+- struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
+ u8 max_starved_cnt = 0;
+
+ #ifdef CONFIG_SDMA_VERBOSITY
+@@ -1767,10 +1767,10 @@ static void sdma_desc_avail(struct sdma_
+ #endif
+
+ do {
+- seq = read_seqbegin(&dev->iowait_lock);
++ seq = read_seqbegin(&sde->waitlock);
+ if (!list_empty(&sde->dmawait)) {
+ /* at least one item */
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ /* Harvest waiters wanting DMA descriptors */
+ list_for_each_entry_safe(
+ wait,
+@@ -1793,10 +1793,10 @@ static void sdma_desc_avail(struct sdma_
+ list_del_init(&wait->list);
+ waits[n++] = wait;
+ }
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ break;
+ }
+- } while (read_seqretry(&dev->iowait_lock, seq));
++ } while (read_seqretry(&sde->waitlock, seq));
+
+ /* Schedule the most starved one first */
+ if (n)
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -382,6 +382,7 @@ struct sdma_engine {
+ u64 progress_int_cnt;
+
+ /* private: */
++ seqlock_t waitlock;
+ struct list_head dmawait;
+
+ /* CONFIG SDMA for now, just blindly duplicate */
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -130,7 +130,6 @@ static int defer_packet_queue(
+ {
+ struct hfi1_user_sdma_pkt_q *pq =
+ container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
+- struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+ struct user_sdma_txreq *tx =
+ container_of(txreq, struct user_sdma_txreq, txreq);
+
+@@ -144,10 +143,10 @@ static int defer_packet_queue(
+ * it is supposed to be enqueued.
+ */
+ xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ if (list_empty(&pq->busy.list))
+ iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ eagain:
+ return -EAGAIN;
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -765,7 +765,6 @@ static int pio_wait(struct rvt_qp *qp,
+ {
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_devdata *dd = sc->dd;
+- struct hfi1_ibdev *dev = &dd->verbs_dev;
+ unsigned long flags;
+ int ret = 0;
+
+@@ -777,7 +776,7 @@ static int pio_wait(struct rvt_qp *qp,
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sc->waitlock);
+ list_add_tail(&ps->s_txreq->txreq.list,
+ &ps->wait->tx_head);
+ if (list_empty(&priv->s_iowait.list)) {
+@@ -790,14 +789,14 @@ static int pio_wait(struct rvt_qp *qp,
+ was_empty = list_empty(&sc->piowait);
+ iowait_queue(ps->pkts_sent, &priv->s_iowait,
+ &sc->piowait);
+- priv->s_iowait.lock = &dev->iowait_lock;
++ priv->s_iowait.lock = &sc->waitlock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
+ rvt_get_qp(qp);
+ /* counting: only call wantpiobuf_intr if first user */
+ if (was_empty)
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sc->waitlock);
+ hfi1_qp_unbusy(qp, ps->wait);
+ ret = -EBUSY;
+ }
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -232,18 +232,17 @@ static int hfi1_vnic_sdma_sleep(struct s
+ {
+ struct hfi1_vnic_sdma *vnic_sdma =
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
+- struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
+
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ if (sdma_progress(sde, seq, txreq)) {
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
+
+ vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
+ if (list_empty(&vnic_sdma->wait.list))
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ }
+
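
The hunks above replace the single device-wide dev->iowait_lock with a per-send-context (sc->waitlock) and per-SDMA-engine (sde->waitlock) seqlock, so waiters on different engines stop contending on one lock. A minimal sketch of the pattern, using hypothetical structure names rather than the driver's real ones:

    #include <linux/seqlock.h>
    #include <linux/list.h>

    /* Sketch: one waitlock per engine instead of one per device. */
    struct engine {
            seqlock_t waitlock;             /* protects dmawait */
            struct list_head dmawait;       /* iowait entries parked here */
    };

    static void engine_init(struct engine *e)
    {
            seqlock_init(&e->waitlock);
            INIT_LIST_HEAD(&e->dmawait);
    }

    static bool engine_has_waiters(struct engine *e)
    {
            unsigned int seq;
            bool busy;

            do {    /* lock-free read side, as in sdma_desc_avail() */
                    seq = read_seqbegin(&e->waitlock);
                    busy = !list_empty(&e->dmawait);
            } while (read_seqretry(&e->waitlock, seq));

            return busy;
    }

Writers take write_seqlock(&e->waitlock) around list manipulation, exactly as the converted call sites do; the win is purely in lock granularity, not in locking discipline.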
diff --git a/patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch b/patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch
new file mode 100644
index 0000000000..2a7f0aadec
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch
@@ -0,0 +1,40 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:59:00 -0700
+Subject: IB/hfi1: Remove WARN_ON when freeing expected receive groups
+Patch-mainline: v5.2-rc1
+Git-commit: 8da0f0f26f80612efadc23beb72d5b66a498a386
+References: jsc#SLE-4925
+
+When a PSM user receive context is freed, the expected receive groups
+allocated by the receive context will also be freed. However, if there
+are still TID entries in use, the receive groups rcd->tid_full_list or
+rcd->tid_used_list will not be empty, triggering the WARN_ONs in
+hfi1_free_ctxt_rcv_groups(). Even when the two lists are not empty, the
+hfi1 driver frees all TID entries and receive groups associated with the
+receive context to prevent any resource leakage. Since a clean user
+application exit is not something the hfi1 driver controls, this patch
+removes the WARN_ONs in hfi1_free_ctxt_rcv_groups().
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/exp_rcv.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
+@@ -112,9 +112,6 @@ int hfi1_alloc_ctxt_rcv_groups(struct hf
+ */
+ void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
+ {
+- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list));
+- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list));
+-
+ kfree(rcd->groups);
+ rcd->groups = NULL;
+ hfi1_exp_tid_group_init(rcd);
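
The change is tiny but the rationale generalizes: WARN_ON() is for kernel invariants, not for states a user process can reach by exiting uncleanly. A hedged sketch of the resulting teardown shape (the helper names here are hypothetical):

    #include <linux/slab.h>

    /* Sketch: reclaim unconditionally instead of warning about a state
     * that a killed or crashed PSM process can legitimately leave. */
    static void ctx_teardown(struct rcv_ctx *rcd)
    {
            release_tid_entries(rcd);      /* hypothetical: frees in-use TIDs */
            kfree(rcd->groups);
            rcd->groups = NULL;
            reinit_tid_groups(rcd);        /* hypothetical re-initialization */
    }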
diff --git a/patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch b/patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch
new file mode 100644
index 0000000000..ae2c29b865
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch
@@ -0,0 +1,44 @@
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Thu, 17 Jan 2019 12:42:04 -0800
+Subject: IB/hfi1: Remove overly conservative VM_EXEC flag check
+Patch-mainline: v5.0-rc5
+Git-commit: 7709b0dc265f28695487712c45f02bbd1f98415d
+References: jsc#SLE-4925
+
+Applications that use the stack for execution purposes cause userspace PSM
+jobs to fail during mmap().
+
+Both Fortran (non-standard format parsing) and C (callback functions
+located in the stack) applications can be written such that stack
+execution is required. The linker notes this via the gnu_stack ELF flag.
+
+This causes READ_IMPLIES_EXEC to be set which forces all PROT_READ mmaps
+to have PROT_EXEC for the process.
+
+Checking for VM_EXEC bit and failing the request with EPERM is overly
+conservative and will break any PSM application using executable stacks.
+
+Cc: <stable@vger.kernel.org> #v4.14+
+Fixes: 12220267645c ("IB/hfi: Protect against writable mmap")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/file_ops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *f
+ vmf = 1;
+ break;
+ case STATUS:
+- if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
++ if (flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
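
To see why the check was too strict: when the GNU_STACK ELF note requests an executable stack, the kernel sets READ_IMPLIES_EXEC for the process, and every PROT_READ mmap() is silently upgraded to PROT_READ|PROT_EXEC, so VM_EXEC shows up on mappings the application never asked to execute. A small userspace sketch of that behavior (illustrative only):

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/personality.h>

    int main(void)
    {
            /* Emulate what an executable-stack binary gets at exec time. */
            personality(personality(0xffffffff) | READ_IMPLIES_EXEC);

            /* The kernel upgrades PROT_READ to PROT_READ|PROT_EXEC here,
             * so a driver mmap handler that rejects VM_EXEC would now
             * fail with EPERM even though only a read mapping was
             * requested. */
            void *p = mmap(NULL, 4096, PROT_READ,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            printf("mapped at %p\n", p);
            return 0;
    }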
diff --git a/patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch b/patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch
new file mode 100644
index 0000000000..051e64350b
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch
@@ -0,0 +1,41 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Wed, 28 Nov 2018 10:22:09 -0800
+Subject: IB/hfi1: Unreserve a reserved request when it is completed
+Patch-mainline: v5.0-rc1
+Git-commit: ca95f802ef5139722acc8d30aeaab6fe5bbe939e
+References: jsc#SLE-4925
+
+Currently, when a reserved operation is completed, its entry in the send
+queue is not unreserved, which leads to a miscalculation of qp->s_avail
+and thus triggers a WARN_ON call trace. This patch fixes the problem by
+unreserving the reserved operation when it completes.
+
+Fixes: 856cc4c237ad ("IB/hfi1: Add the capability for reserved operations")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/rc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -1157,6 +1157,7 @@ void hfi1_rc_send_complete(struct rvt_qp
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
++ rvt_qp_wqe_unreserve(qp, wqe);
+ s_last = qp->s_last;
+ trace_hfi1_qp_send_completion(qp, wqe, s_last);
+ if (++s_last >= qp->s_size)
+@@ -1209,6 +1210,7 @@ static struct rvt_swqe *do_rc_completion
+ u32 s_last;
+
+ rvt_put_swqe(wqe);
++ rvt_qp_wqe_unreserve(qp, wqe);
+ s_last = qp->s_last;
+ trace_hfi1_qp_send_completion(qp, wqe, s_last);
+ if (++s_last >= qp->s_size)
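
The invariant being restored: every reserved send-queue entry that completes must hand back its reserved slot, or the available-entry computation drifts low until the WARN_ON fires. In sketch form, with simplified fields rather than rvt's actual bookkeeping:

    /* Sketch: s_avail is derived from the queue size minus in-flight
     * and reserved entries; skipping the decrement leaks a slot. */
    struct sq_acct {
            u32 size;
            u32 in_flight;
            u32 reserved_in_use;
    };

    static u32 sq_avail(const struct sq_acct *a)
    {
            return a->size - a->in_flight - a->reserved_in_use;
    }

    static void sq_complete(struct sq_acct *a, bool reserved)
    {
            a->in_flight--;
            if (reserved)
                    a->reserved_in_use--;   /* the step this fix adds */
    }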
diff --git a/patches.drivers/IB-hw-Remove-unneeded-semicolons.patch b/patches.drivers/IB-hw-Remove-unneeded-semicolons.patch
new file mode 100644
index 0000000000..60675cfddf
--- /dev/null
+++ b/patches.drivers/IB-hw-Remove-unneeded-semicolons.patch
@@ -0,0 +1,102 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 18 Jan 2019 11:09:00 +0800
+Subject: IB/hw: Remove unneeded semicolons
+Patch-mainline: v5.1-rc1
+Git-commit: 790b57f686e29b93616b3d13b38043f5ec29fa0a
+References: bsc#1136456 jsc#SLE-4689
+
+Remove unneeded semicolons.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ drivers/infiniband/hw/qedr/qedr_iw_cm.c | 2 +-
+ drivers/infiniband/hw/qedr/verbs.c | 14 +++++++-------
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4568,7 +4568,7 @@ static int hns_roce_v2_aeq_int(struct hn
+ dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+- };
++ }
+
+ eq->event_type = event_type;
+ eq->sub_type = sub_type;
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -349,7 +349,7 @@ qedr_iw_event_handler(void *context, str
+ default:
+ DP_NOTICE(dev, "Unknown event received %d\n", params->event);
+ break;
+- };
++ }
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -2128,7 +2128,7 @@ static int qedr_update_qp_state(struct q
+ default:
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_INIT:
+ switch (new_state) {
+@@ -2149,7 +2149,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_RTR:
+ /* RTR->XXX */
+@@ -2162,7 +2162,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_RTS:
+ /* RTS->XXX */
+@@ -2175,7 +2175,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_SQD:
+ /* SQD->XXX */
+@@ -2187,7 +2187,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ /* ERR->XXX */
+@@ -2205,12 +2205,12 @@ static int qedr_update_qp_state(struct q
+ default:
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ default:
+ status = -EINVAL;
+ break;
+- };
++ }
+
+ return status;
+ }
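
For reference, the construct being cleaned up: a semicolon after a switch block is a legal but useless empty statement, which is why static checkers flag it.

    switch (new_state) {
    default:
            status = -EINVAL;
            break;
    };      /* <- this trailing ';' is an empty statement; the hunks drop it */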
diff --git a/patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch b/patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch
new file mode 100644
index 0000000000..5b543aeba1
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch
@@ -0,0 +1,59 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Thu, 17 Jan 2019 12:41:43 -0800
+Subject: IB/rdmavt: Add wc_flags and wc_immdata to cq entry trace
+Patch-mainline: v5.1-rc1
+Git-commit: 14e517e4b444a01d871893b1ea817790ee13dc0b
+References: jsc#SLE-4925
+
+These fields were missing from the trace. Add them.
+
+Fixes: c6ad9482fcb8 ("IB/rdmavt: Add tracing for cq entry and poll")
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/trace_cq.h | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
++++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
+@@ -105,7 +105,7 @@ DEFINE_EVENT(rvt_cq_template, rvt_create
+ TP_ARGS(cq, attr));
+
+ #define CQ_PRN \
+-"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x"
++"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x flags %x imm %x"
+
+ DECLARE_EVENT_CLASS(
+ rvt_cq_entry_template,
+@@ -119,6 +119,8 @@ DECLARE_EVENT_CLASS(
+ __field(u32, qpn)
+ __field(u32, length)
+ __field(u32, idx)
++ __field(u32, flags)
++ __field(u32, imm)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(cq->rdi)
+@@ -128,6 +130,8 @@ DECLARE_EVENT_CLASS(
+ __entry->length = wc->byte_len;
+ __entry->qpn = wc->qp->qp_num;
+ __entry->idx = idx;
++ __entry->flags = wc->wc_flags;
++ __entry->imm = be32_to_cpu(wc->ex.imm_data);
+ ),
+ TP_printk(
+ CQ_PRN,
+@@ -137,7 +141,9 @@ DECLARE_EVENT_CLASS(
+ __entry->status,
+ __entry->opcode, show_wc_opcode(__entry->opcode),
+ __entry->length,
+- __entry->qpn
++ __entry->qpn,
++ __entry->flags,
++ __entry->imm
+ )
+ );
+
diff --git a/patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch b/patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch
new file mode 100644
index 0000000000..50dba7113c
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch
@@ -0,0 +1,80 @@
+From: Josh Collier <josh.d.collier@intel.com>
+Date: Mon, 15 Apr 2019 11:34:22 -0700
+Subject: IB/rdmavt: Fix frwr memory registration
+Patch-mainline: v5.1-rc7
+Git-commit: 7c39f7f671d2acc0a1f39ebbbee4303ad499bbfa
+References: jsc#SLE-4925
+
+The current implementation was not properly handling FRWR memory
+registrations. This was uncovered by commit 27f26cec761d ("xprtrdma:
+Plant XID in on-the-wire RDMA offset (FRWR)"), in which xprtrdma, which
+is used for NFS over RDMA, started failing: it was the first ULP to
+modify the ib_mr iova, resulting in the NFS server getting REMOTE ACCESS
+ERROR when attempting to perform RDMA Writes to the client.
+
+The fix is to properly capture the true iova, offset, and length in the
+call to ib_map_mr_sg, and then update the iova when processing the
+IB_WR_REG_MEM on the send queue.
+
+Fixes: a41081aa5936 ("IB/rdmavt: Add support for ib_map_mr_sg")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Josh Collier <josh.d.collier@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/mr.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/sw/rdmavt/mr.c
++++ b/drivers/infiniband/sw/rdmavt/mr.c
+@@ -609,11 +609,6 @@ static int rvt_set_page(struct ib_mr *ib
+ if (unlikely(mapped_segs == mr->mr.max_segs))
+ return -ENOMEM;
+
+- if (mr->mr.length == 0) {
+- mr->mr.user_base = addr;
+- mr->mr.iova = addr;
+- }
+-
+ m = mapped_segs / RVT_SEGSZ;
+ n = mapped_segs % RVT_SEGSZ;
+ mr->mr.map[m]->segs[n].vaddr = (void *)addr;
+@@ -631,17 +626,24 @@ static int rvt_set_page(struct ib_mr *ib
+ * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
+ *
++ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
++ *
+ * Return: number of sg elements mapped to the memory region
+ */
+ int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+ {
+ struct rvt_mr *mr = to_imr(ibmr);
++ int ret;
+
+ mr->mr.length = 0;
+ mr->mr.page_shift = PAGE_SHIFT;
+- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+- rvt_set_page);
++ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
++ mr->mr.user_base = ibmr->iova;
++ mr->mr.iova = ibmr->iova;
++ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
++ mr->mr.length = (size_t)ibmr->length;
++ return ret;
+ }
+
+ /**
+@@ -672,6 +674,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, s
+ ibmr->rkey = key;
+ mr->mr.lkey = key;
+ mr->mr.access_flags = access;
++ mr->mr.iova = ibmr->iova;
+ atomic_set(&mr->mr.lkey_invalid, 0);
+
+ return 0;
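
The contract the fix enforces, seen from the ULP side: ib_map_mr_sg() captures the SG layout, after which the ULP may overwrite ibmr->iova (as xprtrdma does to plant the XID), and the driver must honor that final iova when processing IB_WR_REG_MR. A hedged sketch with hypothetical ULP code:

    #include <rdma/ib_verbs.h>

    /* Sketch of an FRWR registration from a ULP's point of view. */
    static int ulp_register(struct ib_mr *mr, struct scatterlist *sg,
                            int sg_nents, u64 xid)
    {
            int n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);

            if (n != sg_nents)
                    return -EIO;            /* simplified error handling */

            mr->iova |= xid;                /* ULP may adjust the iova... */
            /* ...so the driver must apply mr->iova when it executes
             * IB_WR_REG_MR (rvt_fast_reg_mr above), not the first page
             * address it saw in rvt_set_page(). */
            return 0;
    }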
diff --git a/patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch b/patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch
new file mode 100644
index 0000000000..3475e75e69
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch
@@ -0,0 +1,73 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Tue, 26 Feb 2019 08:45:16 -0800
+Subject: IB/rdmavt: Fix loopback send with invalidate ordering
+Patch-mainline: v5.1-rc1
+Git-commit: 38bbc9f0381550d1d227fc57afa08436e36b32fc
+References: jsc#SLE-4925
+
+The IBTA spec notes:
+
+o9-5.2.1: For any HCA which supports SEND with Invalidate, upon receiving
+an IETH, the Invalidate operation must not take place until after the
+normal transport header validation checks have been successfully
+completed.
+
+The rdmavt loopback code does the validation after the invalidate.
+
+Fix this by relocating the operation-specific logic for all SEND variants
+to after the validity checks.
+
+Cc: <stable@vger.kernel.org> #v4.20+
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/qp.c | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -2898,18 +2898,8 @@ again:
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
+- wc.wc_flags = IB_WC_WITH_INVALIDATE;
+- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
+- }
+- goto send;
+-
+ case IB_WR_SEND_WITH_IMM:
+- wc.wc_flags = IB_WC_WITH_IMM;
+- wc.ex.imm_data = wqe->wr.ex.imm_data;
+- /* FALLTHROUGH */
+ case IB_WR_SEND:
+-send:
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto op_err;
+@@ -2917,6 +2907,22 @@ send:
+ goto rnr_nak;
+ if (wqe->length > qp->r_len)
+ goto inv_err;
++ switch (wqe->wr.opcode) {
++ case IB_WR_SEND_WITH_INV:
++ if (!rvt_invalidate_rkey(qp,
++ wqe->wr.ex.invalidate_rkey)) {
++ wc.wc_flags = IB_WC_WITH_INVALIDATE;
++ wc.ex.invalidate_rkey =
++ wqe->wr.ex.invalidate_rkey;
++ }
++ break;
++ case IB_WR_SEND_WITH_IMM:
++ wc.wc_flags = IB_WC_WITH_IMM;
++ wc.ex.imm_data = wqe->wr.ex.imm_data;
++ break;
++ default:
++ break;
++ }
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
diff --git a/patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch b/patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch
new file mode 100644
index 0000000000..085db2a1ec
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch
@@ -0,0 +1,40 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Thu, 11 Apr 2019 07:15:49 -0700
+Subject: IB/{rdmavt, hfi1}: Miscellaneous comment fixes
+Patch-mainline: v5.2-rc1
+Git-commit: ea752bc5e50a03e337dfa5c8940d357c62300f8a
+References: jsc#SLE-4925
+
+This patch fixes miscellaneous comment errors.
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/ruc.c | 2 +-
+ include/rdma/rdmavt_qp.h | 1 -
+ 2 files changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/ruc.c
++++ b/drivers/infiniband/hw/hfi1/ruc.c
+@@ -524,7 +524,7 @@ void _hfi1_do_send(struct work_struct *w
+
+ /**
+ * hfi1_do_send - perform a send on a QP
+- * @work: contains a pointer to the QP
++ * @qp: a pointer to the QP
+ * @in_thread: true if in a workqueue thread
+ *
+ * Process entries in the send work queue until credit or queue is
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -83,7 +83,6 @@
+ * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ * next send completion entry not via send DMA
+ * RVT_S_WAIT_PIO - waiting for a send buffer to be available
+- * RVT_S_WAIT_PIO_DRAIN - waiting for a qp to drain pio packets
+ * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
+ * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
diff --git a/patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch b/patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch
new file mode 100644
index 0000000000..c9871f2cc1
--- /dev/null
+++ b/patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch
@@ -0,0 +1,46 @@
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Thu, 28 Mar 2019 11:49:44 -0500
+Subject: RDMA/cxbg: Use correct sizing on buffers holding page DMA addresses
+Patch-mainline: v5.2-rc1
+Git-commit: 5f818d676ac455bbc812ffaaf5bf780be5465114
+References: bsc#1136348 jsc#SLE-4684
+
+The PBL array that holds the page DMA addresses is sized off umem->nmap.
+This can potentially cause out-of-bounds accesses on the PBL array when
+iterating the umem DMA-mapped SGL, because if umem pages are combined,
+umem->nmap can be much lower than the number of system pages in umem.
+
+Use ib_umem_num_pages() to size this array.
+
+Cc: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb3/iwch_provider.c | 2 +-
+ drivers/infiniband/hw/cxgb4/mem.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
+@@ -549,7 +549,7 @@ static struct ib_mr *iwch_reg_user_mr(st
+
+ shift = mhp->umem->page_shift;
+
+- n = mhp->umem->nmap;
++ n = ib_umem_num_pages(mhp->umem);
+
+ err = iwch_alloc_pbl(mhp, n);
+ if (err)
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib
+
+ shift = mhp->umem->page_shift;
+
+- n = mhp->umem->nmap;
++ n = ib_umem_num_pages(mhp->umem);
+ err = alloc_pbl(mhp, n);
+ if (err)
+ goto err_umem_release;
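
The underlying distinction: umem->nmap counts DMA-mapped SGL entries, which dma_map_sg() is free to coalesce, while the PBL needs one slot per system page. Sizing by pages is therefore always safe. A sketch with a hypothetical helper:

    #include <linux/slab.h>
    #include <rdma/ib_umem.h>

    /* Sketch: size the PBL by CPU pages, never by mapped SGL entries. */
    static __be64 *alloc_pbl_for(struct ib_umem *umem, int *npages)
    {
            *npages = ib_umem_num_pages(umem);      /* >= umem->nmap */
            return kcalloc(*npages, sizeof(__be64), GFP_KERNEL);
    }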
diff --git a/patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch b/patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch
new file mode 100644
index 0000000000..ddfd755781
--- /dev/null
+++ b/patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch
@@ -0,0 +1,33 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Sat, 13 Apr 2019 17:00:26 +0100
+Subject: RDMA/cxgb4: Fix null pointer dereference on alloc_skb failure
+Patch-mainline: v5.2-rc1
+Git-commit: a6d2a5a92e67d151c98886babdc86d530d27111c
+References: bsc#1136348 jsc#SLE-4684
+
+Currently, if alloc_skb fails to allocate the skb, a NULL skb is passed
+to t4_set_arp_err_handler, which ends up dereferencing it. Avoid the NULL
+pointer dereference by checking for a NULL skb and returning early.
+
+Addresses-Coverity: ("Dereference null return")
+Fixes: b38a0ad8ec11 ("RDMA/cxgb4: Set arp error handler for PASS_ACCEPT_RPL messages")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Acked-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -459,6 +459,8 @@ static struct sk_buff *get_skb(struct sk
+ skb_reset_transport_header(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
++ if (!skb)
++ return NULL;
+ }
+ t4_set_arp_err_handler(skb, NULL, NULL);
+ return skb;
diff --git a/patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch b/patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch
new file mode 100644
index 0000000000..c86e1b16f6
--- /dev/null
+++ b/patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch
@@ -0,0 +1,27 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 16 Apr 2019 15:38:04 +0100
+Subject: RDMA/cxgb4: Fix spelling mistake "immedate" -> "immediate"
+Patch-mainline: v5.2-rc1
+Git-commit: ff5eefe6d3a3a2cd93b71165741ebdeda6d58e1d
+References: bsc#1136348 jsc#SLE-4684
+
+There is a spelling mistake in a module parameter description. Fix it.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/qp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -56,7 +56,7 @@ MODULE_PARM_DESC(db_coalescing_threshold
+
+ static int max_fr_immd = T4_MAX_FR_IMMD;
+ module_param(max_fr_immd, int, 0644);
+-MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
++MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
+
+ static int alloc_ird(struct c4iw_dev *dev, u32 ird)
+ {
diff --git a/patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch b/patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch
new file mode 100644
index 0000000000..3af9c69696
--- /dev/null
+++ b/patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch
@@ -0,0 +1,112 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 12 Feb 2019 20:39:15 +0200
+Subject: RDMA/cxgb4: Remove kref accounting for sync operation
+Patch-mainline: v5.1-rc1
+Git-commit: cfe876d8e6b0491170d44c8040c518b121957104
+References: bsc#1136348 jsc#SLE-4684
+
+Ucontext allocation and release aren't async events and don't need kref
+accounting. The common layer of the RDMA subsystem ensures that
+dealloc_ucontext will be called after all other objects are released.
+
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Steve Wise <swise@opengridcomputing.com>
+Tested-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 13 -------------
+ drivers/infiniband/hw/cxgb4/provider.c | 16 +++-------------
+ drivers/infiniband/hw/cxgb4/qp.c | 3 ---
+ 3 files changed, 3 insertions(+), 29 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -590,7 +590,6 @@ struct c4iw_ucontext {
+ u32 key;
+ spinlock_t mmap_lock;
+ struct list_head mmaps;
+- struct kref kref;
+ bool is_32b_cqe;
+ };
+
+@@ -599,18 +598,6 @@ static inline struct c4iw_ucontext *to_c
+ return container_of(c, struct c4iw_ucontext, ibucontext);
+ }
+
+-void _c4iw_free_ucontext(struct kref *kref);
+-
+-static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+-{
+- kref_put(&ucontext->kref, _c4iw_free_ucontext);
+-}
+-
+-static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+-{
+- kref_get(&ucontext->kref);
+-}
+-
+ struct c4iw_mm_entry {
+ struct list_head entry;
+ u64 addr;
+--- a/drivers/infiniband/hw/cxgb4/provider.c
++++ b/drivers/infiniband/hw/cxgb4/provider.c
+@@ -58,28 +58,19 @@ static int fastreg_support = 1;
+ module_param(fastreg_support, int, 0644);
+ MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
+
+-void _c4iw_free_ucontext(struct kref *kref)
++static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+ {
+- struct c4iw_ucontext *ucontext;
++ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_dev *rhp;
+ struct c4iw_mm_entry *mm, *tmp;
+
+- ucontext = container_of(kref, struct c4iw_ucontext, kref);
++ pr_debug("context %p\n", context);
+ rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+- pr_debug("ucontext %p\n", ucontext);
+ list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
+ kfree(mm);
+ c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
+ kfree(ucontext);
+-}
+-
+-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+-{
+- struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+-
+- pr_debug("context %p\n", context);
+- c4iw_put_ucontext(ucontext);
+ return 0;
+ }
+
+@@ -102,7 +93,6 @@ static struct ib_ucontext *c4iw_alloc_uc
+ c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
+ INIT_LIST_HEAD(&context->mmaps);
+ spin_lock_init(&context->mmap_lock);
+- kref_init(&context->kref);
+
+ if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
+ pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -903,8 +903,6 @@ static void free_qp_work(struct work_str
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
+
+- if (ucontext)
+- c4iw_put_ucontext(ucontext);
+ c4iw_put_wr_wait(qhp->wr_waitp);
+ kfree(qhp);
+ }
+@@ -2338,7 +2336,6 @@ struct ib_qp *c4iw_create_qp(struct ib_p
+ insert_mmap(ucontext, ma_sync_key_mm);
+ }
+
+- c4iw_get_ucontext(ucontext);
+ qhp->ucontext = ucontext;
+ }
+ if (!attrs->srq) {
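
For contrast, the idiom that was removed. kref accounting pays off only when an object's release can race with outstanding references; here the RDMA core guarantees QPs are destroyed before dealloc_ucontext runs, so the get/put pairs were pure overhead. The general pattern, as a minimal sketch:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref kref;
            /* payload ... */
    };

    static void obj_release(struct kref *kref)
    {
            kfree(container_of(kref, struct obj, kref));
    }

    /* Each asynchronous user pairs kref_get(&o->kref) with
     * kref_put(&o->kref, obj_release); the last put frees the object.
     * When a core layer already serializes teardown, as it does for
     * ucontexts, none of this machinery is needed. */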
diff --git a/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch b/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch
new file mode 100644
index 0000000000..182a71dcf2
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch
@@ -0,0 +1,447 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:53 +0800
+Subject: RDMA/hns: Add SCC context allocation support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 6a157f7d1b14eb88d89fbd396cfea15ac4bded2d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds SCC context allocation and initialization support for
+DCQCN in kernel space driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.h | 4 ++
+ drivers/infiniband/hw/hns/hns_roce_device.h | 6 +++
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 26 ++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hem.h | 1
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 46 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 33 ++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_main.c | 18 ++++++++++
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 20 +++++++++++-
+ 8 files changed, 146 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
+@@ -98,6 +98,10 @@ enum {
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
++
++ /* SCC CTX BT commands */
++ HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
++ HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,
+ };
+
+ enum {
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -482,6 +482,7 @@ struct hns_roce_qp_table {
+ struct hns_roce_hem_table qp_table;
+ struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
++ struct hns_roce_hem_table sccc_table;
+ };
+
+ struct hns_roce_cq_table {
+@@ -769,6 +770,7 @@ struct hns_roce_caps {
+ int irrl_entry_sz;
+ int trrl_entry_sz;
+ int cqc_entry_sz;
++ int sccc_entry_sz;
+ int srqc_entry_sz;
+ int idx_entry_sz;
+ u32 pbl_ba_pg_sz;
+@@ -781,6 +783,7 @@ struct hns_roce_caps {
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
+ u32 mpt_bt_num;
++ u32 sccc_bt_num;
+ u32 qpc_ba_pg_sz;
+ u32 qpc_buf_pg_sz;
+ u32 qpc_hop_num;
+@@ -796,6 +799,9 @@ struct hns_roce_caps {
+ u32 mtt_ba_pg_sz;
+ u32 mtt_buf_pg_sz;
+ u32 mtt_hop_num;
++ u32 sccc_ba_pg_sz;
++ u32 sccc_buf_pg_sz;
++ u32 sccc_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -45,6 +45,7 @@ bool hns_roce_check_whether_mhop(struct
+ (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
++ (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
+ (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
+@@ -125,6 +126,14 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_hop_num;
+ break;
++ case HEM_TYPE_SCCC:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
++ mhop->hop_num = hr_dev->caps.sccc_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -175,7 +184,7 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ return 0;
+
+ /*
+- * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
++ * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
+ * MTT/CQE alloc hem for bt pages.
+ */
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+@@ -486,7 +495,7 @@ static int hns_roce_table_mhop_get(struc
+ }
+
+ /*
+- * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
++ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * alloc bt space chunk for MTT/CQE.
+ */
+ size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
+@@ -658,7 +667,7 @@ static void hns_roce_table_mhop_put(stru
+ }
+
+ /*
+- * free buffer space chunk for QPC/MTPT/CQC/SRQC.
++ * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * free bt space chunk for MTT/CQE.
+ */
+ hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
+@@ -904,6 +913,14 @@ int hns_roce_init_hem_table(struct hns_r
+ num_bt_l0 = hr_dev->caps.cqc_bt_num;
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
++ case HEM_TYPE_SCCC:
++ buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.sccc_bt_num;
++ hop_num = hr_dev->caps.sccc_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -1081,6 +1098,9 @@ void hns_roce_cleanup_hem(struct hns_roc
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -44,6 +44,7 @@ enum {
+ HEM_TYPE_MTPT,
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
++ HEM_TYPE_SCCC,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1078,6 +1078,9 @@ static int hns_roce_query_pf_resource(st
+ hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
+ PF_RES_DATA_3_PF_SL_NUM_M,
+ PF_RES_DATA_3_PF_SL_NUM_S);
++ hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
++ PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
++ PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
+
+ return 0;
+ }
+@@ -1193,6 +1196,14 @@ static int hns_roce_alloc_vf_resource(st
+ VF_RES_B_DATA_3_VF_SL_NUM_M,
+ VF_RES_B_DATA_3_VF_SL_NUM_S,
+ HNS_ROCE_VF_SL_NUM);
++
++ roce_set_field(req_b->vf_sccc_idx_num,
++ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
++ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
++ roce_set_field(req_b->vf_sccc_idx_num,
++ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
++ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
++ HNS_ROCE_VF_SCCC_BT_NUM);
+ }
+ }
+
+@@ -1205,6 +1216,7 @@ static int hns_roce_v2_set_bt(struct hns
+ u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
+ u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
+ u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
++ u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
+ struct hns_roce_cfg_bt_attr *req;
+ struct hns_roce_cmq_desc desc;
+
+@@ -1252,6 +1264,20 @@ static int hns_roce_v2_set_bt(struct hns
+ CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
+ mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
+
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
++ hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
++ hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
++ sccc_hop_num ==
++ HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
++
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+ }
+
+@@ -1408,9 +1434,14 @@ static int hns_roce_v2_profile(struct hn
+ caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
+ caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
+
+- if (hr_dev->pci_dev->revision == 0x21)
++ if (hr_dev->pci_dev->revision == 0x21) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
+ HNS_ROCE_CAP_FLAG_SRQ;
++ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
++ caps->sccc_ba_pg_sz = 0;
++ caps->sccc_buf_pg_sz = 0;
++ caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
++ }
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret)
+@@ -2663,11 +2694,18 @@ static int hns_roce_v2_set_hem(struct hn
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ break;
++ case HEM_TYPE_SCCC:
++ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
++ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+ return 0;
+ }
++
++ if (table->type == HEM_TYPE_SCCC && step_idx)
++ return 0;
++
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+@@ -2722,6 +2760,8 @@ static int hns_roce_v2_clear_hem(struct
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
++ case HEM_TYPE_SCCC:
++ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ break;
+@@ -2730,6 +2770,10 @@ static int hns_roce_v2_clear_hem(struct
+ table->type);
+ return 0;
+ }
++
++ if (table->type == HEM_TYPE_SCCC)
++ return 0;
++
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -36,6 +36,7 @@
+ #include <linux/bitops.h>
+
+ #define HNS_ROCE_VF_QPC_BT_NUM 256
++#define HNS_ROCE_VF_SCCC_BT_NUM 64
+ #define HNS_ROCE_VF_SRQC_BT_NUM 64
+ #define HNS_ROCE_VF_CQC_BT_NUM 64
+ #define HNS_ROCE_VF_MPT_BT_NUM 64
+@@ -83,6 +84,7 @@
+ #define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
++#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x100
+@@ -91,6 +93,7 @@
+ #define HNS_ROCE_V2_RSV_QPS 8
+
+ #define HNS_ROCE_CONTEXT_HOP_NUM 1
++#define HNS_ROCE_SCCC_HOP_NUM 1
+ #define HNS_ROCE_MTT_HOP_NUM 1
+ #define HNS_ROCE_CQE_HOP_NUM 1
+ #define HNS_ROCE_SRQWQE_HOP_NUM 1
+@@ -1300,7 +1303,8 @@ struct hns_roce_pf_res_b {
+ __le32 smac_idx_num;
+ __le32 sgid_idx_num;
+ __le32 qid_idx_sl_num;
+- __le32 rsv[2];
++ __le32 sccc_bt_idx_num;
++ __le32 rsv;
+ };
+
+ #define PF_RES_DATA_1_PF_SMAC_IDX_S 0
+@@ -1321,6 +1325,12 @@ struct hns_roce_pf_res_b {
+ #define PF_RES_DATA_3_PF_SL_NUM_S 16
+ #define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
+
++#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0
++#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0)
++
++#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
++#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
++
+ struct hns_roce_vf_res_a {
+ __le32 vf_id;
+ __le32 vf_qpc_bt_idx_num;
+@@ -1365,7 +1375,8 @@ struct hns_roce_vf_res_b {
+ __le32 vf_smac_idx_num;
+ __le32 vf_sgid_idx_num;
+ __le32 vf_qid_idx_sl_num;
+- __le32 rsv[2];
++ __le32 vf_sccc_idx_num;
++ __le32 rsv1;
+ };
+
+ #define VF_RES_B_DATA_0_VF_ID_S 0
+@@ -1389,6 +1400,12 @@ struct hns_roce_vf_res_b {
+ #define VF_RES_B_DATA_3_VF_SL_NUM_S 16
+ #define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
+
++#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
++#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
++
++#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
++#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
++
+ struct hns_roce_vf_switch {
+ __le32 rocee_sel;
+ __le32 fun_id;
+@@ -1424,7 +1441,8 @@ struct hns_roce_cfg_bt_attr {
+ __le32 vf_srqc_cfg;
+ __le32 vf_cqc_cfg;
+ __le32 vf_mpt_cfg;
+- __le32 rsv[2];
++ __le32 vf_sccc_cfg;
++ __le32 rsv;
+ };
+
+ #define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
+@@ -1463,6 +1481,15 @@ struct hns_roce_cfg_bt_attr {
+ #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
+ #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0)
++
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4)
++
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8)
++
+ struct hns_roce_cfg_sgid_tb {
+ __le32 table_idx_rsv;
+ __le32 vf_sgid_l;
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -704,8 +704,26 @@ static int hns_roce_init_hem(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.sccc_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table,
++ HEM_TYPE_SCCC,
++ hr_dev->caps.sccc_entry_sz,
++ hr_dev->caps.num_qps, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init SCC context memory, aborting.\n");
++ goto err_unmap_idx;
++ }
++ }
++
+ return 0;
+
++err_unmap_idx:
++ if (hr_dev->caps.num_idx_segs)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->mr_table.mtt_idx_table);
++
+ err_unmap_srqwqe:
+ if (hr_dev->caps.num_srqwqe_segs)
+ hns_roce_cleanup_hem_table(hr_dev,
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -209,13 +209,23 @@ static int hns_roce_qp_alloc(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.sccc_entry_sz) {
++ /* Alloc memory for SCC CTX */
++ ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
++ if (ret) {
++ dev_err(dev, "SCC CTX table get failed\n");
++ goto err_put_trrl;
++ }
++ }
++
+ spin_lock_irq(&qp_table->lock);
+ ret = radix_tree_insert(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (ret) {
+ dev_err(dev, "QPC radix_tree_insert failed\n");
+- goto err_put_trrl;
++ goto err_put_sccc;
+ }
+
+ atomic_set(&hr_qp->refcount, 1);
+@@ -223,6 +233,11 @@ static int hns_roce_qp_alloc(struct hns_
+
+ return 0;
+
++err_put_sccc:
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_table_put(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
++
+ err_put_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+@@ -258,6 +273,9 @@ void hns_roce_qp_free(struct hns_roce_de
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_table_put(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
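
The new err_unmap_idx label in hns_roce_main.c follows the kernel's standard goto-unwind idiom: each allocation step adds one label that undoes exactly the steps that succeeded before it. In sketch form, with hypothetical helpers:

    static int init_tables(void)
    {
            int ret;

            ret = init_a();
            if (ret)
                    return ret;

            ret = init_b();
            if (ret)
                    goto err_a;

            ret = init_c();         /* the newly added SCC table step */
            if (ret)
                    goto err_b;

            return 0;

    err_b:
            cleanup_b();
    err_a:
            cleanup_a();
            return ret;
    }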
diff --git a/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch b/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch
new file mode 100644
index 0000000000..23cd338997
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch
@@ -0,0 +1,189 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:54 +0800
+Subject: RDMA/hns: Add SCC context clr support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: aa84fa18741b83daf0f8f160c46ae92f4d6f1343
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds SCC context clear support for DCQCN in the kernel-space
+driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 4 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 59 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 15 +++++++
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 8 +++
+ 4 files changed, 85 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -202,6 +202,7 @@ enum {
+ HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+ HNS_ROCE_CAP_FLAG_MW = BIT(7),
+ HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
++ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
+ HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ };
+
+@@ -483,6 +484,7 @@ struct hns_roce_qp_table {
+ struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
+ struct hns_roce_hem_table sccc_table;
++ struct mutex scc_mutex;
+ };
+
+ struct hns_roce_cq_table {
+@@ -868,6 +870,8 @@ struct hns_roce_hw {
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state);
+ int (*destroy_qp)(struct ib_qp *ibqp);
++ int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
++ struct hns_roce_qp *hr_qp);
+ int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+ int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1436,7 +1436,9 @@ static int hns_roce_v2_profile(struct hn
+
+ if (hr_dev->pci_dev->revision == 0x21) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
+- HNS_ROCE_CAP_FLAG_SRQ;
++ HNS_ROCE_CAP_FLAG_SRQ |
++ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
++
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+@@ -4277,6 +4279,60 @@ static int hns_roce_v2_destroy_qp(struct
+ return 0;
+ }
+
++static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
++ struct hns_roce_qp *hr_qp)
++{
++ struct hns_roce_sccc_clr_done *rst, *resp;
++ struct hns_roce_sccc_clr *clr;
++ struct hns_roce_cmq_desc desc;
++ int ret, i;
++
++ mutex_lock(&hr_dev->qp_table.scc_mutex);
++
++ /* set scc ctx clear done flag */
++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
++ rst = (struct hns_roce_sccc_clr_done *)desc.data;
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
++ goto out;
++ }
++
++ /* clear scc context */
++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
++ clr = (struct hns_roce_sccc_clr *)desc.data;
++ clr->qpn = cpu_to_le32(hr_qp->qpn);
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
++ goto out;
++ }
++
++ /* query scc context clear is done or not */
++ resp = (struct hns_roce_sccc_clr_done *)desc.data;
++ for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
++ hns_roce_cmq_setup_basic_desc(&desc,
++ HNS_ROCE_OPC_QUERY_SCCC, true);
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
++ goto out;
++ }
++
++ if (resp->clr_done)
++ goto out;
++
++ msleep(20);
++ }
++
++ dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
++ ret = -ETIMEDOUT;
++
++out:
++ mutex_unlock(&hr_dev->qp_table.scc_mutex);
++ return ret;
++}
++
+ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
+@@ -5819,6 +5875,7 @@ static const struct hns_roce_hw hns_roce
+ .modify_qp = hns_roce_v2_modify_qp,
+ .query_qp = hns_roce_v2_query_qp,
+ .destroy_qp = hns_roce_v2_destroy_qp,
++ .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
+ .modify_cq = hns_roce_v2_modify_cq,
+ .post_send = hns_roce_v2_post_send,
+ .post_recv = hns_roce_v2_post_recv,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -123,6 +123,8 @@
+ #define HNS_ROCE_CMQ_EN_B 16
+ #define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
+
++#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
++
+ #define check_whether_last_step(hop_num, step_idx) \
+ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (step_idx == 1 && hop_num == 1) || \
+@@ -232,6 +234,9 @@ enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_POST_MB = 0x8504,
+ HNS_ROCE_OPC_QUERY_MB_ST = 0x8505,
+ HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
++ HNS_ROCE_OPC_CLR_SCCC = 0x8509,
++ HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
++ HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_SWITCH_PARAMETER_CFG = 0x1033,
+ };
+
+@@ -1757,4 +1762,14 @@ struct hns_roce_wqe_atomic_seg {
+ __le64 cmp_data;
+ };
+
++struct hns_roce_sccc_clr {
++ __le32 qpn;
++ __le32 rsv[5];
++};
++
++struct hns_roce_sccc_clr_done {
++ __le32 clr_done;
++ __le32 rsv[5];
++};
++
+ #endif
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -812,6 +812,13 @@ static int hns_roce_create_qp_common(str
+ if (ret)
+ goto err_qp;
+ }
++
++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
++ ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
++ if (ret)
++ goto err_qp;
++ }
++
+ hr_qp->event = hns_roce_ib_qp_event;
+
+ return 0;
+@@ -1153,6 +1160,7 @@ int hns_roce_init_qp_table(struct hns_ro
+ int reserved_from_bot;
+ int ret;
+
++ mutex_init(&qp_table->scc_mutex);
+ spin_lock_init(&qp_table->lock);
+ INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+
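
The clear sequence added above is a three-command firmware handshake serialized by scc_mutex: reset the "done" flag, issue the clear for one QPN, then poll the flag a bounded number of times with 20 ms sleeps. Reduced to its shape, with hypothetical helpers:

    static int scc_clear(u32 qpn)
    {
            int ret, i;

            mutex_lock(&scc_mutex);         /* one clear sequence at a time */
            ret = fw_cmd(RESET_SCCC, 0);    /* arm the done flag */
            if (ret)
                    goto out;
            ret = fw_cmd(CLR_SCCC, qpn);    /* clear this QP's context */
            if (ret)
                    goto out;

            ret = -ETIMEDOUT;
            for (i = 0; i <= MAX_POLLS; i++) {      /* bounded polling */
                    if (fw_query_done()) {
                            ret = 0;
                            break;
                    }
                    msleep(20);
            }
    out:
            mutex_unlock(&scc_mutex);
            return ret;
    }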
diff --git a/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch b/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch
new file mode 100644
index 0000000000..b2f6bc67ef
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch
@@ -0,0 +1,42 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:08 +0800
+Subject: RDMA/hns: Add constraint on the setting of local ACK timeout
+Patch-mainline: v5.1-rc1
+Git-commit: 44754b95dd35ee07c462b5425ae9c4cde8c7e7c8
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+According to the IB protocol, the local ACK timeout shall be a 5-bit
+value. Currently, hip08 cannot support the maximum possible value of 31;
+warn and skip the update in this case.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3686,10 +3686,16 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+- V2_QPC_BYTE_28_AT_S, attr->timeout);
+- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+- V2_QPC_BYTE_28_AT_S, 0);
++ if (attr->timeout < 31) {
++ roce_set_field(context->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ attr->timeout);
++ roce_set_field(qpc_mask->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ 0);
++ } else {
++ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
++ }
+ }
+
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
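
For scale: IBTA encodes the local ACK timeout as a 5-bit exponent, timeout = 4.096 us * 2^at, so the unsupported maximum of 31 would mean a retry timeout of roughly 2.4 hours. A quick standalone computation (plain C, illustrative):

    #include <stdio.h>

    int main(void)
    {
            /* IBTA local ACK timeout: 4.096 us * 2^at, at in [0, 31] */
            for (int at = 28; at <= 31; at++)
                    printf("at=%2d -> %10.3f s\n",
                           at, 4.096e-6 * (1ULL << at));
            return 0;
    }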
diff --git a/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch b/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch
new file mode 100644
index 0000000000..0f7e6c080e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch
@@ -0,0 +1,47 @@
+From: Xiaofei Tan <tanxiaofei@huawei.com>
+Date: Sat, 19 Jan 2019 14:23:29 +0800
+Subject: RDMA/hns: Add the process of AEQ overflow for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 2b9acb9a97fe9b4101ca020643760c4a090b4cb4
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+AEQ overflow is reported by hardware when too many asynchronous events
+occur without being handled in time. Normally, an AEQ overflow error is
+unlikely to occur; once it does, a physical function (PF) reset is needed
+to recover. The PF reset is implemented in two steps: first, set the
+reset level with ae_dev->ops->set_default_reset_request; second, run the
+reset with ae_dev->ops->reset_event.
+
+Signed-off-by: Xiaofei Tan <tanxiaofei@huawei.com>
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4702,11 +4702,22 @@ static irqreturn_t hns_roce_v2_msix_inte
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+ if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
++ struct pci_dev *pdev = hr_dev->pci_dev;
++ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
++ const struct hnae3_ae_ops *ops = ae_dev->ops;
++
+ dev_err(dev, "AEQ overflow!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
++ /* Set reset level for reset_event() */
++ if (ops->set_default_reset_request)
++ ops->set_default_reset_request(ae_dev,
++ HNAE3_FUNC_RESET);
++ if (ops->reset_event)
++ ops->reset_event(pdev, NULL);
++
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
diff --git a/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch b/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch
new file mode 100644
index 0000000000..67166ad771
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch
@@ -0,0 +1,466 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:55 +0800
+Subject: RDMA/hns: Add timer allocation support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 0e40dc2f70cda099e13392a26bd37aed24bcd25d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds QPC timer and CQC timer allocation support for hardware
+timeout retransmission in the kernel-space driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.h | 8 ++
+ drivers/infiniband/hw/hns/hns_roce_device.h | 14 +++
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 42 +++++++++++
+ drivers/infiniband/hw/hns/hns_roce_hem.h | 2
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 103 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 24 ++++++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 36 +++++++++
+ 7 files changed, 227 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
+@@ -75,6 +75,10 @@ enum {
+ HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
+ HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
+
++ /* CQC TIMER commands */
++ HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23,
++ HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27,
++
+ /* MPT commands */
+ HNS_ROCE_CMD_QUERY_MPT = 0x62,
+
+@@ -89,6 +93,10 @@ enum {
+ HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
++ /* QPC TIMER commands */
++ HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33,
++ HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37,
++
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -733,6 +733,8 @@ struct hns_roce_caps {
+ u32 max_extend_sg;
+ int num_qps; /* 256k */
+ int reserved_qps;
++ int num_qpc_timer;
++ int num_cqc_timer;
+ u32 max_srq_sg;
+ int num_srqs;
+ u32 max_wqes; /* 16k */
+@@ -773,6 +775,8 @@ struct hns_roce_caps {
+ int trrl_entry_sz;
+ int cqc_entry_sz;
+ int sccc_entry_sz;
++ int qpc_timer_entry_sz;
++ int cqc_timer_entry_sz;
+ int srqc_entry_sz;
+ int idx_entry_sz;
+ u32 pbl_ba_pg_sz;
+@@ -782,8 +786,10 @@ struct hns_roce_caps {
+ int ceqe_depth;
+ enum ib_mtu max_mtu;
+ u32 qpc_bt_num;
++ u32 qpc_timer_bt_num;
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
++ u32 cqc_timer_bt_num;
+ u32 mpt_bt_num;
+ u32 sccc_bt_num;
+ u32 qpc_ba_pg_sz;
+@@ -804,6 +810,12 @@ struct hns_roce_caps {
+ u32 sccc_ba_pg_sz;
+ u32 sccc_buf_pg_sz;
+ u32 sccc_hop_num;
++ u32 qpc_timer_ba_pg_sz;
++ u32 qpc_timer_buf_pg_sz;
++ u32 qpc_timer_hop_num;
++ u32 cqc_timer_ba_pg_sz;
++ u32 cqc_timer_buf_pg_sz;
++ u32 cqc_timer_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+@@ -931,6 +943,8 @@ struct hns_roce_dev {
+ struct hns_roce_srq_table srq_table;
+ struct hns_roce_qp_table qp_table;
+ struct hns_roce_eq_table eq_table;
++ struct hns_roce_hem_table qpc_timer_table;
++ struct hns_roce_hem_table cqc_timer_table;
+
+ int cmd_mod;
+ int loop_idc;
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -46,6 +46,8 @@ bool hns_roce_check_whether_mhop(struct
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+ (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
++ (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
++ (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
+ (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
+@@ -134,6 +136,22 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
+ mhop->hop_num = hr_dev->caps.sccc_hop_num;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
++ mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
++ mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -602,6 +620,7 @@ out:
+ mutex_unlock(&table->mutex);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(hns_roce_table_get);
+
+ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+@@ -744,6 +763,7 @@ void hns_roce_table_put(struct hns_roce_
+
+ mutex_unlock(&table->mutex);
+ }
++EXPORT_SYMBOL_GPL(hns_roce_table_put);
+
+ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+@@ -921,6 +941,22 @@ int hns_roce_init_hem_table(struct hns_r
+ num_bt_l0 = hr_dev->caps.sccc_bt_num;
+ hop_num = hr_dev->caps.sccc_hop_num;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
++ hop_num = hr_dev->caps.qpc_timer_hop_num;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
++ hop_num = hr_dev->caps.cqc_timer_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -1098,6 +1134,12 @@ void hns_roce_cleanup_hem(struct hns_roc
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
++ if (hr_dev->caps.qpc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table);
++ if (hr_dev->caps.cqc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->cqc_timer_table);
+ if (hr_dev->caps.sccc_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -45,6 +45,8 @@ enum {
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
+ HEM_TYPE_SCCC,
++ HEM_TYPE_QPC_TIMER,
++ HEM_TYPE_CQC_TIMER,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1085,6 +1085,41 @@ static int hns_roce_query_pf_resource(st
+ return 0;
+ }
+
++static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_pf_timer_res_a *req_a;
++ struct hns_roce_cmq_desc desc[2];
++ int ret, i;
++
++ for (i = 0; i < 2; i++) {
++ hns_roce_cmq_setup_basic_desc(&desc[i],
++ HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
++ true);
++
++ if (i == 0)
++ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++ else
++ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++ }
++
++ ret = hns_roce_cmq_send(hr_dev, desc, 2);
++ if (ret)
++ return ret;
++
++ req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
++
++ hr_dev->caps.qpc_timer_bt_num =
++ roce_get_field(req_a->qpc_timer_bt_idx_num,
++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
++ hr_dev->caps.cqc_timer_bt_num =
++ roce_get_field(req_a->cqc_timer_bt_idx_num,
++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
++
++ return 0;
++}
++
+ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+ int vf_id)
+ {
+@@ -1315,6 +1350,16 @@ static int hns_roce_v2_profile(struct hn
+ return ret;
+ }
+
++ if (hr_dev->pci_dev->revision == 0x21) {
++ ret = hns_roce_query_pf_timer_resource(hr_dev);
++ if (ret) {
++ dev_err(hr_dev->dev,
++ "Query pf timer resource fail, ret = %d.\n",
++ ret);
++ return ret;
++ }
++ }
++
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+@@ -1439,6 +1484,17 @@ static int hns_roce_v2_profile(struct hn
+ HNS_ROCE_CAP_FLAG_SRQ |
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+
++ caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
++ caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
++ caps->qpc_timer_ba_pg_sz = 0;
++ caps->qpc_timer_buf_pg_sz = 0;
++ caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
++ caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
++ caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
++ caps->cqc_timer_ba_pg_sz = 0;
++ caps->cqc_timer_buf_pg_sz = 0;
++ caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
++
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+@@ -1644,7 +1700,8 @@ static void hns_roce_free_link_table(str
+ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+- int ret;
++ int qpc_count, cqc_count;
++ int ret, i;
+
+ /* TSQ includes SQ doorbell and ack doorbell */
+ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
+@@ -1659,8 +1716,40 @@ static int hns_roce_v2_init(struct hns_r
+ goto err_tpq_init_failed;
+ }
+
++ /* Alloc memory for QPC Timer buffer space chunk*/
++ for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
++ qpc_count++) {
++ ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
++ qpc_count);
++ if (ret) {
++ dev_err(hr_dev->dev, "QPC Timer get failed\n");
++ goto err_qpc_timer_failed;
++ }
++ }
++
++ /* Alloc memory for CQC Timer buffer space chunk*/
++ for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
++ cqc_count++) {
++ ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
++ cqc_count);
++ if (ret) {
++ dev_err(hr_dev->dev, "CQC Timer get failed\n");
++ goto err_cqc_timer_failed;
++ }
++ }
++
+ return 0;
+
++err_cqc_timer_failed:
++ for (i = 0; i < cqc_count; i++)
++ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
++
++err_qpc_timer_failed:
++ for (i = 0; i < qpc_count; i++)
++ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
++
++ hns_roce_free_link_table(hr_dev, &priv->tpq);
++
+ err_tpq_init_failed:
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+
+@@ -2699,6 +2788,12 @@ static int hns_roce_v2_set_hem(struct hn
+ case HEM_TYPE_SCCC:
+ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
++ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+@@ -2763,6 +2858,8 @@ static int hns_roce_v2_clear_hem(struct
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
+ case HEM_TYPE_SCCC:
++ case HEM_TYPE_QPC_TIMER:
++ case HEM_TYPE_CQC_TIMER:
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+@@ -2773,7 +2870,9 @@ static int hns_roce_v2_clear_hem(struct
+ return 0;
+ }
+
+- if (table->type == HEM_TYPE_SCCC)
++ if (table->type == HEM_TYPE_SCCC ||
++ table->type == HEM_TYPE_QPC_TIMER ||
++ table->type == HEM_TYPE_CQC_TIMER)
+ return 0;
+
+ op += step_idx;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -46,11 +46,13 @@
+ #define HNS_ROCE_VF_SL_NUM 8
+
+ #define HNS_ROCE_V2_MAX_QP_NUM 0x2000
++#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
+ #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ 0x100000
+ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+ #define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
++#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+@@ -85,6 +87,8 @@
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
+ #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
++#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
++#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x100
+@@ -229,6 +233,7 @@ enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
+ HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
++ HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+ HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
+ HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
+ HNS_ROCE_OPC_POST_MB = 0x8504,
+@@ -1336,6 +1341,25 @@ struct hns_roce_pf_res_b {
+ #define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
+ #define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
+
++struct hns_roce_pf_timer_res_a {
++ __le32 rsv0;
++ __le32 qpc_timer_bt_idx_num;
++ __le32 cqc_timer_bt_idx_num;
++ __le32 rsv[3];
++};
++
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
++
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
++
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
++
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
++
+ struct hns_roce_vf_res_a {
+ __le32 vf_id;
+ __le32 vf_qpc_bt_idx_num;
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -717,8 +717,44 @@ static int hns_roce_init_hem(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.qpc_timer_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table,
++ HEM_TYPE_QPC_TIMER,
++ hr_dev->caps.qpc_timer_entry_sz,
++ hr_dev->caps.num_qpc_timer, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init QPC timer memory, aborting.\n");
++ goto err_unmap_ctx;
++ }
++ }
++
++ if (hr_dev->caps.cqc_timer_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->cqc_timer_table,
++ HEM_TYPE_CQC_TIMER,
++ hr_dev->caps.cqc_timer_entry_sz,
++ hr_dev->caps.num_cqc_timer, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init CQC timer memory, aborting.\n");
++ goto err_unmap_qpc_timer;
++ }
++ }
++
+ return 0;
+
++err_unmap_qpc_timer:
++ if (hr_dev->caps.qpc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table);
++
++err_unmap_ctx:
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table);
++
+ err_unmap_idx:
+ if (hr_dev->caps.num_idx_segs)
+ hns_roce_cleanup_hem_table(hr_dev,
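Every per-type branch this patch adds to the HEM code derives its chunk
sizes the same way: a page-order value stacked on PAGE_SHIFT. A standalone
sketch of that arithmetic, using the defaults the patch sets for the timer
tables:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assuming 4 KiB pages */

/* buf_chunk_size / bt_chunk_size as computed above:
 * 1 << (pg_sz + PAGE_SHIFT) */
static unsigned long hem_chunk_size(unsigned int pg_sz)
{
	return 1UL << (pg_sz + PAGE_SHIFT);
}

int main(void)
{
	/* qpc_timer_ba_pg_sz = qpc_timer_buf_pg_sz = 0 in this patch, so
	 * each chunk is one 4 KiB page, matching the 4096-byte
	 * HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ: one entry per chunk */
	printf("timer chunk: %lu bytes\n", hem_chunk_size(0));
	return 0;
}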
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch b/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch
new file mode 100644
index 0000000000..9dad6cd70b
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch
@@ -0,0 +1,41 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Sun, 7 Apr 2019 13:23:39 +0800
+Subject: RDMA/hns: Bugfix for SCC hem free
+Patch-mainline: v5.1-rc5
+Git-commit: 00fb67ec6b98114a887d9ef26fc7c3e566e7f665
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The method of freeing hem for the SCC context differs from the QP context.
+
+In the current version, if the SCC hem is freed during the execution of
+qp free, an SMMU error such as the one below may occur:
+
+ arm-smmu-v3 arm-smmu-v3.1.auto: event 0x10 received:
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x00007d0000000010
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x000012000000017c
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x00000000000009e0
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x0000000000000000
+
+As the SCC context is still used by the hardware after qp free, we can
+solve this problem by removing the SCC hem free from hns_roce_qp_free.
+
+Fixes: 6a157f7d1b14 ("RDMA/hns: Add SCC context allocation support for hip08")
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -273,9 +273,6 @@ void hns_roce_qp_free(struct hns_roce_de
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
+- if (hr_dev->caps.sccc_entry_sz)
+- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
+- hr_qp->qpn);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch b/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch
new file mode 100644
index 0000000000..c6bc11523f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch
@@ -0,0 +1,41 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Tue, 23 Apr 2019 17:30:26 +0800
+Subject: RDMA/hns: Bugfix for mapping user db
+Patch-mainline: v5.1-rc7
+Git-commit: 2557fabd6e29f349bfa0ac13f38ac98aa5eafc74
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When the maximum number of send WRs delivered by the user is zero, the QP
+does not have an SQ.
+
+Userspace uses max_send_wr as the trigger condition when allocating the SQ
+doorbell buffer that stores the user SQ producer index and mapping it into
+kernel mode, while the kernel does not consider max_send_wr when mapping
+the doorbell. This causes the SQ record doorbell map and QP creation to fail.
+
+The failed print information as follows:
+
+ hns3 0000:7d:00.1: Send cmd: tail - 418, opcode - 0x8504, flag - 0x0011, retval - 0x0000
+ hns3 0000:7d:00.1: Send cmd: 0xe59dc000 0x00000000 0x00000000 0x00000000 0x00000116 0x0000ffff
+ hns3 0000:7d:00.1: sq record doorbell map failed!
+ hns3 0000:7d:00.1: Create RC QP failed
+
+Fixes: 0425e3e6e0c7 ("RDMA/hns: Support flush cqe for hip08 in kernel space")
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -532,7 +532,7 @@ static int hns_roce_set_kernel_sq_size(s
+
+ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
+ {
+- if (attr->qp_type == IB_QPT_XRC_TGT)
++ if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
+ return 0;
+
+ return 1;
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch b/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch
new file mode 100644
index 0000000000..8150c82df3
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch
@@ -0,0 +1,142 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Thu, 30 May 2019 23:55:53 +0800
+Subject: RDMA/hns: Bugfix for posting multiple srq work request
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 97545b10221ad14b046dba135a37f4e98a560697
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When the user submits more than 32 work requests to an SRQ at a
+time, the driver needs to find the corresponding number of entries
+in the idx queue bitmap. However, the original lookup function,
+ffs, only processes 32 bits of each array element. When the number
+of SRQ WQEs issued exceeds 32, ffs only processes the lower 32
+bits of each element and cannot return the correct WQE index for
+the SRQ WQE.
+
+Signed-off-by: Xi Wang <wangxi11@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 2 -
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 34 ++++++++++++++--------------
+ drivers/infiniband/hw/hns/hns_roce_srq.c | 15 ++----------
+ 3 files changed, 22 insertions(+), 29 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -472,7 +472,7 @@ struct hns_roce_idx_que {
+ u32 buf_size;
+ struct ib_umem *umem;
+ struct hns_roce_mtt mtt;
+- u64 *bitmap;
++ unsigned long *bitmap;
+ };
+
+ struct hns_roce_srq {
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2343,15 +2343,10 @@ static void *get_srq_wqe(struct hns_roce
+
+ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+ {
+- u32 bitmap_num;
+- int bit_num;
+-
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+- bitmap_num = wqe_index / (sizeof(u64) * 8);
+- bit_num = wqe_index % (sizeof(u64) * 8);
+- srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
++ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+ srq->tail++;
+
+ spin_unlock(&srq->lock);
+@@ -5971,18 +5966,19 @@ out:
+ return ret;
+ }
+
+-static int find_empty_entry(struct hns_roce_idx_que *idx_que)
++static int find_empty_entry(struct hns_roce_idx_que *idx_que,
++ unsigned long size)
+ {
+- int bit_num;
+- int i;
++ int wqe_idx;
+
+- /* bitmap[i] is set zero if all bits are allocated */
+- for (i = 0; idx_que->bitmap[i] == 0; ++i)
+- ;
+- bit_num = ffs(idx_que->bitmap[i]);
+- idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
++ if (unlikely(bitmap_full(idx_que->bitmap, size)))
++ return -ENOSPC;
++
++ wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
++
++ bitmap_set(idx_que->bitmap, wqe_idx, 1);
+
+- return i * sizeof(u64) * 8 + (bit_num - 1);
++ return wqe_idx;
+ }
+
+ static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
+@@ -6028,7 +6024,13 @@ static int hns_roce_v2_post_srq_recv(str
+ break;
+ }
+
+- wqe_idx = find_empty_entry(&srq->idx_que);
++ wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
++ if (wqe_idx < 0) {
++ ret = -ENOMEM;
++ *bad_wr = wr;
++ break;
++ }
++
+ fill_idx_queue(&srq->idx_que, ind, wqe_idx);
+ wqe = get_srq_wqe(srq, wqe_idx);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -188,28 +188,19 @@ static int hns_roce_create_idx_que(struc
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+- u32 bitmap_num;
+- int i;
+
+- bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
+-
+- idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
++ idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+ if (!idx_que->bitmap)
+ return -ENOMEM;
+
+- bitmap_num = bitmap_num / (8 * sizeof(u64));
+-
+ idx_que->buf_size = srq->idx_que.buf_size;
+
+ if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
+ &idx_que->idx_buf, page_shift)) {
+- kfree(idx_que->bitmap);
++ bitmap_free(idx_que->bitmap);
+ return -ENOMEM;
+ }
+
+- for (i = 0; i < bitmap_num; i++)
+- idx_que->bitmap[i] = ~(0UL);
+-
+ return 0;
+ }
+
+@@ -415,7 +406,7 @@ err_idx_mtt:
+ err_create_idx:
+ hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
+ &srq->idx_que.idx_buf);
+- kfree(srq->idx_que.bitmap);
++ bitmap_free(srq->idx_que.bitmap);
+
+ err_srq_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
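The 32-bit truncation the message describes is easy to reproduce outside the
driver: ffs() takes an int, so any slot tracked above bit 31 of a u64 bitmap
element is invisible to it. A standalone demo using glibc's ffs/ffsll:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs(), ffsll() */

int main(void)
{
	/* one set bit at index 32 of a 64-bit bitmap element */
	uint64_t element = 1ULL << 32;

	/* ffs() takes an int: the upper 32 bits are lost in the
	 * conversion, so the bit at index 32 is never found */
	printf("ffs   -> %d (0 means no set bit)\n", ffs((int)element));

	/* a true 64-bit scan, like the find_first_zero_bit() used in
	 * the fixed code (over inverted allocation state), finds it */
	printf("ffsll -> %d (1-based index)\n", ffsll((long long)element));
	return 0;
}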
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch b/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch
new file mode 100644
index 0000000000..0ab666db0e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:26 +0800
+Subject: RDMA/hns: Bugfix for sending with invalidate
+Patch-mainline: v5.2-rc1
+Git-commit: 82342e493b7e53f5e0d0698a48190f05e84d6690
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+According to the IB protocol, the send with invalidate operation shall not
+invalidate an MR that was created through a register MR or reregister MR.
+
+Fixes: e93df0108579 ("RDMA/hns: Support local invalidate for hip08 in kernel space")
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2155,7 +2155,7 @@ static int hns_roce_v2_write_mtpt(void *
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
++ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
+ (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch b/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch
new file mode 100644
index 0000000000..375f057966
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch
@@ -0,0 +1,60 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Sat, 16 Feb 2019 20:10:25 +0800
+Subject: RDMA/hns: Bugfix for set hem of SCC
+Patch-mainline: v5.1-rc1
+Git-commit: 6ac16e403900a98f9b330daa5f0d89f76a24c6eb
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The method of setting hem for the SCC context is different from other
+contexts: for SCC, the driver should notify the hardware of the detailed
+index in bt0, while for other contexts it only needs to notify the bt
+step and the hardware calculates the index itself.
+
+This fixes the following error when unloading the hip08 driver:
+
+[ 123.570768] {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 0
+[ 123.579023] {1}[Hardware Error]: event severity: recoverable
+[ 123.584670] {1}[Hardware Error]: Error 0, type: recoverable
+[ 123.590317] {1}[Hardware Error]: section_type: PCIe error
+[ 123.595877] {1}[Hardware Error]: version: 4.0
+[ 123.600395] {1}[Hardware Error]: command: 0x0006, status: 0x0010
+[ 123.606562] {1}[Hardware Error]: device_id: 0000:7d:00.0
+[ 123.612034] {1}[Hardware Error]: slot: 0
+[ 123.616120] {1}[Hardware Error]: secondary_bus: 0x00
+[ 123.621245] {1}[Hardware Error]: vendor_id: 0x19e5, device_id: 0xa222
+[ 123.627847] {1}[Hardware Error]: class_code: 000002
+[ 123.632977] hns3 0000:7d:00.0: aer_status: 0x00000000, aer_mask: 0x00000000
+[ 123.639928] hns3 0000:7d:00.0: aer_layer=Transaction Layer, aer_agent=Receiver ID
+[ 123.647400] hns3 0000:7d:00.0: aer_uncor_severity: 0x00000000
+[ 123.653136] hns3 0000:7d:00.0: PCI error detected, state(=1)!!
+[ 123.658959] hns3 0000:7d:00.0: ROCEE uncorrected RAS error identified
+[ 123.665395] hns3 0000:7d:00.0: ROCEE RAS AXI rresp error
+[ 123.670713] hns3 0000:7d:00.0: requesting reset due to PCI error
+[ 123.676715] hns3 0000:7d:00.0: received reset event , reset type is 5
+[ 123.683147] hns3 0000:7d:00.0: AER: Device recovery successful
+[ 123.688978] hns3 0000:7d:00.0: PF Reset requested
+[ 123.693684] hns3 0000:7d:00.0: PF failed(=-5) to send mailbox message to VF
+[ 123.700633] hns3 0000:7d:00.0: inform reset to vf(1) failded -5!
+
+Fixes: 6a157f7d1b14 ("RDMA/hns: Add SCC context allocation support for hip08")
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Reviewed-by: Yixian Liu <liuyixian@huawei.com>
+Reviewed-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2945,6 +2945,9 @@ static int hns_roce_v2_set_hem(struct hn
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
++ if (table->type == HEM_TYPE_SCCC)
++ obj = mhop.l0_idx;
++
+ if (check_whether_last_step(hop_num, step_idx)) {
+ hem = table->hem[hem_idx];
+ for (hns_roce_hem_first(hem, &iter);
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch b/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch
new file mode 100644
index 0000000000..3a6e75684f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:07 +0800
+Subject: RDMA/hns: Bugfix for the scene without receiver queue
+Patch-mainline: v5.1-rc1
+Git-commit: 4d103905eb1e4f14cb62fcf962c9d35da7005dea
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+In some application scenarios, the user may not have a receive queue when
+running RDMA write or read operations.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -526,7 +526,8 @@ static int hns_roce_qp_has_sq(struct ib_
+ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
+ {
+ if (attr->qp_type == IB_QPT_XRC_INI ||
+- attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
++ attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
++ !attr->cap.max_recv_wr)
+ return 0;
+
+ return 1;
diff --git a/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch b/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch
new file mode 100644
index 0000000000..6f06739949
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch
@@ -0,0 +1,38 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:07 +0800
+Subject: RDMA/hns: Configure capacity of hns device
+Patch-mainline: v5.1-rc1
+Git-commit: dad1f9802ecee3a21143293b2505e1b57b1ae525
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+This patch adds the new device capability IB_DEVICE_MEM_MGT_EXTENSIONS to
+indicate device support for the following features:
+
+1. Fast register memory region (FRMR).
+2. Send with remote invalidate by FRMR.
+3. Local invalidate memory region.
+
+It also adds the maximum depth of the FRMR page list length.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -226,6 +226,11 @@ static int hns_roce_query_device(struct
+ props->max_srq_sge = hr_dev->caps.max_srq_sges;
+ }
+
++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
++ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
++ props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
++ }
++
+ return 0;
+ }
+
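From userspace, the new capability surfaces through the standard device
query. A minimal libibverbs sketch (first device taken for brevity, most
error handling elided):

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **list = ibv_get_device_list(NULL);
	struct ibv_context *ctx;
	struct ibv_device_attr attr;

	if (!list || !list[0])
		return 1;
	ctx = ibv_open_device(list[0]);
	if (!ctx || ibv_query_device(ctx, &attr))
		return 1;

	/* set when the kernel driver reports IB_DEVICE_MEM_MGT_EXTENSIONS,
	 * as the hunk above now does for FRMR-capable hns hardware */
	if (attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS)
		printf("memory management extensions supported\n");

	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return 0;
}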
diff --git a/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch b/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch
new file mode 100644
index 0000000000..014a7d2d59
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch
@@ -0,0 +1,34 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:28 +0800
+Subject: RDMA/hns: Delete unused variable in hns_roce_v2_modify_qp function
+Patch-mainline: v5.2-rc1
+Git-commit: d0a935563bc0f447abed7799388fa3f13099cc0d
+References: bsc#1104427 FATE#326416
+
+The src_mac array is not used in the hns_roce_v2_modify_qp function.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3975,7 +3975,6 @@ static int hns_roce_v2_modify_qp(struct
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&attr->ah_attr);
+ const struct ib_gid_attr *gid_attr = NULL;
+- u8 src_mac[ETH_ALEN];
+ int is_roce_protocol;
+ u16 vlan = 0xffff;
+ u8 ib_port;
+@@ -3990,7 +3989,6 @@ static int hns_roce_v2_modify_qp(struct
+ if (is_roce_protocol) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+- memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ }
+
+ if (is_vlan_dev(gid_attr->ndev)) {
diff --git a/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch b/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch
new file mode 100644
index 0000000000..9288fac491
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch
@@ -0,0 +1,91 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:06 +0800
+Subject: RDMA/hns: Delete useful prints for aeq subtype event
+Patch-mainline: v5.1-rc1
+Git-commit: e95c716c7faa0d0eede5eabb6fea2504709e25b6
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Currently, all messages printed for the AEQ subtype event are wrong. Thus,
+delete them and print only the value of the subtype event.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 57 +++--------------------------
+ 1 file changed, 6 insertions(+), 51 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4663,64 +4663,19 @@ static void hns_roce_irq_work_handle(str
+ dev_warn(dev, "Send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+- dev_err(dev, "Local work queue catastrophic error.\n");
++ dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
++ qpn, irq_work->sub_type);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+- switch (irq_work->sub_type) {
+- case HNS_ROCE_LWQCE_QPC_ERROR:
+- dev_err(dev, "QP %d, QPC error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_MTU_ERROR:
+- dev_err(dev, "QP %d, MTU error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+- dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+- dev_err(dev, "QP %d, WQE addr error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+- dev_err(dev, "QP %d, WQE shift error.\n", qpn);
+- break;
+- default:
+- dev_err(dev, "Unhandled sub_event type %d.\n",
+- irq_work->sub_type);
+- break;
+- }
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+- dev_err(dev, "Invalid request local work queue error.\n");
++ dev_err(dev, "Invalid request local work queue 0x%x error.\n",
++ qpn);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+- dev_err(dev, "Local access violation work queue error.\n");
++ dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
++ qpn, irq_work->sub_type);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+- switch (irq_work->sub_type) {
+- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+- dev_err(dev, "QP %d, R_key violation.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+- dev_err(dev, "QP %d, length error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_VA_ERROR:
+- dev_err(dev, "QP %d, VA error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_PD_ERROR:
+- dev_err(dev, "QP %d, PD error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+- dev_err(dev, "QP %d, rw acc error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+- dev_err(dev, "QP %d, key state error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+- dev_err(dev, "QP %d, MR operation error.\n", qpn);
+- break;
+- default:
+- dev_err(dev, "Unhandled sub_event type %d.\n",
+- irq_work->sub_type);
+- break;
+- }
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ dev_warn(dev, "SRQ limit reach.\n");
diff --git a/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch b/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch
new file mode 100644
index 0000000000..ac01c7524f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch
@@ -0,0 +1,36 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 19 Mar 2019 11:10:08 +0200
+Subject: RDMA/hns: Fix bad endianess of port_pd variable
+Patch-mainline: v5.2-rc1
+Git-commit: 6734b2973565e36659e97e12ab0d0faf1d9f3fbe
+References: bsc#1104427 FATE#326416
+
+port_pd is treated as le32 in its declaration and reads, so fix the
+assignment to be le32 too. This change fixes the following compilation warnings.
+
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: warning: incorrect type
+in assignment (different base types)
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: expected restricted __le32 [usertype] port_pd
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: got restricted __be32 [usertype]
+
+Fixes: 9a4435375cd1 ("IB/hns: Add driver files for hns RoCE driver")
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Gal Pressman <galpress@amazon.com>
+Reviewed-by: Lijun Ou <ouliun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_ah.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -69,7 +69,7 @@ struct ib_ah *hns_roce_create_ah(struct
+ HNS_ROCE_VLAN_SL_BIT_MASK) <<
+ HNS_ROCE_VLAN_SL_SHIFT;
+
+- ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
++ ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) <<
+ HNS_ROCE_PORT_NUM_SHIFT));
+ ah->av.gid_index = grh->sgid_index;
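The mismatch is purely one of byte order; in userspace terms, with
htole32/htobe32 standing in for cpu_to_le32/cpu_to_be32 (the shift value is
assumed here for illustration):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define PORT_NUM_SHIFT 24	/* assumed value, for illustration only */

int main(void)
{
	uint32_t pdn = 0x000123;
	uint32_t port = 1;
	uint32_t host = pdn | (port << PORT_NUM_SHIFT);

	/* the hardware reads this field as little-endian, so only the
	 * first form stores the bytes the device expects */
	printf("cpu_to_le32: 0x%08x\n", htole32(host));	/* the fix */
	printf("cpu_to_be32: 0x%08x\n", htobe32(host));	/* the bug */
	return 0;
}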
diff --git a/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch b/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch
new file mode 100644
index 0000000000..65227a37ae
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch
@@ -0,0 +1,307 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:13 +0800
+Subject: RDMA/hns: Fix the Oops during rmmod or insmod ko when reset occurs
+Patch-mainline: v5.1-rc1
+Git-commit: d061effc36f7bd38a12912977a37a50ac9140d11
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+In the reset process, the hns3 NIC driver notifies the RoCE driver to
+perform reset-related processing by calling the .reset_notify() interface
+registered by the RoCE driver on the hip08 SoC.
+
+In the current version, if a reset occurs during the execution of rmmod
+or insmod of the ko, an Oops error such as the one below may occur:
+
+ Internal error: Oops: 86000007 [#1] PREEMPT SMP
+ Modules linked in: hns_roce(O) hns3(O) hclge(O) hnae3(O) [last unloaded: hns_roce_hw_v2]
+ CPU: 0 PID: 14 Comm: kworker/0:1 Tainted: G O 4.19.0-ge00d540 #1
+ Hardware name: Huawei Technologies Co., Ltd.
+ Workqueue: events hclge_reset_service_task [hclge]
+ pstate: 60c00009 (nZCv daif +PAN +UAO)
+ pc : 0xffff00000100b0b8
+ lr : 0xffff00000100aea0
+ sp : ffff000009afbab0
+ x29: ffff000009afbab0 x28: 0000000000000800
+ x27: 0000000000007ff0 x26: ffff80002f90c004
+ x25: 00000000000007ff x24: ffff000008f97000
+ x23: ffff80003efee0a8 x22: 0000000000001000
+ x21: ffff80002f917ff0 x20: ffff8000286ea070
+ x19: 0000000000000800 x18: 0000000000000400
+ x17: 00000000c4d3225d x16: 00000000000021b8
+ x15: 0000000000000400 x14: 0000000000000400
+ x13: 0000000000000000 x12: ffff80003fac6e30
+ x11: 0000800036303000 x10: 0000000000000001
+ x9 : 0000000000000000 x8 : ffff80003016d000
+ x7 : 0000000000000000 x6 : 000000000000003f
+ x5 : 0000000000000040 x4 : 0000000000000000
+ x3 : 0000000000000004 x2 : 00000000000007ff
+ x1 : 0000000000000000 x0 : 0000000000000000
+ Process kworker/0:1 (pid: 14, stack limit = 0x00000000af8f0ad9)
+ Call trace:
+ 0xffff00000100b0b8
+ 0xffff00000100b3a0
+ hns_roce_init+0x624/0xc88 [hns_roce]
+ 0xffff000001002df8
+ 0xffff000001006960
+ hclge_notify_roce_client+0x74/0xe0 [hclge]
+ hclge_reset_service_task+0xa58/0xbc0 [hclge]
+ process_one_work+0x1e4/0x458
+ worker_thread+0x40/0x450
+ kthread+0x12c/0x130
+ ret_from_fork+0x10/0x18
+ Code: bad PC value
+
+In the reset process, we first release the resources, and after the
+hardware reset is completed, we reallocate the resources and reconfigure
+the hardware.
+
+We can solve this problem by modifying both the NIC and the RoCE
+driver. We modify the concurrent processing in the NIC driver to avoid
+calling the .reset_notify and .uninit_instance ops at the same time, and
+we modify the RoCE driver to record the reset stage and the driver's
+init/uninit state, checking that state in the .reset_notify,
+.init_instance, and .uninit_instance functions to avoid a NULL pointer
+dereference.
+
+Fixes: cb7a94c9c808 ("RDMA/hns: Add reset process for RoCE in hip08")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 21 +++++
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 103 ++++++++++++++++++++++++----
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 1
+ 3 files changed, 112 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -217,6 +217,26 @@ enum {
+ HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
+ };
+
++enum hns_roce_reset_stage {
++ HNS_ROCE_STATE_NON_RST,
++ HNS_ROCE_STATE_RST_BEF_DOWN,
++ HNS_ROCE_STATE_RST_DOWN,
++ HNS_ROCE_STATE_RST_UNINIT,
++ HNS_ROCE_STATE_RST_INIT,
++ HNS_ROCE_STATE_RST_INITED,
++};
++
++enum hns_roce_instance_state {
++ HNS_ROCE_STATE_NON_INIT,
++ HNS_ROCE_STATE_INIT,
++ HNS_ROCE_STATE_INITED,
++ HNS_ROCE_STATE_UNINIT,
++};
++
++enum {
++ HNS_ROCE_RST_DIRECT_RETURN = 0,
++};
++
+ #define HNS_ROCE_CMD_SUCCESS 1
+
+ #define HNS_ROCE_PORT_DOWN 0
+@@ -919,6 +939,7 @@ struct hns_roce_dev {
+ spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
++ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+
+ struct list_head pgdir_list;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -6002,6 +6002,7 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_
+ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+ {
++ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ const struct pci_device_id *id;
+ int i;
+
+@@ -6032,10 +6033,13 @@ static int hns_roce_hw_v2_get_cfg(struct
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+
++ hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
++ priv->handle = handle;
++
+ return 0;
+ }
+
+-static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ {
+ struct hns_roce_dev *hr_dev;
+ int ret;
+@@ -6052,7 +6056,6 @@ static int hns_roce_hw_v2_init_instance(
+
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
+- handle->priv = hr_dev;
+
+ ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
+ if (ret) {
+@@ -6066,6 +6069,8 @@ static int hns_roce_hw_v2_init_instance(
+ goto error_failed_get_cfg;
+ }
+
++ handle->priv = hr_dev;
++
+ return 0;
+
+ error_failed_get_cfg:
+@@ -6077,7 +6082,7 @@ error_failed_kzalloc:
+ return ret;
+ }
+
+-static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+ {
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+@@ -6085,24 +6090,78 @@ static void hns_roce_hw_v2_uninit_instan
+ if (!hr_dev)
+ return;
+
++ handle->priv = NULL;
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+ }
+
++static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++{
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ struct device *dev = &handle->pdev->dev;
++ int ret;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
++
++ if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ goto reset_chk_err;
++ }
++
++ ret = __hns_roce_hw_v2_init_instance(handle);
++ if (ret) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
++ if (ops->ae_dev_resetting(handle) ||
++ ops->get_hw_reset_stat(handle))
++ goto reset_chk_err;
++ else
++ return ret;
++ }
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
++
++
++ return 0;
++
++reset_chk_err:
++ dev_err(dev, "Device is busy in resetting state.\n"
++ "please retry later.\n");
++
++ return -EBUSY;
++}
++
++static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++ bool reset)
++{
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
++ return;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
++
++ __hns_roce_hw_v2_uninit_instance(handle, reset);
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++}
+ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+ {
+- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
++ struct hns_roce_dev *hr_dev;
+ struct ib_event event;
+
+- if (!hr_dev) {
+- dev_err(&handle->pdev->dev,
+- "Input parameter handle->priv is NULL!\n");
+- return -EINVAL;
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
++ set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++ return 0;
+ }
+
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
++ clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++
++ hr_dev = (struct hns_roce_dev *)handle->priv;
++ if (!hr_dev)
++ return 0;
++
+ hr_dev->active = false;
+- hr_dev->is_reset = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+@@ -6114,17 +6173,29 @@ static int hns_roce_hw_v2_reset_notify_d
+
+ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+ {
++ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+- ret = hns_roce_hw_v2_init_instance(handle);
++ if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
++ &handle->rinfo.state)) {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ return 0;
++ }
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
++
++ dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
++ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
+ * callback function, RoCE Engine reinitialize. If RoCE reinit
+ * failed, we should inform NIC driver.
+ */
+ handle->priv = NULL;
+- dev_err(&handle->pdev->dev,
+- "In reset process RoCE reinit failed %d.\n", ret);
++ dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
++ } else {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+ }
+
+ return ret;
+@@ -6132,8 +6203,14 @@ static int hns_roce_hw_v2_reset_notify_i
+
+ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
+ {
++ if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
++ return 0;
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
++ dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
+ msleep(100);
+- hns_roce_hw_v2_uninit_instance(handle, false);
++ __hns_roce_hw_v2_uninit_instance(handle, false);
++
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1602,6 +1602,7 @@ struct hns_roce_link_table_entry {
+ #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
+
+ struct hns_roce_v2_priv {
++ struct hnae3_handle *handle;
+ struct hns_roce_v2_cmq cmq;
+ struct hns_roce_link_table tsq;
+ struct hns_roce_link_table tpq;
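Stripped of the actual device teardown and re-init, the handshake the patch
builds between the notify callbacks is compact; a condensed sketch (kernel
driver context assumed, function names hypothetical):

/* Condensed from the hunks above: notify_down flags a direct return when
 * the instance never reached INITED, and notify_init consumes that flag,
 * so neither side touches a half-initialized device. */
static int reset_notify_down_sketch(struct hnae3_handle *handle)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;	/* nothing to tear down */
	}
	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
	/* ... mark the device inactive, post IB_EVENT_DEVICE_FATAL ... */
	return 0;
}

static int reset_notify_init_sketch(struct hnae3_handle *handle)
{
	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;	/* the down path was a no-op, so is init */
	}
	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
	/* ... __hns_roce_hw_v2_init_instance() would run here ... */
	return 0;
}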
diff --git a/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch b/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch
new file mode 100644
index 0000000000..25bd379ed6
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch
@@ -0,0 +1,48 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:06 +0800
+Subject: RDMA/hns: Fix the bug with updating rq head pointer when flush cqe
+Patch-mainline: v5.1-rc1
+Git-commit: 9c6ccc035c209dda07685e8dba829a203ba17499
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+When flushing cqes on a QP attached to an SRQ, the driver must not update
+the RQ head pointer in the hardware.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3789,13 +3789,16 @@ static int hns_roce_v2_modify_qp(struct
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+- roce_set_field(context->byte_84_rq_ci_pi,
++
++ if (!ibqp->srq) {
++ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+ hr_qp->rq.head);
+- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
++ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
++ }
+ }
+
+ if (attr_mask & IB_QP_AV) {
+@@ -4281,7 +4284,8 @@ static void hns_roce_set_qps_to_err(stru
+ if (hr_qp->ibqp.uobject) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
++ if (hr_qp->rdb_en == 1)
++ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
+ return;
diff --git a/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch
new file mode 100644
index 0000000000..b0f860e779
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch
@@ -0,0 +1,168 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:15 +0800
+Subject: RDMA/hns: Fix the chip hanging caused by sending doorbell during
+ reset
+Patch-mainline: v5.1-rc1
+Git-commit: d3743fa94ccd177917783726faf54632439ddb54
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+On the hip08 chip, the chip may hang when a doorbell is sent during
+reset. We can fix it by prohibiting doorbells during reset.
+
+Fixes: 2d40788825ac ("RDMA/hns: Add support for processing send wr and receive wr")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 1 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 25 ++++++++++++++++---------
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 11 +++++++++++
+ 3 files changed, 28 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -946,6 +946,7 @@ struct hns_roce_dev {
+ spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
++ bool dis_db;
+ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -587,7 +587,7 @@ out:
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+ V2_DB_PARAMETER_SL_S, qp->sl);
+
+- hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
++ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+
+ qp->sq_next_wqe = ind;
+ qp->next_sge = sge_ind;
+@@ -717,7 +717,7 @@ static int hns_roce_v2_cmd_hw_reseted(st
+ unsigned long reset_stage)
+ {
+ /* When hardware reset has been completed once or more, we should stop
+- * sending mailbox&cmq to hardware. If now in .init_instance()
++ * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
+ * function, we should exit with error. If now at HNAE3_INIT_CLIENT
+ * stage of soft reset process, we should exit with error, and then
+ * HNAE3_INIT_CLIENT related process can rollback the operation like
+@@ -726,6 +726,7 @@ static int hns_roce_v2_cmd_hw_reseted(st
+ * reset process once again.
+ */
+ hr_dev->is_reset = true;
++ hr_dev->dis_db = true;
+
+ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
+ instance_stage == HNS_ROCE_STATE_INIT)
+@@ -742,8 +743,8 @@ static int hns_roce_v2_cmd_hw_resetting(
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+- /* When hardware reset is detected, we should stop sending mailbox&cmq
+- * to hardware. If now in .init_instance() function, we should
++ /* When hardware reset is detected, we should stop sending mailbox&cmq&
++ * doorbell to hardware. If now in .init_instance() function, we should
+ * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
+ * process, we should exit with error, and then HNAE3_INIT_CLIENT
+ * related process can rollback the operation like notifing hardware to
+@@ -751,6 +752,7 @@ static int hns_roce_v2_cmd_hw_resetting(
+ * error to notify NIC driver to reschedule soft reset process once
+ * again.
+ */
++ hr_dev->dis_db = true;
+ if (!ops->get_hw_reset_stat(handle))
+ hr_dev->is_reset = true;
+
+@@ -768,9 +770,10 @@ static int hns_roce_v2_cmd_sw_resetting(
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ /* When software reset is detected at .init_instance() function, we
+- * should stop sending mailbox&cmq to hardware, and exit with
+- * error.
++ * should stop sending mailbox&cmq&doorbell to hardware, and exit
++ * with error.
+ */
++ hr_dev->dis_db = true;
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
+ hr_dev->is_reset = true;
+
+@@ -2495,6 +2498,7 @@ static void hns_roce_v2_write_cqc(struct
+ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
+ {
++ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ u32 notification_flag;
+ u32 doorbell[2];
+@@ -2520,7 +2524,7 @@ static int hns_roce_v2_req_notify_cq(str
+ roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
+ notification_flag);
+
+- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
++ hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
+
+ return 0;
+ }
+@@ -4763,6 +4767,7 @@ static void hns_roce_v2_init_irq_work(st
+
+ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+ {
++ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+@@ -4789,7 +4794,7 @@ static void set_eq_cons_index_v2(struct
+ HNS_ROCE_V2_EQ_DB_PARA_S,
+ (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+
+- hns_roce_write64_k(doorbell, eq->doorbell);
++ hns_roce_write64(hr_dev, doorbell, eq->doorbell);
+ }
+
+ static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+@@ -6011,6 +6016,7 @@ static int hns_roce_v2_post_srq_recv(str
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+ {
++ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_v2_db srq_db;
+@@ -6072,7 +6078,7 @@ static int hns_roce_v2_post_srq_recv(str
+ srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
+ srq_db.parameter = srq->head;
+
+- hns_roce_write64_k((__le32 *)&srq_db, srq->db_reg_l);
++ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+
+ }
+
+@@ -6291,6 +6297,7 @@ static int hns_roce_hw_v2_reset_notify_d
+ return 0;
+
+ hr_dev->active = false;
++ hr_dev->dis_db = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1799,4 +1799,15 @@ struct hns_roce_sccc_clr_done {
+ __le32 rsv[5];
+ };
+
++static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
++ void __iomem *dest)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
++ hns_roce_write64_k(val, dest);
++}
++
+ #endif
diff --git a/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch
new file mode 100644
index 0000000000..529996624b
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch
@@ -0,0 +1,285 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:14 +0800
+Subject: RDMA/hns: Fix the chip hanging caused by sending mailbox&CMQ during
+ reset
+Patch-mainline: v5.1-rc1
+Git-commit: 6a04aed6afaefd5fd396f23da184298135f31e37
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+On the hip08 chip, there is a possibility of the chip hanging, and of
+other errors, when sending mailbox & doorbell commands during reset. Fix
+it by prohibiting mailbox and doorbell traffic while a reset is in
+progress or has occurred, so that the hardware can keep working normally.
+
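+A minimal sketch of the guard this change builds around command
+submission (a standalone model of the flow, not the driver code; the
+reset bookkeeping is reduced to two flags here):
+
+  enum { CMD_RST_PRC_OTHERS, CMD_RST_PRC_SUCCESS, CMD_RST_PRC_EBUSY };
+
+  struct dev_state { int is_reset; int hw_resetting; };
+
+  static int rst_process_cmd(struct dev_state *d)
+  {
+          if (d->is_reset)
+                  return CMD_RST_PRC_SUCCESS; /* reset done: drop command */
+          if (d->hw_resetting)
+                  return CMD_RST_PRC_EBUSY;   /* reset running: retry later */
+          return CMD_RST_PRC_OTHERS;          /* safe to touch hardware */
+  }
+
+  static int send_guarded(struct dev_state *d)
+  {
+          int ret = rst_process_cmd(d);
+
+          if (ret == CMD_RST_PRC_SUCCESS)
+                  return 0;
+          if (ret == CMD_RST_PRC_EBUSY)
+                  return -16;                 /* -EBUSY in the driver */
+          /* ... issue the mailbox/CMQ command here ... */
+          return 0;
+  }
+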
+Fixes: a04ff739f2a9 ("RDMA/hns: Add command queue support for hip08 RoCE driver")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.c | 32 ++++--
+ drivers/infiniband/hw/hns/hns_roce_device.h | 7 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 139 ++++++++++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2
+ 4 files changed, 167 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
+@@ -176,17 +176,33 @@ int hns_roce_cmd_mbox(struct hns_roce_de
+ unsigned long in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout)
+ {
+- if (hr_dev->is_reset)
+- return 0;
++ int ret;
++
++ if (hr_dev->hw->rst_prc_mbox) {
++ ret = hr_dev->hw->rst_prc_mbox(hr_dev);
++ if (ret == CMD_RST_PRC_SUCCESS)
++ return 0;
++ else if (ret == CMD_RST_PRC_EBUSY)
++ return -EBUSY;
++ }
+
+ if (hr_dev->cmd.use_events)
+- return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+- in_modifier, op_modifier, op,
+- timeout);
++ ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
++ in_modifier, op_modifier, op,
++ timeout);
+ else
+- return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+- in_modifier, op_modifier, op,
+- timeout);
++ ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
++ in_modifier, op_modifier, op,
++ timeout);
++
++ if (ret == CMD_RST_PRC_EBUSY)
++ return -EBUSY;
++
++ if (ret && (hr_dev->hw->rst_prc_mbox &&
++ hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
++ return 0;
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -237,6 +237,12 @@ enum {
+ HNS_ROCE_RST_DIRECT_RETURN = 0,
+ };
+
++enum {
++ CMD_RST_PRC_OTHERS,
++ CMD_RST_PRC_SUCCESS,
++ CMD_RST_PRC_EBUSY,
++};
++
+ #define HNS_ROCE_CMD_SUCCESS 1
+
+ #define HNS_ROCE_PORT_DOWN 0
+@@ -875,6 +881,7 @@ struct hns_roce_hw {
+ u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
+ u16 token, int event);
+ int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
++ int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ const union ib_gid *gid, const struct ib_gid_attr *attr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -712,6 +712,110 @@ out:
+ return ret;
+ }
+
++static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
++ unsigned long instance_stage,
++ unsigned long reset_stage)
++{
++ /* When hardware reset has been completed once or more, we should stop
++ * sending mailbox&cmq to hardware. If now in .init_instance()
++ * function, we should exit with error. If now at HNAE3_INIT_CLIENT
++ * stage of soft reset process, we should exit with error, and then
++ * HNAE3_INIT_CLIENT related process can rollback the operation like
++ * notifing hardware to free resources, HNAE3_INIT_CLIENT related
++ * process will exit with error to notify NIC driver to reschedule soft
++ * reset process once again.
++ */
++ hr_dev->is_reset = true;
++
++ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
++ instance_stage == HNS_ROCE_STATE_INIT)
++ return CMD_RST_PRC_EBUSY;
++
++ return CMD_RST_PRC_SUCCESS;
++}
++
++static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
++ unsigned long instance_stage,
++ unsigned long reset_stage)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ /* When hardware reset is detected, we should stop sending mailbox&cmq
++ * to hardware. If now in .init_instance() function, we should
++ * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
++ * process, we should exit with error, and then HNAE3_INIT_CLIENT
++ * related process can rollback the operation like notifing hardware to
++ * free resources, HNAE3_INIT_CLIENT related process will exit with
++ * error to notify NIC driver to reschedule soft reset process once
++ * again.
++ */
++ if (!ops->get_hw_reset_stat(handle))
++ hr_dev->is_reset = true;
++
++ if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
++ instance_stage == HNS_ROCE_STATE_INIT)
++ return CMD_RST_PRC_EBUSY;
++
++ return CMD_RST_PRC_SUCCESS;
++}
++
++static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ /* When software reset is detected at .init_instance() function, we
++ * should stop sending mailbox&cmq to hardware, and exit with
++ * error.
++ */
++ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
++ hr_dev->is_reset = true;
++
++ return CMD_RST_PRC_EBUSY;
++}
++
++static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ unsigned long instance_stage; /* the current instance stage */
++ unsigned long reset_stage; /* the current reset stage */
++ unsigned long reset_cnt;
++ bool sw_resetting;
++ bool hw_resetting;
++
++ if (hr_dev->is_reset)
++ return CMD_RST_PRC_SUCCESS;
++
++ /* Get information about reset from NIC driver or RoCE driver itself,
++ * the meaning of the following variables from NIC driver are described
++ * as below:
++ * reset_cnt -- The count value of completed hardware reset.
++ * hw_resetting -- Whether hardware device is resetting now.
++ * sw_resetting -- Whether NIC's software reset process is running now.
++ */
++ instance_stage = handle->rinfo.instance_state;
++ reset_stage = handle->rinfo.reset_state;
++ reset_cnt = ops->ae_dev_reset_cnt(handle);
++ hw_resetting = ops->get_hw_reset_stat(handle);
++ sw_resetting = ops->ae_dev_resetting(handle);
++
++ if (reset_cnt != hr_dev->reset_cnt)
++ return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
++ reset_stage);
++ else if (hw_resetting)
++ return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
++ reset_stage);
++ else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
++ return hns_roce_v2_cmd_sw_resetting(hr_dev);
++
++ return 0;
++}
++
+ static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
+ {
+ int ntu = ring->next_to_use;
+@@ -892,8 +996,8 @@ static int hns_roce_cmq_csq_clean(struct
+ return clean;
+ }
+
+-static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+- struct hns_roce_cmq_desc *desc, int num)
++static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++ struct hns_roce_cmq_desc *desc, int num)
+ {
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+@@ -905,9 +1009,6 @@ static int hns_roce_cmq_send(struct hns_
+ int ret = 0;
+ int ntc;
+
+- if (hr_dev->is_reset)
+- return 0;
+-
+ spin_lock_bh(&csq->lock);
+
+ if (num > hns_roce_cmq_space(csq)) {
+@@ -982,6 +1083,30 @@ static int hns_roce_cmq_send(struct hns_
+ return ret;
+ }
+
++int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++ struct hns_roce_cmq_desc *desc, int num)
++{
++ int retval;
++ int ret;
++
++ ret = hns_roce_v2_rst_process_cmd(hr_dev);
++ if (ret == CMD_RST_PRC_SUCCESS)
++ return 0;
++ if (ret == CMD_RST_PRC_EBUSY)
++ return ret;
++
++ ret = __hns_roce_cmq_send(hr_dev, desc, num);
++ if (ret) {
++ retval = hns_roce_v2_rst_process_cmd(hr_dev);
++ if (retval == CMD_RST_PRC_SUCCESS)
++ return 0;
++ else if (retval == CMD_RST_PRC_EBUSY)
++ return retval;
++ }
++
++ return ret;
++}
++
+ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_query_version *resp;
+@@ -1857,6 +1982,9 @@ static int hns_roce_v2_chk_mbox(struct h
+
+ status = hns_roce_v2_cmd_complete(hr_dev);
+ if (status != 0x1) {
++ if (status == CMD_RST_PRC_EBUSY)
++ return status;
++
+ dev_err(dev, "mailbox status 0x%x!\n", status);
+ return -EBUSY;
+ }
+@@ -5961,6 +6089,7 @@ static const struct hns_roce_hw hns_roce
+ .hw_exit = hns_roce_v2_exit,
+ .post_mbox = hns_roce_v2_post_mbox,
+ .chk_mbox = hns_roce_v2_chk_mbox,
++ .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
+ .set_gid = hns_roce_v2_set_gid,
+ .set_mac = hns_roce_v2_set_mac,
+ .write_mtpt = hns_roce_v2_write_mtpt,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -96,6 +96,8 @@
+ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
+ #define HNS_ROCE_V2_RSV_QPS 8
+
++#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
++
+ #define HNS_ROCE_CONTEXT_HOP_NUM 1
+ #define HNS_ROCE_SCCC_HOP_NUM 1
+ #define HNS_ROCE_MTT_HOP_NUM 1
diff --git a/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch b/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch
new file mode 100644
index 0000000000..40bf3f4bf9
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch
@@ -0,0 +1,29 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:04 +0800
+Subject: RDMA/hns: Fix the state of rereg mr
+Patch-mainline: v5.1-rc1
+Git-commit: ab22bf05216a6bb4812448f3a8609489047cf311
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The state of the mr after a reregister operation should be set to the
+valid state. Otherwise, it keeps the same state it had before the
+reregister.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2195,6 +2195,9 @@ static int hns_roce_v2_rereg_write_mtpt(
+ struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+ int ret = 0;
+
++ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
++ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
++
+ if (flags & IB_MR_REREG_PD) {
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, pdn);
diff --git a/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch b/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch
new file mode 100644
index 0000000000..6460ca7056
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch
@@ -0,0 +1,54 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:25 +0800
+Subject: RDMA/hns: Hide error print information with roce vf device
+Patch-mainline: v5.2-rc1
+Git-commit: 07c2339a91c1ec3a8b8ada00361eced7b153ec0c
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The driver should not print error information when the hip08 driver does
+not support the virtual function.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -6123,15 +6123,8 @@ static int hns_roce_hw_v2_get_cfg(struct
+ struct hnae3_handle *handle)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+- const struct pci_device_id *id;
+ int i;
+
+- id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+- if (!id) {
+- dev_err(hr_dev->dev, "device is not compatible!\n");
+- return -ENXIO;
+- }
+-
+ hr_dev->hw = &hns_roce_hw_v2;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = hr_dev->sdb_offset;
+@@ -6219,6 +6212,7 @@ static void __hns_roce_hw_v2_uninit_inst
+ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ {
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ const struct pci_device_id *id;
+ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+@@ -6229,6 +6223,10 @@ static int hns_roce_hw_v2_init_instance(
+ goto reset_chk_err;
+ }
+
++ id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
++ if (!id)
++ return 0;
++
+ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
diff --git a/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch b/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch
new file mode 100644
index 0000000000..7a6da7c908
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch
@@ -0,0 +1,27 @@
+From: chenglang <chenglang@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:03 +0800
+Subject: RDMA/hns: Limit minimum ROCE CQ depth to 64
+Patch-mainline: v5.1-rc1
+Git-commit: 704e0e613a6d584fde1c80ead0329e918b4f8671
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+This patch modifies the minimum CQ depth specification of hip08 to be
+consistent with the processing of hip06.
+
+Signed-off-by: chenglang <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1512,6 +1512,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
+ caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
+ caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
++ caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
+ caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
+ caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
+ caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
diff --git a/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch b/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch
new file mode 100644
index 0000000000..8c19fffea0
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch
@@ -0,0 +1,34 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 19 Mar 2019 11:10:09 +0200
+Subject: RDMA/hns: Limit scope of hns_roce_cmq_send()
+Patch-mainline: v5.2-rc1
+Git-commit: e95e52a1788d4a8af547261875c0fbae2e6e3028
+References: bsc#1104427 FATE#326416
+
+A forgotten static keyword causes the following warning to appear while
+building the HNS driver. Declare hns_roce_cmq_send() as a static function
+to fix it.
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:1089:5: warning: no previous
+prototype for 'hns_roce_cmq_send' [-Wmissing-prototypes]
+ int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+
+Fixes: 6a04aed6afae ("RDMA/hns: Fix the chip hanging caused by sending mailbox&CMQ during reset")
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1086,7 +1086,7 @@ static int __hns_roce_cmq_send(struct hn
+ return ret;
+ }
+
+-int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+ {
+ int retval;
diff --git a/patches.drivers/RDMA-hns-Make-some-function-static.patch b/patches.drivers/RDMA-hns-Make-some-function-static.patch
new file mode 100644
index 0000000000..95f39ddb79
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Make-some-function-static.patch
@@ -0,0 +1,60 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 1 Feb 2019 11:11:04 +0800
+Subject: RDMA/hns: Make some function static
+Patch-mainline: v5.1-rc1
+Git-commit: c3c668e742397dfc107e44c09606cc68b37df30d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Fixes the following sparse warnings:
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:5822:5: warning:
+ symbol 'hns_roce_v2_query_srq' was not declared. Should it be static?
+drivers/infiniband/hw/hns/hns_roce_srq.c:158:6: warning:
+ symbol 'hns_roce_srq_free' was not declared. Should it be static?
+drivers/infiniband/hw/hns/hns_roce_srq.c:81:5: warning:
+ symbol 'hns_roce_srq_alloc' was not declared. Should it be static?
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ drivers/infiniband/hw/hns/hns_roce_srq.c | 9 +++++----
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5818,7 +5818,7 @@ static int hns_roce_v2_modify_srq(struct
+ return 0;
+ }
+
+-int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
++static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -79,9 +79,9 @@ static int hns_roce_hw2sw_srq(struct hns
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ }
+
+-int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
+- struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
+- struct hns_roce_srq *srq)
++static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
++ u16 xrcd, struct hns_roce_mtt *hr_mtt,
++ u64 db_rec_addr, struct hns_roce_srq *srq)
+ {
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct hns_roce_cmd_mailbox *mailbox;
+@@ -160,7 +160,8 @@ err_out:
+ return ret;
+ }
+
+-void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
++static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
++ struct hns_roce_srq *srq)
+ {
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ int ret;
diff --git a/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch b/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch
new file mode 100644
index 0000000000..09e16f07be
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch
@@ -0,0 +1,48 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 16 Feb 2019 20:10:24 +0800
+Subject: RDMA/hns: Modify qp&cq&pd specification according to UM
+Patch-mainline: v5.1-rc1
+Git-commit: 3e394f9413ecba2779b6a1d77095f4d8611a52d2
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+According to hip08's limitations, the qp&cq specification is 1M and the
+mtpt specification is 1M in kernel space.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -45,14 +45,14 @@
+ #define HNS_ROCE_VF_SGID_NUM 32
+ #define HNS_ROCE_VF_SL_NUM 8
+
+-#define HNS_ROCE_V2_MAX_QP_NUM 0x2000
+-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
++#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
++#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
+ #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ 0x100000
+ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+-#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
+-#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
++#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
++#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+@@ -67,7 +67,7 @@
+ #define HNS_ROCE_V2_COMP_VEC_NUM 63
+ #define HNS_ROCE_V2_AEQE_VEC_NUM 1
+ #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
+-#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
++#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
+ #define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
+ #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
diff --git a/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch b/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch
new file mode 100644
index 0000000000..cf9eae1d3f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch
@@ -0,0 +1,28 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:09 +0800
+Subject: RDMA/hns: Modify the pbl ba page size for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 91fb4d83b88a7b544ce564c44167aad29d4154f0
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+Modify the pbl ba page size to 16K in order to support a 4G MR size.
+
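+(As a rough check, assuming 4KB pbl buffer pages and 8-byte page
+addresses: one 4KB pbl page holds 512 addresses covering 512 * 4KB = 2MB,
+and one 16KB ba page holds 2048 pbl page addresses, so a single ba page
+spans 2048 * 2MB = 4GB, whereas a 4KB ba page would only cover 1GB.)
+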
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1366,7 +1366,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->mpt_ba_pg_sz = 0;
+ caps->mpt_buf_pg_sz = 0;
+ caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+- caps->pbl_ba_pg_sz = 0;
++ caps->pbl_ba_pg_sz = 2;
+ caps->pbl_buf_pg_sz = 0;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->mtt_ba_pg_sz = 0;
diff --git a/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch b/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch
new file mode 100644
index 0000000000..3a110d3ffd
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch
@@ -0,0 +1,92 @@
+From: Lang Cheng <chenglang@huawei.com>
+Date: Fri, 24 May 2019 15:31:22 +0800
+Subject: RDMA/hns: Move spin_lock_irqsave to the correct place
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 780f33962ef27d7f27c6b47a55593c6ffd357922
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When hip08 sets a gid, it calls spin_unlock_bh while sending the cmq. If
+main.ko has called spin_lock_irqsave first, and the kernel predates commit
+f71b74bca637 ("irq/softirqs: Use lockdep to assert IRQs are
+disabled/enabled"), this triggers a WARN_ON_ONCE because spin_unlock_bh is
+called in an interrupt-disabled context.
+
+In fact, the spin_lock_irqsave in main.ko is only needed for hip06 and
+should be moved into hns_roce_hw_v1.c. hns_roce_hw_v2.c uses its own
+spin_lock_bh/spin_unlock_bh and does not need main.ko to manage the lock.
+
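+A sketch of the problematic nesting (assumed call chain, hip08 path):
+
+  hns_roce_add_gid()
+    spin_lock_irqsave(&hr_dev->iboe.lock, flags)   /* IRQs now off */
+      hw->set_gid()                                /* hip08 sends a cmq */
+        spin_lock_bh(&csq->lock)
+        ...
+        spin_unlock_bh(&csq->lock)   /* re-enables softirqs with IRQs
+                                        still disabled -> WARN_ON_ONCE */
+    spin_unlock_irqrestore(&hr_dev->iboe.lock, flags)
+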
+Signed-off-by: Lang Cheng <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 5 +++++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 10 ----------
+ 2 files changed, 5 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -1780,11 +1780,14 @@ static int hns_roce_v1_set_gid(struct hn
+ int gid_index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+ {
++ unsigned long flags;
+ u32 *p = NULL;
+ u8 gid_idx = 0;
+
+ gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
+
++ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
++
+ p = (u32 *)&gid->raw[0];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+@@ -1801,6 +1804,8 @@ static int hns_roce_v1_set_gid(struct hn
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
++ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
++
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -78,18 +78,13 @@ static int hns_roce_add_gid(const struct
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ u8 port = attr->port_num - 1;
+- unsigned long flags;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+-
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
+
+- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+-
+ return ret;
+ }
+
+@@ -98,18 +93,13 @@ static int hns_roce_del_gid(const struct
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ struct ib_gid_attr zattr = { };
+ u8 port = attr->port_num - 1;
+- unsigned long flags;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+-
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
+
+- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+-
+ return ret;
+ }
+
diff --git a/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch b/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch
new file mode 100644
index 0000000000..1f92cbf96e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch
@@ -0,0 +1,248 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:24 +0800
+Subject: RDMA/hns: Only assgin some fields if the relatived attr_mask is set
+Patch-mainline: v5.2-rc1
+Git-commit: 5b01b243b0b3725b4460e8924e1f105bb4038969
+References: bsc#1104427 FATE#326416
+
+According to the IB protocol, some fields of the qp context are optional
+and are only filled in when the related attr_mask bits are set. The
+related attr_mask bits include IB_QP_TIMEOUT, IB_QP_RETRY_CNT,
+IB_QP_RNR_RETRY and IB_QP_MIN_RNR_TIMER. Besides, we move some assignments
+of qp context fields out of the specific qp state transition functions.
+
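+The shape of the change, as a standalone sketch (field names reduced to
+stand-ins; the real code uses roce_set_field on the qpc and its mask):
+
+  #define IB_QP_TIMEOUT_BIT   (1u << 0)  /* stand-in for IB_QP_TIMEOUT */
+
+  struct qpc { unsigned int at; };
+
+  static void apply(unsigned int attr_mask, const struct qpc *attr,
+                    struct qpc *ctx, struct qpc *mask)
+  {
+          /* Fill the field, and clear its mask bits to mark it valid,
+           * only when the matching attr_mask bit is set. */
+          if (attr_mask & IB_QP_TIMEOUT_BIT) {
+                  ctx->at = attr->at;
+                  mask->at = 0;
+          }
+  }
+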
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 152 +++++++++++++++--------------
+ 1 file changed, 81 insertions(+), 71 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3176,12 +3176,6 @@ static void modify_qp_reset_to_init(stru
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
+
+- if (attr_mask & IB_QP_QKEY) {
+- context->qkey_xrcd = attr->qkey;
+- qpc_mask->qkey_xrcd = 0;
+- hr_qp->qkey = attr->qkey;
+- }
+-
+ if (hr_qp->rdb_en) {
+ roce_set_bit(context->byte_68_rq_db,
+ V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
+@@ -3393,7 +3387,6 @@ static void modify_qp_reset_to_init(stru
+ 0);
+
+ hr_qp->access_flags = attr->qp_access_flags;
+- hr_qp->pkey_index = attr->pkey_index;
+ roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+@@ -3517,11 +3510,6 @@ static void modify_qp_init_to_init(struc
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+ }
+
+- if (attr_mask & IB_QP_QKEY) {
+- context->qkey_xrcd = attr->qkey;
+- qpc_mask->qkey_xrcd = 0;
+- }
+-
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+@@ -3641,13 +3629,6 @@ static int modify_qp_init_to_rtr(struct
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
+
+- roce_set_field(context->byte_80_rnr_rx_cqn,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
+- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+-
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
+ >> PAGE_ADDR_SHIFT);
+@@ -3713,15 +3694,6 @@ static int modify_qp_init_to_rtr(struct
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+ }
+
+- if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+- attr->max_dest_rd_atomic) {
+- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+- V2_QPC_BYTE_140_RR_MAX_S,
+- fls(attr->max_dest_rd_atomic - 1));
+- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+- V2_QPC_BYTE_140_RR_MAX_S, 0);
+- }
+-
+ if (attr_mask & IB_QP_DEST_QPN) {
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+@@ -3902,57 +3874,14 @@ static int modify_qp_rtr_to_rts(struct i
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+ V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+- V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
+- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+- V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+-
+- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
+- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
+-
+- roce_set_field(context->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+-
+- roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+- V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+- V2_QPC_BYTE_244_RNR_CNT_S, 0);
+-
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0x100);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+- if (attr_mask & IB_QP_TIMEOUT) {
+- if (attr->timeout < 31) {
+- roce_set_field(context->byte_28_at_fl,
+- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+- attr->timeout);
+- roce_set_field(qpc_mask->byte_28_at_fl,
+- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+- 0);
+- } else {
+- dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
+- }
+- }
+-
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+
+- if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+- V2_QPC_BYTE_208_SR_MAX_S,
+- fls(attr->max_rd_atomic - 1));
+- roce_set_field(qpc_mask->byte_208_irrl,
+- V2_QPC_BYTE_208_SR_MAX_M,
+- V2_QPC_BYTE_208_SR_MAX_S, 0);
+- }
+ return 0;
+ }
+
+@@ -4146,6 +4075,53 @@ static int hns_roce_v2_modify_qp(struct
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
++ if (attr_mask & IB_QP_TIMEOUT) {
++ if (attr->timeout < 31) {
++ roce_set_field(context->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ attr->timeout);
++ roce_set_field(qpc_mask->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ 0);
++ } else {
++ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
++ }
++ }
++
++ if (attr_mask & IB_QP_RETRY_CNT) {
++ roce_set_field(context->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
++ attr->retry_cnt);
++ roce_set_field(qpc_mask->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
++
++ roce_set_field(context->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_CNT_M,
++ V2_QPC_BYTE_212_RETRY_CNT_S,
++ attr->retry_cnt);
++ roce_set_field(qpc_mask->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_CNT_M,
++ V2_QPC_BYTE_212_RETRY_CNT_S, 0);
++ }
++
++ if (attr_mask & IB_QP_RNR_RETRY) {
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
++
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_CNT_M,
++ V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_CNT_M,
++ V2_QPC_BYTE_244_RNR_CNT_S, 0);
++ }
++
+ if (attr_mask & IB_QP_SQ_PSN) {
+ roce_set_field(context->byte_172_sq_psn,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+@@ -4192,9 +4168,37 @@ static int hns_roce_v2_modify_qp(struct
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+ }
+
++ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
++ attr->max_dest_rd_atomic) {
++ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++ V2_QPC_BYTE_140_RR_MAX_S,
++ fls(attr->max_dest_rd_atomic - 1));
++ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++ V2_QPC_BYTE_140_RR_MAX_S, 0);
++ }
++
++ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
++ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
++ V2_QPC_BYTE_208_SR_MAX_S,
++ fls(attr->max_rd_atomic - 1));
++ roce_set_field(qpc_mask->byte_208_irrl,
++ V2_QPC_BYTE_208_SR_MAX_M,
++ V2_QPC_BYTE_208_SR_MAX_S, 0);
++ }
++
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
++ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
++ roce_set_field(context->byte_80_rnr_rx_cqn,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_S,
++ attr->min_rnr_timer);
++ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
++ }
++
+ /* RC&UC required attr */
+ if (attr_mask & IB_QP_RQ_PSN) {
+ roce_set_field(context->byte_108_rx_reqepsn,
+@@ -4211,6 +4215,12 @@ static int hns_roce_v2_modify_qp(struct
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+ }
+
++ if (attr_mask & IB_QP_QKEY) {
++ context->qkey_xrcd = attr->qkey;
++ qpc_mask->qkey_xrcd = 0;
++ hr_qp->qkey = attr->qkey;
++ }
++
+ roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
+ ibqp->srq ? 1 : 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
diff --git a/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch b/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch
new file mode 100644
index 0000000000..ae6f922422
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch
@@ -0,0 +1,68 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:22 +0800
+Subject: RDMA/hns: Only assign the fields of the rq psn if IB_QP_RQ_PSN is set
+Patch-mainline: v5.2-rc1
+Git-commit: 601f3e6d067c4399953dc7ede8f4c5448f91b02a
+References: bsc#1104427 FATE#326416
+
+Only when the IB_QP_RQ_PSN flag of attr_mask is set is it valid to assign
+the related rq psn fields into the qp context when modifying the qp.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 28 ++++++++++++++++------------
+ 1 file changed, 16 insertions(+), 12 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3675,13 +3675,6 @@ static int modify_qp_init_to_rtr(struct
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+
+- roce_set_field(context->byte_108_rx_reqepsn,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
+- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+-
+ roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+ V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+@@ -3789,11 +3782,6 @@ static int modify_qp_init_to_rtr(struct
+ context->rq_rnr_timer = 0;
+ qpc_mask->rq_rnr_timer = 0;
+
+- roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+- V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
+- roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+- V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+ V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+@@ -4207,6 +4195,22 @@ static int hns_roce_v2_modify_qp(struct
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
++ /* RC&UC required attr */
++ if (attr_mask & IB_QP_RQ_PSN) {
++ roce_set_field(context->byte_108_rx_reqepsn,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
++ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
++
++ roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
++ V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
++ roce_set_field(qpc_mask->byte_152_raq,
++ V2_QPC_BYTE_152_RAQ_PSN_M,
++ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
++ }
++
+ roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
+ ibqp->srq ? 1 : 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
diff --git a/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch b/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch
new file mode 100644
index 0000000000..3dfafb3c11
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch
@@ -0,0 +1,133 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:21 +0800
+Subject: RDMA/hns: Only assign the relatived fields of psn if IB_QP_SQ_PSN is
+ set
+Patch-mainline: v5.2-rc1
+Git-commit: f04cc17878b47bfa47af2e50f481d7f6eaaf3ca7
+References: bsc#1104427 FATE#326416
+
+Only when the IB_QP_SQ_PSN flag of attr_mask is set is it valid to assign
+the related psn fields into the qp context when modifying the qp.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 83 ++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 37 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3891,13 +3891,6 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_240_RX_ACK_MSN_M,
+ V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+- roce_set_field(context->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+@@ -3911,27 +3904,6 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+
+- roce_set_field(context->byte_220_retry_psn_msn,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
+-
+- roce_set_field(context->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
+- roce_set_field(qpc_mask->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
+-
+- roce_set_field(context->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+@@ -3982,17 +3954,8 @@ static int modify_qp_rtr_to_rts(struct i
+ }
+ }
+
+- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+- V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+- V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+- roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+- V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+- V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+@@ -4195,6 +4158,52 @@ static int hns_roce_v2_modify_qp(struct
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
++ if (attr_mask & IB_QP_SQ_PSN) {
++ roce_set_field(context->byte_172_sq_psn,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_172_sq_psn,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
++
++ roce_set_field(context->byte_196_sq_psn,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_196_sq_psn,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
++
++ roce_set_field(context->byte_220_retry_psn_msn,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
++
++ roce_set_field(context->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
++ attr->sq_psn >> 16);
++ roce_set_field(qpc_mask->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
++
++ roce_set_field(context->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
++ attr->sq_psn);
++ roce_set_field(qpc_mask->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
++
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
++ }
++
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
diff --git a/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch b/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch
new file mode 100644
index 0000000000..87a93b911f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch
@@ -0,0 +1,62 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 12 Jan 2019 18:36:29 +0800
+Subject: RDMA/hns: RDMA/hns: Assign rq head pointer when enable rq record db
+Patch-mainline: v5.1-rc1
+Git-commit: de77503a59403e7045c18c6bb0a10c245a99b648
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When flushing a cqe, the driver needs to get the rq and sq head pointers
+from the user's db address space and update them in the qp context via
+modify qp. If the rq does not exist, it must not read the value from the
+user's db address space.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -676,6 +676,10 @@ static int hns_roce_create_qp_common(str
+ dev_err(dev, "rq record doorbell map failed!\n");
+ goto err_sq_dbmap;
+ }
++
++ /* indicate kernel supports rq record db */
++ resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
++ hr_qp->rdb_en = 1;
+ }
+ } else {
+ if (init_attr->create_flags &
+@@ -784,16 +788,11 @@ static int hns_roce_create_qp_common(str
+ else
+ hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
+
+- if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
+- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
+-
+- /* indicate kernel supports rq record db */
+- resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+- ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
++ if (udata) {
++ ret = ib_copy_to_udata(udata, &resp,
++ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ goto err_qp;
+-
+- hr_qp->rdb_en = 1;
+ }
+ hr_qp->event = hns_roce_ib_qp_event;
+
+@@ -970,7 +969,9 @@ int hns_roce_modify_qp(struct ib_qp *ibq
+ (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
++
++ if (hr_qp->rdb_en == 1)
++ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(dev, "flush cqe is not supported in userspace!\n");
+ goto out;
diff --git a/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch b/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch
new file mode 100644
index 0000000000..741207d5f6
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch
@@ -0,0 +1,124 @@
+From: Lang Cheng <chenglang@huawei.com>
+Date: Fri, 24 May 2019 15:31:23 +0800
+Subject: RDMA/hns: Remove jiffies operation in disable interrupt context
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 669cefb654cb69b280e31380f5fc7e3b5755b0cd
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+In some functions, the jiffies operation is unnecessary, and we can
+control the delay using the mdelay and udelay functions only. In
+particular, hns_roce_v1_clear_hem calls spin_lock_irqsave, so the context
+has interrupts disabled and we cannot use the jiffies and msleep
+functions.
+
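+A standalone sketch of the resulting wait loop (constants and the
+hardware poll are stand-ins; the driver uses readl and mdelay):
+
+  #define SYNC_TIMEOUT_MSECS   10
+  #define SYNC_STEP_MSECS      2
+
+  static int wait_for_hw(int (*hw_busy)(void))
+  {
+          int end = SYNC_TIMEOUT_MSECS;
+
+          while (end > 0) {
+                  if (!hw_busy())
+                          return 0;   /* hardware caught up */
+                  /* mdelay(SYNC_STEP_MSECS) in the driver: busy-wait,
+                   * legal with interrupts disabled, unlike msleep */
+                  end -= SYNC_STEP_MSECS;
+          }
+          return -16;                 /* -EBUSY */
+  }
+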
+Signed-off-by: Lang Cheng <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 21 +++++++++++----------
+ drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 19 ++++++++++---------
+ 2 files changed, 21 insertions(+), 19 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -376,18 +376,19 @@ static int hns_roce_set_hem(struct hns_r
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+- while (1) {
+- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+- if (!(time_before(jiffies, end))) {
+- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+- spin_unlock_irqrestore(lock, flags);
+- return -EBUSY;
+- }
+- } else {
++ end = HW_SYNC_TIMEOUT_MSECS;
++ while (end) {
++ if (!readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)
+ break;
+- }
++
+ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
++ end -= HW_SYNC_SLEEP_TIME_INTERVAL;
++ }
++
++ if (end <= 0) {
++ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
++ spin_unlock_irqrestore(lock, flags);
++ return -EBUSY;
+ }
+
+ bt_cmd_l = (u32)bt_ba;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -960,8 +960,7 @@ static int hns_roce_v1_recreate_lp_qp(st
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+- unsigned long end =
+- msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
++ unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
+ free_mr = &priv->free_mr;
+@@ -981,10 +980,11 @@ static int hns_roce_v1_recreate_lp_qp(st
+
+ queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
+
+- while (time_before_eq(jiffies, end)) {
++ while (end) {
+ if (try_wait_for_completion(&comp))
+ return 0;
+ msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
++ end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
+ }
+
+ lp_qp_work->comp_flag = 0;
+@@ -1098,8 +1098,7 @@ static int hns_roce_v1_dereg_mr(struct h
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+- unsigned long end =
+- msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
++ unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
+ unsigned long start = jiffies;
+ int npages;
+ int ret = 0;
+@@ -1129,10 +1128,11 @@ static int hns_roce_v1_dereg_mr(struct h
+
+ queue_work(free_mr->free_mr_wq, &(mr_work->work));
+
+- while (time_before_eq(jiffies, end)) {
++ while (end) {
+ if (try_wait_for_completion(&comp))
+ goto free_mr;
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
++ end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
+ }
+
+ mr_work->comp_flag = 0;
+@@ -2502,10 +2502,10 @@ static int hns_roce_v1_clear_hem(struct
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
++ end = HW_SYNC_TIMEOUT_MSECS;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+- if (!(time_before(jiffies, end))) {
++ if (end < 0) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+@@ -2514,7 +2514,8 @@ static int hns_roce_v1_clear_hem(struct
+ } else {
+ break;
+ }
+- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
++ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
++ end -= HW_SYNC_SLEEP_TIME_INTERVAL;
+ }
+
+ bt_cmd_val[0] = (__le32)bt_ba;
diff --git a/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch b/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch
new file mode 100644
index 0000000000..02c32d2774
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch
@@ -0,0 +1,41 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 31 Jan 2019 15:19:21 +0000
+Subject: RDMA/hns: Remove set but not used variable 'rst'
+Patch-mainline: v5.1-rc1
+Git-commit: da91ddfdc7212e6e716be55a5cf2305ce84a422f
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c: In function 'hns_roce_v2_qp_flow_control_init':
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:4384:33: warning:
+ variable 'rst' set but not used [-Wunused-but-set-variable]
+
+It has never been used since its introduction.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4381,7 +4381,7 @@ static int hns_roce_v2_destroy_qp(struct
+ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+ {
+- struct hns_roce_sccc_clr_done *rst, *resp;
++ struct hns_roce_sccc_clr_done *resp;
+ struct hns_roce_sccc_clr *clr;
+ struct hns_roce_cmq_desc desc;
+ int ret, i;
+@@ -4390,7 +4390,6 @@ static int hns_roce_v2_qp_flow_control_i
+
+ /* set scc ctx clear done flag */
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+- rst = (struct hns_roce_sccc_clr_done *)desc.data;
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
diff --git a/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch b/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch
new file mode 100644
index 0000000000..5141ca4e8d
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch
@@ -0,0 +1,33 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:05 +0800
+Subject: RDMA/hns: Set allocated memory to zero for wrid
+Patch-mainline: v5.1-rc1
+Git-commit: f7f27a5f03cc9f47cc14f75a5be25f0f26b1b5ff
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The memory allocated for wrid should be initialized to zero.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -764,10 +764,10 @@ static int hns_roce_create_qp_common(str
+ goto err_mtt;
+ }
+
+- hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
+- GFP_KERNEL);
+- hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
+- GFP_KERNEL);
++ hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
++ GFP_KERNEL);
++ hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
++ GFP_KERNEL);
+ if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
+ ret = -ENOMEM;
+ goto err_wrid;
diff --git a/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch b/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch
new file mode 100644
index 0000000000..406090e514
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch
@@ -0,0 +1,29 @@
+From: chenglang <chenglang@huawei.com>
+Date: Sun, 7 Apr 2019 13:23:37 +0800
+Subject: RDMA/hns: Support to create 1M srq queue
+Patch-mainline: v5.2-rc1
+Git-commit: 2b277dae0679c8177f161278dbad035688838d6e
+References: bsc#1104427 FATE#326416
+
+In mhop 0 mode, 64*bt_num queues can be supported.
+In mhop 1 mode, 32K*bt_num queues can be supported.
+Configure srqc_hop_num to 1 to support 1M SRQ queues.
+
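+(As a rough check, assuming 4KB pages and 64-byte srq context entries:
+mhop 0 gives 4KB / 64B = 64 contexts per base page; mhop 1 adds one level
+of indirection, 512 page addresses * 64 contexts = 32K per base page, so
+32 base pages reach 1M.)
+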
+Signed-off-by: chenglang <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1559,7 +1559,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->srqc_ba_pg_sz = 0;
+ caps->srqc_buf_pg_sz = 0;
+- caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
++ caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->cqc_ba_pg_sz = 0;
+ caps->cqc_buf_pg_sz = 0;
+ caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
diff --git a/patches.drivers/RDMA-hns-Update-CQE-specifications.patch b/patches.drivers/RDMA-hns-Update-CQE-specifications.patch
new file mode 100644
index 0000000000..8ce6152688
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Update-CQE-specifications.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Fri, 24 May 2019 15:31:21 +0800
+Subject: RDMA/hns: Update CQE specifications
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 0502849d0bb133b492eed24fd270441e652c84cc
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+According to hip08 UM, the maximum number of CQEs supported by each CQ is
+4M.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -54,7 +54,7 @@
+ #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+-#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
++#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
diff --git a/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch b/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch
new file mode 100644
index 0000000000..cfa8610d7d
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:23 +0800
+Subject: RDMA/hns: Update the range of raq_psn field of qp context
+Patch-mainline: v5.2-rc1
+Git-commit: 834fa8cf6f7002706b02873fc0d16f9b06ef4819
+References: bsc#1104427 FATE#326416
+
+According to the hip08 UM (User Manual), the raq_psn field occupies bits [23:0].
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -719,8 +719,8 @@ struct hns_roce_v2_qp_context {
+ #define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
+ #define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
+
+-#define V2_QPC_BYTE_152_RAQ_PSN_S 8
+-#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
++#define V2_QPC_BYTE_152_RAQ_PSN_S 0
++#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0)
+
+ #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
+ #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
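[Reading aid, not part of the patch: GENMASK(h, l) builds a mask with bits
h down to l set, so the change relocates the 24-bit field to the low end
of the word:

    GENMASK(31, 8) == 0xffffff00   /* old: 24 bits at offset 8 */
    GENMASK(23, 0) == 0x00ffffff   /* new: 24 bits at offset 0, per the UM */

which is also why the shift macro drops from 8 to 0 alongside the mask.]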
diff --git a/patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch b/patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch
new file mode 100644
index 0000000000..39e88293ae
--- /dev/null
+++ b/patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch
@@ -0,0 +1,90 @@
+From: Kangjie Lu <kjlu@umn.edu>
+Date: Fri, 15 Mar 2019 01:57:14 -0500
+Subject: RDMA/i40iw: Handle workqueue allocation failure
+Patch-mainline: v5.2-rc1
+Git-commit: e2a438bd7116889af36304903b92e56d0f347228
+References: jsc#SLE-4793
+
+alloc_ordered_workqueue may fail and return NULL. The fix captures the
+failure and handles it properly to avoid potential NULL pointer
+dereferences.
+
+Signed-off-by: Kangjie Lu <kjlu@umn.edu>
+Reviewed-by: Shiraz, Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h | 2 +-
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 18 +++++++++++++++---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 5 ++++-
+ 3 files changed, 20 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -552,7 +552,7 @@ enum i40iw_status_code i40iw_obj_aligned
+
+ void i40iw_request_reset(struct i40iw_device *iwdev);
+ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
+-void i40iw_setup_cm_core(struct i40iw_device *iwdev);
++int i40iw_setup_cm_core(struct i40iw_device *iwdev);
+ void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
+ void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
+ void i40iw_process_aeq(struct i40iw_device *);
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3237,7 +3237,7 @@ void i40iw_receive_ilq(struct i40iw_sc_v
+ * core
+ * @iwdev: iwarp device structure
+ */
+-void i40iw_setup_cm_core(struct i40iw_device *iwdev)
++int i40iw_setup_cm_core(struct i40iw_device *iwdev)
+ {
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+@@ -3257,9 +3257,19 @@ void i40iw_setup_cm_core(struct i40iw_de
+
+ cm_core->event_wq = alloc_ordered_workqueue("iwewq",
+ WQ_MEM_RECLAIM);
++ if (!cm_core->event_wq)
++ goto error;
+
+ cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
+ WQ_MEM_RECLAIM);
++ if (!cm_core->disconn_wq)
++ goto error;
++
++ return 0;
++error:
++ i40iw_cleanup_cm_core(&iwdev->cm_core);
++
++ return -ENOMEM;
+ }
+
+ /**
+@@ -3279,8 +3289,10 @@ void i40iw_cleanup_cm_core(struct i40iw_
+ del_timer_sync(&cm_core->tcp_timer);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+- destroy_workqueue(cm_core->event_wq);
+- destroy_workqueue(cm_core->disconn_wq);
++ if (cm_core->event_wq)
++ destroy_workqueue(cm_core->event_wq);
++ if (cm_core->disconn_wq)
++ destroy_workqueue(cm_core->disconn_wq);
+ }
+
+ /**
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1641,7 +1641,10 @@ static int i40iw_open(struct i40e_info *
+ iwdev = &hdl->device;
+ iwdev->hdl = hdl;
+ dev = &iwdev->sc_dev;
+- i40iw_setup_cm_core(iwdev);
++ if (i40iw_setup_cm_core(iwdev)) {
++ kfree(iwdev->hdl);
++ return -ENOMEM;
++ }
+
+ dev->back_dev = (void *)iwdev;
+ iwdev->ldev = &hdl->ldev;
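[The shape of the fix is the usual allocate/check/unwind pattern; a minimal
sketch with a hypothetical name ("mywq" is illustrative, not from the driver):

    struct workqueue_struct *wq;

    wq = alloc_ordered_workqueue("mywq", WQ_MEM_RECLAIM);
    if (!wq)
            return -ENOMEM;        /* the allocation really can fail */
    /* ... use wq ... */
    destroy_workqueue(wq);         /* only ever called on a non-NULL wq */

The NULL checks added to i40iw_cleanup_cm_core() matter because the error
path invokes it with only some of the workqueues allocated.]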
diff --git a/patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch b/patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch
new file mode 100644
index 0000000000..d50ffb990d
--- /dev/null
+++ b/patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch
@@ -0,0 +1,39 @@
+From: Potnuri Bharat Teja <bharat@chelsio.com>
+Date: Tue, 2 Apr 2019 14:46:11 +0530
+Subject: RDMA/iw_cxgb4: Always disconnect when QP is transitioning to
+ TERMINATE state
+Patch-mainline: v5.2-rc1
+Git-commit: d2c33370ae73105c7c7df8f7048d20653991b4cb
+References: bsc#1136348 jsc#SLE-4684
+
+On receiving a TERM from the peer, the host moves the QP to TERMINATE
+state and then moves the adapter out of RDMA mode. After issuing a TERM,
+the peer issues a CLOSE, and if at this point the connectivity between
+the peer and the host is lost for a significant amount of time, the QP
+remains stuck in TERMINATE state.
+
+Therefore c4iw_modify_qp() needs to initiate a close on entering terminate
+state.
+
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/qp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1975,10 +1975,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp,
+ qhp->attr.layer_etype = attrs->layer_etype;
+ qhp->attr.ecode = attrs->ecode;
+ ep = qhp->ep;
++ c4iw_get_ep(&ep->com);
++ disconnect = 1;
+ if (!internal) {
+- c4iw_get_ep(&qhp->ep->com);
+ terminate = 1;
+- disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
+ ret = rdma_fini(rhp, qhp, ep);
diff --git a/patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch b/patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch
new file mode 100644
index 0000000000..5d0eeb8fa4
--- /dev/null
+++ b/patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch
@@ -0,0 +1,47 @@
+From: Steve Wise <swise@opengridcomputing.com>
+Date: Fri, 1 Feb 2019 12:44:32 -0800
+Subject: RDMA/iwcm: add tos_set bool to iw_cm struct
+Patch-mainline: v5.1-rc1
+Git-commit: 926ba19b3574f6a80823a42484877ed65e91da9c
+References: bsc#1136348 jsc#SLE-4684
+
+This allows drivers to know whether the tos was actively set by the application.
+
+Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/cma.c | 2 ++
+ include/rdma/iw_cm.h | 3 ++-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2407,6 +2407,7 @@ static int cma_iw_listen(struct rdma_id_
+ return PTR_ERR(id);
+
+ id->tos = id_priv->tos;
++ id->tos_set = id_priv->tos_set;
+ id_priv->cm_id.iw = id;
+
+ memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
+@@ -3795,6 +3796,7 @@ static int cma_connect_iw(struct rdma_id
+ return PTR_ERR(cm_id);
+
+ cm_id->tos = id_priv->tos;
++ cm_id->tos_set = id_priv->tos_set;
+ id_priv->cm_id.iw = cm_id;
+
+ memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
+--- a/include/rdma/iw_cm.h
++++ b/include/rdma/iw_cm.h
+@@ -94,7 +94,8 @@ struct iw_cm_id {
+ void (*add_ref)(struct iw_cm_id *);
+ void (*rem_ref)(struct iw_cm_id *);
+ u8 tos;
+- bool mapped;
++ bool tos_set:1;
++ bool mapped:1;
+ };
+
+ struct iw_cm_conn_param {
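[A hedged sketch of how a consumer might use the new flag; apply_tos() is a
made-up stand-in, and real drivers differ:

    if (cm_id->tos_set)
            /* the application asked for this ToS explicitly */
            apply_tos(cm_id->tos);

Turning the single plain bool into two 1-bit bitfields adds the flag without
growing the structure.]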
diff --git a/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch b/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch
new file mode 100644
index 0000000000..fa9bb4ed91
--- /dev/null
+++ b/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch
@@ -0,0 +1,89 @@
+From: Sagiv Ozeri <sagiv.ozeri@marvell.com>
+Date: Mon, 20 May 2019 12:33:20 +0300
+Subject: RDMA/qedr: Fix incorrect device rate.
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 69054666df0a9b4e8331319f98b6b9a88bc3fcc4
+References: bsc#1136188
+
+Use the correct enum value introduced in commit 12113a35ada6 ("IB/core:
+Add HDR speed enum"). Prior to this change, a 50Gbps port would show
+40Gbps.
+
+This patch also cleans up the redundant redefinition of IB speeds in qedr.
+
+Fixes: 12113a35ada6 ("IB/core: Add HDR speed enum")
+Signed-off-by: Sagiv Ozeri <sagiv.ozeri@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 25 +++++++++----------------
+ 1 file changed, 9 insertions(+), 16 deletions(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -158,54 +158,47 @@ int qedr_query_device(struct ib_device *
+ return 0;
+ }
+
+-#define QEDR_SPEED_SDR (1)
+-#define QEDR_SPEED_DDR (2)
+-#define QEDR_SPEED_QDR (4)
+-#define QEDR_SPEED_FDR10 (8)
+-#define QEDR_SPEED_FDR (16)
+-#define QEDR_SPEED_EDR (32)
+-
+ static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+ {
+ switch (speed) {
+ case 1000:
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+- *ib_speed = QEDR_SPEED_DDR;
++ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+- *ib_speed = QEDR_SPEED_QDR;
+- *ib_width = IB_WIDTH_4X;
++ *ib_speed = IB_SPEED_HDR;
++ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 100000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+ }
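[Worked note, not part of the patch: the IB speed enums are per-lane rates,
and the effective link rate is speed-per-lane times width, which is where
the mis-report came from:

    QDR x4: 10 Gb/s/lane * 4 lanes = 40 Gb/s   (old mapping for 50000)
    HDR x1: 50 Gb/s/lane * 1 lane  = 50 Gb/s   (corrected mapping)

so a 50G port mapped to QDR x4 could only ever be reported as 40Gbps.]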
diff --git a/patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch b/patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch
new file mode 100644
index 0000000000..295ca5ef83
--- /dev/null
+++ b/patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch
@@ -0,0 +1,30 @@
+From: Gal Pressman <galpress@amazon.com>
+Date: Mon, 7 Jan 2019 17:27:56 +0200
+Subject: RDMA/qedr: Fix out of bounds index check in query pkey
+Patch-mainline: v5.1-rc1
+Git-commit: dbe30dae487e1a232158c24b432d45281c2805b7
+References: bsc#1136456 jsc#SLE-4689
+
+The pkey table size is QEDR_ROCE_PKEY_TABLE_LEN; the index should be
+tested for >= QEDR_ROCE_PKEY_TABLE_LEN instead of > QEDR_ROCE_PKEY_TABLE_LEN.
+
+Fixes: a7efd7773e31 ("qedr: Add support for PD,PKEY and CQ verbs")
+Signed-off-by: Gal Pressman <galpress@amazon.com>
+Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -67,7 +67,7 @@ static inline int qedr_ib_copy_to_udata(
+
+ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+ {
+- if (index > QEDR_ROCE_PKEY_TABLE_LEN)
++ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
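[The general rule behind the one-character fix, with table_len as a stand-in
name rather than anything from the driver:

    if (index >= table_len)    /* table_len entries: valid indices 0 .. table_len - 1 */
            return -EINVAL;

Testing with > instead lets the one-past-the-end index through.]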
diff --git a/patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch b/patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch
new file mode 100644
index 0000000000..f74a3d145d
--- /dev/null
+++ b/patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch
@@ -0,0 +1,36 @@
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Thu, 28 Mar 2019 11:49:47 -0500
+Subject: RDMA/rdmavt: Use correct sizing on buffers holding page DMA addresses
+Patch-mainline: v5.2-rc1
+Git-commit: 629e6f9db6bf4c5702212dd77da534b838f14859
+References: jsc#SLE-4925
+
+The buffer that holds the page DMA addresses is sized off umem->nmap.
+This can potentially cause out-of-bounds accesses on the PBL array when
+iterating the umem DMA-mapped SGL. This is because if umem pages are
+combined, umem->nmap can be much lower than the number of system pages
+in umem.
+
+Use ib_umem_num_pages() to size this buffer.
+
+Cc: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Cc: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Cc: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/mr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/sw/rdmavt/mr.c
++++ b/drivers/infiniband/sw/rdmavt/mr.c
+@@ -393,7 +393,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_
+ if (IS_ERR(umem))
+ return (void *)umem;
+
+- n = umem->nmap;
++ n = ib_umem_num_pages(umem);
+
+ mr = __rvt_alloc_mr(n, pd);
+ if (IS_ERR(mr)) {
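[A hedged summary of the two quantities involved, paraphrasing rather than
quoting the mainline definitions:

    umem->nmap               /* DMA-mapped SGL entries; physically contiguous
                              * pages may be coalesced into a single entry */
    ib_umem_num_pages(umem)  /* system pages spanned by the region, i.e.
                              * one slot per page DMA address (PBL slot) */

Sizing the PBL off nmap under-counts exactly when coalescing happens, hence
the out-of-bounds risk described above.]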
diff --git a/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch b/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch
index 360417b2c6..514839040c 100644
--- a/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch
+++ b/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch
@@ -22,7 +22,7 @@ Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
-@@ -826,8 +826,6 @@ static int m88e1510_config_init(struct p
+@@ -842,8 +842,6 @@ static int m88e1510_config_init(struct p
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
@@ -31,7 +31,7 @@ Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
-@@ -850,16 +848,6 @@ static int m88e1510_config_init(struct p
+@@ -866,16 +864,6 @@ static int m88e1510_config_init(struct p
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -47,4 +47,4 @@ Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
- phydev->advertising &= ~pause;
}
- return m88e1121_config_init(phydev);
+ return m88e1318_config_init(phydev);
diff --git a/patches.drivers/arm64-fix-ACPI-dependencies.patch b/patches.drivers/arm64-fix-ACPI-dependencies.patch
index a80fe9f45f..5e5b11a20c 100644
--- a/patches.drivers/arm64-fix-ACPI-dependencies.patch
+++ b/patches.drivers/arm64-fix-ACPI-dependencies.patch
@@ -3,7 +3,7 @@ Date: Tue, 24 Jul 2018 11:48:45 +0200
Subject: arm64: fix ACPI dependencies
Git-commit: 2c870e61132c082a03769d2ac0a2849ba33c10e3
Patch-mainline: v4.19-rc1
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
Kconfig reports a warning on x86 builds after the ARM64 dependency
was added.
diff --git a/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch b/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch
index cca10d7d4a..2c0e1262ea 100644
--- a/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch
+++ b/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch
@@ -4,7 +4,7 @@ Subject: arm64, mm, efi: Account for GICv3 LPI tables in static memblock
reserve table
Git-commit: 8a5b403d71affa098009cc3dff1b2c45113021ad
Patch-mainline: v5.0-rc7
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
In the irqchip and EFI code, we have what basically amounts to a quirk
to work around a peculiarity in the GICv3 architecture, which permits
diff --git a/patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch b/patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch
new file mode 100644
index 0000000000..f0355c1bc9
--- /dev/null
+++ b/patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch
@@ -0,0 +1,78 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 1 Aug 2018 15:42:56 -0700
+Subject: bitmap: Add bitmap_alloc(), bitmap_zalloc() and bitmap_free()
+Patch-mainline: v4.19-rc1
+Git-commit: c42b65e363ce97a828f81b59033c3558f8fa7f70
+References: jsc#SLE-4797
+
+A lot of code becomes ugly because of open-coded allocations for bitmaps.
+
+Introduce three helpers to let users state their intention more clearly
+and keep their code neat.
+
+Note: due to multiple circular dependencies we cannot provide the
+helpers as inlines. For now we keep them exported and, perhaps, at some
+point in the future we will sort out the header inclusion and
+inheritance.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/bitmap.h | 8 ++++++++
+ lib/bitmap.c | 20 ++++++++++++++++++++
+ 2 files changed, 28 insertions(+)
+
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -99,6 +99,14 @@
+ */
+
+ /*
++ * Allocation and deallocation of bitmap.
++ * Provided in lib/bitmap.c to avoid circular dependency.
++ */
++extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
++extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
++extern void bitmap_free(const unsigned long *bitmap);
++
++/*
+ * lib/bitmap.c provides these functions:
+ */
+
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -13,6 +13,7 @@
+ #include <linux/bitops.h>
+ #include <linux/bug.h>
+ #include <linux/kernel.h>
++#include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/uaccess.h>
+
+@@ -1134,6 +1135,25 @@ void bitmap_copy_le(unsigned long *dst,
+ EXPORT_SYMBOL(bitmap_copy_le);
+ #endif
+
++unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
++{
++ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
++ flags);
++}
++EXPORT_SYMBOL(bitmap_alloc);
++
++unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
++{
++ return bitmap_alloc(nbits, flags | __GFP_ZERO);
++}
++EXPORT_SYMBOL(bitmap_zalloc);
++
++void bitmap_free(const unsigned long *bitmap)
++{
++ kfree(bitmap);
++}
++EXPORT_SYMBOL(bitmap_free);
++
+ #if BITS_PER_LONG == 64
+ /**
+ * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
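[A minimal usage sketch of the new helpers; the size of 128 bits is arbitrary:

    unsigned long *mask;

    mask = bitmap_zalloc(128, GFP_KERNEL);   /* 128 bits, zero-filled */
    if (!mask)
            return -ENOMEM;
    set_bit(42, mask);                       /* normal bitmap ops apply */
    bitmap_free(mask);

bitmap_alloc() is the non-zeroing variant for callers that fully overwrite
the bitmap anyway.]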
diff --git a/patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch b/patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch
new file mode 100644
index 0000000000..e02a6fd3ef
--- /dev/null
+++ b/patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch
@@ -0,0 +1,60 @@
+From: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Date: Tue, 16 Apr 2019 01:46:13 -0700
+Subject: bnx2x: Add support for detection of P2P event packets.
+Patch-mainline: v5.2-rc1
+Git-commit: 00165c25fa3e5814f399f9a4fdd998066a06330c
+References: bsc#1136498 jsc#SLE-4699
+
+The patch adds support for detecting P2P (peer-to-peer) event packets.
+This is required for timestamping PTP packets in peer-delay mode.
+Unmask the bits below (set them to 0) for the device to detect P2P packets.
+ NIG_REG_P0/1_LLH_PTP_PARAM_MASK
+ NIG_REG_P0/1_TLLH_PTP_PARAM_MASK
+ bit 1 - IPv4 DA 1 of 224.0.0.107.
+ bit 3 - IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B.
+ bit 9 - MAC DA 1 of 0x01-80-C2-00-00-0E.
+ NIG_REG_P0/1_LLH_PTP_RULE_MASK
+ NIG_REG_P0/1_TLLH_PTP_RULE_MASK
+ bit 2 - {IPv4 DA 1; UDP DP 0}
+ bit 6 - MAC Ethertype 0 of 0x88F7.
+ bit 9 - MAC DA 1 of 0x01-80-C2-00-00-0E.
+
+Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -15384,16 +15384,18 @@ static int bnx2x_enable_ptp_packets(stru
+ return 0;
+ }
+
+-#define BNX2X_PTP_TX_ON_PARAM_MASK 0x6AA
+-#define BNX2X_PTP_TX_ON_RULE_MASK 0x3EEE
+-#define BNX2X_PTP_V1_L4_PARAM_MASK 0x7EE
+-#define BNX2X_PTP_V1_L4_RULE_MASK 0x3FFE
+-#define BNX2X_PTP_V2_L4_PARAM_MASK 0x7EA
+-#define BNX2X_PTP_V2_L4_RULE_MASK 0x3FEE
+-#define BNX2X_PTP_V2_L2_PARAM_MASK 0x6BF
+-#define BNX2X_PTP_V2_L2_RULE_MASK 0x3EFF
+-#define BNX2X_PTP_V2_PARAM_MASK 0x6AA
+-#define BNX2X_PTP_V2_RULE_MASK 0x3EEE
++#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
++#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
++#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
++#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
++#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
++#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
++#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
++#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
++#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
++#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
++#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
++#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
+ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+ {
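[Since a cleared bit in these registers enables detection, the new macros
AND extra zero bits into every existing mask. Worked out by hand for the V2
param mask, so worth double-checking against the UM:

    0x6AA & 0x5F5 = 0x4A0    /* clears bits 1, 3 and 9: the IPv4 DA,
                              * IPv6 DA and MAC DA p2p match bits */
]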
diff --git a/patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch b/patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch
new file mode 100644
index 0000000000..ee6b18b4d5
--- /dev/null
+++ b/patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch
@@ -0,0 +1,30 @@
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 Jan 2019 03:05:20 -0800
+Subject: bnx2x: Bump up driver version to 1.713.36
+Patch-mainline: v5.1-rc1
+Git-commit: f116465385344294edda66970734a26f5dd59a93
+References: bsc#1136498 jsc#SLE-4699
+
+Recently, there were a bunch of fixes to the bnx2x driver; the code is
+now aligned with the out-of-box driver version 1.713.36. This patch
+updates the bnx2x driver version to 1.713.36.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -32,7 +32,7 @@
+ * (you will need to reboot afterwards) */
+ /* #define BNX2X_STOP_ON_ERROR */
+
+-#define DRV_MODULE_VERSION "1.712.30-0"
++#define DRV_MODULE_VERSION "1.713.36-0"
+ #define DRV_MODULE_RELDATE "2014/02/10"
+ #define BNX2X_BC_VER 0x040200
+
diff --git a/patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch b/patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch
new file mode 100644
index 0000000000..52c83e7b8e
--- /dev/null
+++ b/patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch
@@ -0,0 +1,47 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Mon, 18 Feb 2019 12:19:54 +0000
+Subject: bnx2x: Remove set but not used variable 'mfw_vn'
+Patch-mainline: v5.1-rc1
+Git-commit: c9b747dbc2036c917b1067fbb78dc38b105c4454
+References: bsc#1136498 jsc#SLE-4699
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c: In function 'bnx2x_get_hwinfo':
+drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:11940:10: warning:
+ variable 'mfw_vn' set but not used [-Wunused-but-set-variable]
+
+It has never been used since its introduction.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Acked-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -11998,7 +11998,7 @@ static void validate_set_si_mode(struct
+ static int bnx2x_get_hwinfo(struct bnx2x *bp)
+ {
+ int /*abs*/func = BP_ABS_FUNC(bp);
+- int vn, mfw_vn;
++ int vn;
+ u32 val = 0, val2 = 0;
+ int rc = 0;
+
+@@ -12083,12 +12083,10 @@ static int bnx2x_get_hwinfo(struct bnx2x
+ /*
+ * Initialize MF configuration
+ */
+-
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->mf_sub_mode = 0;
+ vn = BP_VN(bp);
+- mfw_vn = BP_FW_MB_IDX(bp);
+
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
diff --git a/patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch b/patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch
new file mode 100644
index 0000000000..a3cb2d5daf
--- /dev/null
+++ b/patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch
@@ -0,0 +1,120 @@
+From: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Date: Tue, 16 Apr 2019 01:46:12 -0700
+Subject: bnx2x: Replace magic numbers with macro definitions.
+Patch-mainline: v5.2-rc1
+Git-commit: b320532c9990e6d8360fcc6831c33da46289e27d
+References: bsc#1136498 jsc#SLE-4699
+
+This patch performs code cleanup by defining macros for the ptp-timestamp
+filters.
+
+Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 50 +++++++++++++----------
+ 1 file changed, 30 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -15384,27 +15384,45 @@ static int bnx2x_enable_ptp_packets(stru
+ return 0;
+ }
+
++#define BNX2X_PTP_TX_ON_PARAM_MASK 0x6AA
++#define BNX2X_PTP_TX_ON_RULE_MASK 0x3EEE
++#define BNX2X_PTP_V1_L4_PARAM_MASK 0x7EE
++#define BNX2X_PTP_V1_L4_RULE_MASK 0x3FFE
++#define BNX2X_PTP_V2_L4_PARAM_MASK 0x7EA
++#define BNX2X_PTP_V2_L4_RULE_MASK 0x3FEE
++#define BNX2X_PTP_V2_L2_PARAM_MASK 0x6BF
++#define BNX2X_PTP_V2_L2_RULE_MASK 0x3EFF
++#define BNX2X_PTP_V2_PARAM_MASK 0x6AA
++#define BNX2X_PTP_V2_RULE_MASK 0x3EEE
++
+ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+ {
+ int port = BP_PORT(bp);
++ u32 param, rule;
+ int rc;
+
+ if (!bp->hwtstamp_ioctl_called)
+ return 0;
+
++ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
++ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
++ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
++ NIG_REG_P0_TLLH_PTP_RULE_MASK;
+ switch (bp->tx_type) {
+ case HWTSTAMP_TX_ON:
+ bp->flags |= TX_TIMESTAMPING_EN;
+- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+- NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
+- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+- NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
++ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ BNX2X_ERR("One-step timestamping is not supported\n");
+ return -ERANGE;
+ }
+
++ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
++ NIG_REG_P0_LLH_PTP_PARAM_MASK;
++ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
++ NIG_REG_P0_LLH_PTP_RULE_MASK;
+ switch (bp->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+@@ -15418,30 +15436,24 @@ int bnx2x_configure_ptp_filters(struct b
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
++ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
++ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ /* Initialize PTP detection L2 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
++ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
+
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+@@ -15449,10 +15461,8 @@ int bnx2x_configure_ptp_filters(struct b
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
++ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
+ break;
+ }
+
diff --git a/patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch b/patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch
new file mode 100644
index 0000000000..8ecff6d1c8
--- /dev/null
+++ b/patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch
@@ -0,0 +1,54 @@
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Thu, 7 Feb 2019 21:29:10 -0600
+Subject: bnx2x: Use struct_size() in kzalloc()
+Patch-mainline: v5.1-rc1
+Git-commit: 370600afdd2e33665c84d06f34e7c223d5379b4a
+References: bsc#1136498 jsc#SLE-4699
+
+One of the more common cases of allocation size calculations is finding
+the size of a structure that has a zero-sized array at the end, along
+with memory for some number of elements for that array. For example:
+
+struct foo {
+ int stuff;
+ struct boo entry[];
+};
+
+size = sizeof(struct foo) + count * sizeof(struct boo);
+instance = kzalloc(size, GFP_KERNEL)
+
+Instead of leaving these open-coded and prone to type mistakes, we can
+now use the new struct_size() helper:
+
+instance = kzalloc(struct_size(instance, entry, count), GFP_KERNEL)
+
+Notice that, in this case, variable fsz is not necessary, hence
+it is removed.
+
+This code was detected with the help of Coccinelle.
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+@@ -1654,13 +1654,9 @@ static int bnx2x_vf_mbx_macvlan_list(str
+ {
+ int i, j;
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+- size_t fsz;
+
+- fsz = tlv->n_mac_vlan_filters *
+- sizeof(struct bnx2x_vf_mac_vlan_filter) +
+- sizeof(struct bnx2x_vf_mac_vlan_filters);
+-
+- fl = kzalloc(fsz, GFP_KERNEL);
++ fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
++ GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
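[Beyond readability, struct_size() (from <linux/overflow.h>) does the
multiply-and-add with overflow checking; to the best of my reading it
saturates to SIZE_MAX on overflow, so kzalloc() fails cleanly instead of
silently under-allocating:

    /* roughly: sizeof(*fl) + n_mac_vlan_filters * sizeof(fl->filters[0]),
     * saturating to SIZE_MAX if the arithmetic overflows */
    fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
                 GFP_KERNEL);
]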
diff --git a/patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch b/patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch
new file mode 100644
index 0000000000..d356804a68
--- /dev/null
+++ b/patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch
@@ -0,0 +1,93 @@
+From: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Date: Wed, 27 Mar 2019 04:40:43 -0700
+Subject: bnx2x: Utilize FW 7.13.11.0.
+Patch-mainline: v5.2-rc1
+Git-commit: 32705592f944f0f7a3ec58ffd562d828b24f659a
+References: bsc#1136498 jsc#SLE-4699
+
+Commit 8fcf0ec44c11f "bnx2x: Add FW 7.13.11.0" added said .bin FW to
+linux-firmware; this patch incorporates that FW into the bnx2x driver.
+It introduces a few FW fixes and support for Tx VLAN filtering.
+
+Please consider applying it to 'net-next' tree.
+
+Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+@@ -3024,7 +3024,7 @@ struct afex_stats {
+
+ #define BCM_5710_FW_MAJOR_VERSION 7
+ #define BCM_5710_FW_MINOR_VERSION 13
+-#define BCM_5710_FW_REVISION_VERSION 1
++#define BCM_5710_FW_REVISION_VERSION 11
+ #define BCM_5710_FW_ENGINEERING_VERSION 0
+ #define BCM_5710_FW_COMPILE_FLAGS 1
+
+@@ -3639,8 +3639,10 @@ struct client_init_rx_data {
+ #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+ #define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
+ #define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
+-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
++#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3)
++#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3
++#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4)
++#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4
+ u8 vmqueue_mode_en_flg;
+ u8 extra_data_over_sgl_en_flg;
+ u8 cache_line_alignment_log_size;
+@@ -3831,7 +3833,7 @@ struct eth_classify_cmd_header {
+ */
+ struct eth_classify_header {
+ u8 rule_cnt;
+- u8 reserved0;
++ u8 warning_on_error;
+ __le16 reserved1;
+ __le32 echo;
+ };
+@@ -4752,6 +4754,8 @@ struct tpa_update_ramrod_data {
+ __le32 sge_page_base_hi;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
++ u8 tpa_over_vlan_disable;
++ u8 reserved[7];
+ };
+
+
+@@ -4946,7 +4950,7 @@ struct fairness_vars_per_port {
+ u32 upper_bound;
+ u32 fair_threshold;
+ u32 fairness_timeout;
+- u32 reserved0;
++ u32 size_thr;
+ };
+
+ /*
+@@ -5415,7 +5419,9 @@ struct function_start_data {
+ u8 sd_vlan_force_pri_val;
+ u8 c2s_pri_tt_valid;
+ u8 c2s_pri_default;
+- u8 reserved2[6];
++ u8 tx_vlan_filtering_enable;
++ u8 tx_vlan_filtering_use_pvid;
++ u8 reserved2[4];
+ struct c2s_pri_trans_table_entry c2s_pri_trans_table;
+ };
+
+@@ -5448,7 +5454,8 @@ struct function_update_data {
+ u8 reserved1;
+ __le16 sd_vlan_tag;
+ __le16 sd_vlan_eth_type;
+- __le16 reserved0;
++ u8 tx_vlan_filtering_pvid_change_flg;
++ u8 reserved0;
+ __le32 reserved2;
+ };
+
diff --git a/patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch b/patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch
new file mode 100644
index 0000000000..9f97379e57
--- /dev/null
+++ b/patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch
@@ -0,0 +1,27 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 15 Apr 2019 16:47:03 +0100
+Subject: bnx2x: fix spelling mistake "dicline" -> "decline"
+Patch-mainline: v5.1-rc6
+Git-commit: 614c70f35cd77a9af8e2ca841dcdb121cec3068f
+References: bsc#1136498 jsc#SLE-4699
+
+There is a spelling mistake in a BNX2X_ERR message; fix it.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x
+ bnx2x_sample_bulletin(bp);
+
+ if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+- BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
++ BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
+ rc = -EINVAL;
+ goto out;
+ }
diff --git a/patches.drivers/bnx2x-fix-various-indentation-issues.patch b/patches.drivers/bnx2x-fix-various-indentation-issues.patch
new file mode 100644
index 0000000000..617fffc1b3
--- /dev/null
+++ b/patches.drivers/bnx2x-fix-various-indentation-issues.patch
@@ -0,0 +1,324 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 14 Jan 2019 15:15:16 +0000
+Subject: bnx2x: fix various indentation issues
+Patch-mainline: v5.1-rc1
+Git-commit: 9fb0969f75823e59f1af14d587aec279c66bf4a7
+References: bsc#1136498 jsc#SLE-4699
+
+Several lines have indentation issues; fix them.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 2
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 178 +++++++++++------------
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 12 -
+ 4 files changed, 95 insertions(+), 99 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+@@ -449,7 +449,7 @@ static inline void bnx2x_init_fw_wrr(con
+ ccd[cos] =
+ (u32)input_data->cos_min_rate[cos] * 100 *
+ (T_FAIR_COEF / (8 * 100 * cosWeightSum));
+- if (ccd[cos] < pdata->fair_vars.fair_threshold
++ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ + MIN_ABOVE_THRESH) {
+ ccd[cos] =
+ pdata->fair_vars.fair_threshold +
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -837,49 +837,45 @@ static int bnx2x_ets_e3b0_set_cos_bw(str
+
+ switch (cos_entry) {
+ case 0:
+- nig_reg_adress_crd_weight =
+- (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+- pbf_reg_adress_crd_weight = (port) ?
+- PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+- break;
++ nig_reg_adress_crd_weight =
++ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
++ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
++ pbf_reg_adress_crd_weight = (port) ?
++ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
++ break;
+ case 1:
+- nig_reg_adress_crd_weight = (port) ?
+- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+- pbf_reg_adress_crd_weight = (port) ?
+- PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+- break;
++ nig_reg_adress_crd_weight = (port) ?
++ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
++ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
++ pbf_reg_adress_crd_weight = (port) ?
++ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
++ break;
+ case 2:
+- nig_reg_adress_crd_weight = (port) ?
+- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
++ nig_reg_adress_crd_weight = (port) ?
++ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
++ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+- pbf_reg_adress_crd_weight = (port) ?
+- PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+- break;
++ pbf_reg_adress_crd_weight = (port) ?
++ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
++ break;
+ case 3:
+- if (port)
++ if (port)
+ return -EINVAL;
+- nig_reg_adress_crd_weight =
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+- pbf_reg_adress_crd_weight =
+- PBF_REG_COS3_WEIGHT_P0;
+- break;
++ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
++ pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
++ break;
+ case 4:
+- if (port)
+- return -EINVAL;
+- nig_reg_adress_crd_weight =
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+- pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+- break;
++ if (port)
++ return -EINVAL;
++ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
++ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
++ break;
+ case 5:
+- if (port)
+- return -EINVAL;
+- nig_reg_adress_crd_weight =
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+- pbf_reg_adress_crd_weight = PBF_