Home > GIT Browse
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author	Petr Tesarik <ptesarik@suse.cz>	2019-06-07 09:29:52 +0200
committer	Petr Tesarik <ptesarik@suse.cz>	2019-06-07 09:29:52 +0200
commit	187af105002d4396a677826f774d8f73cd303ebe (patch)
tree	c2d3991b7156f4bb54a6630a014489429c809712
parent	ea4c9f7671c3d3efbeeb3ca2e342f2e98a646627 (diff)
parent	e25c260940d21e846bacd9d18ea8476e220ab627 (diff)
Merge branch 'SLE15-SP1' into SLE12-SP5
- Delete patches.kabi/*

Conflicts:
	kabi/severities
	patches.kabi/arch-arm64-acpi-KABI-ignore-includes.patch
	series.conf
-rw-r--r--blacklist.conf19
-rw-r--r--config/arm64/default1
-rw-r--r--kabi/severities7
-rw-r--r--patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch2
-rw-r--r--patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch2
-rw-r--r--patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch154
-rw-r--r--patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch53
-rw-r--r--patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch2
-rw-r--r--patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch312
-rw-r--r--patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch112
-rw-r--r--patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch2
-rw-r--r--patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch122
-rw-r--r--patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch928
-rw-r--r--patches.drivers/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch (renamed from patches.arch/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch)6
-rw-r--r--patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch72
-rw-r--r--patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch2
-rw-r--r--patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch66
-rw-r--r--patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch72
-rw-r--r--patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072119
-rw-r--r--patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver36
-rw-r--r--patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC72
-rw-r--r--patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX20757
-rw-r--r--patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x111
-rw-r--r--patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch118
-rw-r--r--patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch56
-rw-r--r--patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch83
-rw-r--r--patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch31
-rw-r--r--patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch459
-rw-r--r--patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch142
-rw-r--r--patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch47
-rw-r--r--patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch58
-rw-r--r--patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch57
-rw-r--r--patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch96
-rw-r--r--patches.drivers/IB-hfi1-Fix-two-format-strings.patch57
-rw-r--r--patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch115
-rw-r--r--patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch53
-rw-r--r--patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch43
-rw-r--r--patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch287
-rw-r--r--patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch40
-rw-r--r--patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch44
-rw-r--r--patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch41
-rw-r--r--patches.drivers/IB-hw-Remove-unneeded-semicolons.patch102
-rw-r--r--patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch59
-rw-r--r--patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch80
-rw-r--r--patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch73
-rw-r--r--patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch40
-rw-r--r--patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch46
-rw-r--r--patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch33
-rw-r--r--patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch27
-rw-r--r--patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch112
-rw-r--r--patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch90
-rw-r--r--patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch39
-rw-r--r--patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch47
-rw-r--r--patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch30
-rw-r--r--patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch36
-rw-r--r--patches.drivers/arm64-fix-ACPI-dependencies.patch2
-rw-r--r--patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch2
-rw-r--r--patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch78
-rw-r--r--patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch60
-rw-r--r--patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch30
-rw-r--r--patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch47
-rw-r--r--patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch120
-rw-r--r--patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch54
-rw-r--r--patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch93
-rw-r--r--patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch27
-rw-r--r--patches.drivers/bnx2x-fix-various-indentation-issues.patch324
-rw-r--r--patches.drivers/bnxt_en-Fix-aggregation-buffer-leak-under-OOM-condit.patch35
-rw-r--r--patches.drivers/bnxt_en-Fix-possible-BUG-condition-when-calling-pci_.patch101
-rw-r--r--patches.drivers/bnxt_en-Fix-possible-crash-in-bnxt_hwrm_ring_free-un.patch65
-rw-r--r--patches.drivers/bnxt_en-Fix-statistics-context-reservation-logic.patch62
-rw-r--r--patches.drivers/bnxt_en-Fix-uninitialized-variable-usage-in-bnxt_rx_.patch53
-rw-r--r--patches.drivers/bnxt_en-Improve-NQ-reservations.patch40
-rw-r--r--patches.drivers/bnxt_en-Improve-multicast-address-setup-logic.patch41
-rw-r--r--patches.drivers/bnxt_en-Pass-correct-extended-TX-port-statistics-siz.patch51
-rw-r--r--patches.drivers/bnxt_en-Reduce-memory-usage-when-running-in-kdump-ke.patch61
-rw-r--r--patches.drivers/broadcom-Mark-expected-switch-fall-throughs.patch75
-rw-r--r--patches.drivers/chelsio-use-BUG-instead-of-BUG_ON-1.patch68
-rw-r--r--patches.drivers/crypto-chcr-ESN-for-Inline-IPSec-Tx.patch353
-rw-r--r--patches.drivers/crypto-chcr-small-packet-Tx-stalls-the-queue.patch34
-rw-r--r--patches.drivers/crypto-chelsio-Fix-NULL-pointer-dereference.patch33
-rw-r--r--patches.drivers/crypto-chelsio-Fix-passing-zero-to-PTR_ERR-warning-i.patch34
-rw-r--r--patches.drivers/crypto-chelsio-Fix-softlockup-with-heavy-I-O.patch59
-rw-r--r--patches.drivers/crypto-chelsio-Fix-wrong-error-counter-increments.patch82
-rw-r--r--patches.drivers/crypto-chelsio-Fixed-Traffic-Stall.patch132
-rw-r--r--patches.drivers/crypto-chelsio-Handle-PCI-shutdown-event.patch761
-rw-r--r--patches.drivers/crypto-chelsio-Inline-single-pdu-only.patch28
-rw-r--r--patches.drivers/crypto-chelsio-Reset-counters-on-cxgb4-Detach.patch36
-rw-r--r--patches.drivers/crypto-chelsio-Swap-location-of-AAD-and-IV-sent-in-W.patch537
-rw-r--r--patches.drivers/crypto-chelsio-Use-same-value-for-both-channel-in-si.patch91
-rw-r--r--patches.drivers/crypto-chelsio-avoid-using-sa_entry-imm.patch59
-rw-r--r--patches.drivers/crypto-chelsio-check-set_msg_len-overflow-in-generat.patch47
-rw-r--r--patches.drivers/crypto-chelsio-clean-up-various-indentation-issues.patch64
-rw-r--r--patches.drivers/crypto-chelsio-cleanup-send-addr-as-value-in-functio.patch83
-rw-r--r--patches.drivers/crypto-chelsio-count-incomplete-block-in-IV.patch31
-rw-r--r--patches.drivers/crypto-chelsio-remove-set-but-not-used-variable-kctx.patch42
-rw-r--r--patches.drivers/crypto-chelsio-remove-set-but-not-used-variables-ada.patch41
-rw-r--r--patches.drivers/crypto-chtls-remove-cdev_list_lock.patch30
-rw-r--r--patches.drivers/crypto-chtls-remove-set-but-not-used-variables-err-a.patch105
-rw-r--r--patches.drivers/cxgb4-Add-VF-Link-state-support.patch127
-rw-r--r--patches.drivers/cxgb4-Add-new-T6-PCI-device-ids-0x608b.patch24
-rw-r--r--patches.drivers/cxgb4-Delete-all-hash-and-TCAM-filters-before-resour.patch111
-rw-r--r--patches.drivers/cxgb4-Don-t-return-EAGAIN-when-TCAM-is-full.patch52
-rw-r--r--patches.drivers/cxgb4-Enable-outer-UDP-checksum-offload-for-T6.patch33
-rw-r--r--patches.drivers/cxgb4-Fix-error-path-in-cxgb4_init_module.patch80
-rw-r--r--patches.drivers/cxgb4-Revert-cxgb4-Remove-SGE_HOST_PAGE_SIZE-depende.patch44
-rw-r--r--patches.drivers/cxgb4-TLS-record-offload-enable.patch28
-rw-r--r--patches.drivers/cxgb4-Update-1.23.3.0-as-the-latest-firmware-support.patch52
-rw-r--r--patches.drivers/cxgb4-add-tcb-flags-and-tcb-rpl-struct.patch68
-rw-r--r--patches.drivers/cxgb4-chtls-Prefix-adapter-flags-with-CXGB4.patch828
-rw-r--r--patches.drivers/cxgb4-cxgb4vf-Display-advertised-FEC-in-ethtool.patch302
-rw-r--r--patches.drivers/cxgb4-cxgb4vf-Fix-up-netdev-hw_features.patch75
-rw-r--r--patches.drivers/cxgb4-cxgb4vf_main-Mark-expected-switch-fall-through.patch50
-rw-r--r--patches.drivers/cxgb4-free-mac_hlist-properly.patch42
-rw-r--r--patches.drivers/cxgb4-kfree-mhp-after-the-debug-print.patch32
-rw-r--r--patches.drivers/cxgb4-offload-VLAN-flows-regardless-of-VLAN-ethtype.patch42
-rw-r--r--patches.drivers/cxgb4-remove-DEFINE_SIMPLE_DEBUGFS_FILE.patch298
-rw-r--r--patches.drivers/cxgb4-remove-set-but-not-used-variables-multitrc-spe.patch63
-rw-r--r--patches.drivers/cxgb4vf-Call-netif_carrier_off-properly-in-pci_probe.patch35
-rw-r--r--patches.drivers/cxgb4vf-Enter-debugging-mode-if-FW-is-inaccessible.patch144
-rw-r--r--patches.drivers/cxgb4vf-Prefix-adapter-flags-with-CXGB4VF.patch297
-rw-r--r--patches.drivers/cxgb4vf-Revert-force-link-up-behaviour.patch57
-rw-r--r--patches.drivers/cxgb4vf-free-mac_hlist-properly.patch40
-rw-r--r--patches.drivers/drivers-acpi-add-dependency-of-EFI-for-arm64.patch (renamed from patches.arch/drivers-acpi-add-dependency-of-EFI-for-arm64.patch)2
-rw-r--r--patches.drivers/e1000e-Disable-runtime-PM-on-CNP.patch39
-rw-r--r--patches.drivers/e1000e-Exclude-device-from-suspend-direct-complete-o.patch36
-rw-r--r--patches.drivers/e1000e-fix-a-missing-check-for-return-value.patch74
-rw-r--r--patches.drivers/efi-Permit-calling-efi_mem_reserve_persistent-from-a.patch2
-rw-r--r--patches.drivers/efi-Permit-multiple-entries-in-persistent-memreserve.patch2
-rw-r--r--patches.drivers/efi-Prevent-GICv3-WARN-by-mapping-the-memreserve-tab.patch2
-rw-r--r--patches.drivers/efi-Reduce-the-amount-of-memblock-reservations-for-p.patch2
-rw-r--r--patches.drivers/efi-arm-Defer-persistent-reservations-until-after-pa.patch2
-rw-r--r--patches.drivers/efi-arm-Revert-Defer-persistent-reservations-until-a.patch2
-rw-r--r--patches.drivers/efi-arm-Revert-deferred-unmap-of-early-memmap-mappin.patch2
-rw-r--r--patches.drivers/efi-arm-map-UEFI-memory-map-even-w-o-runtime-service.patch (renamed from patches.arch/efi-arm-map-UEFI-memory-map-even-w-o-runtime-service.patch)2
-rw-r--r--patches.drivers/efi-arm-preserve-early-mapping-of-UEFI-memory-map-lo.patch (renamed from patches.arch/efi-arm-preserve-early-mapping-of-UEFI-memory-map-lo.patch)2
-rw-r--r--patches.drivers/fm10k-TRIVIAL-cleanup-of-extra-spacing-in-function-c.patch29
-rw-r--r--patches.drivers/fm10k-use-struct_size-in-kzalloc.patch55
-rw-r--r--patches.drivers/hid-core-move-usage-page-concatenation-to-main-item.patch145
-rw-r--r--patches.drivers/i40e-Able-to-add-up-to-16-MAC-filters-on-an-untruste.patch36
-rw-r--r--patches.drivers/i40e-Add-support-FEC-configuration-for-Fortville-25G.patch400
-rw-r--r--patches.drivers/i40e-Add-support-for-X710-B-P-SFP-cards.patch252
-rw-r--r--patches.drivers/i40e-Change-unmatched-function-types.patch56
-rw-r--r--patches.drivers/i40e-Changed-maximum-supported-FW-API-version-to-1.8.patch42
-rw-r--r--patches.drivers/i40e-Fix-for-10G-ports-LED-not-blinking.patch83
-rw-r--r--patches.drivers/i40e-Fix-for-allowing-too-many-MDD-events-on-VF.patch44
-rw-r--r--patches.drivers/i40e-Fix-misleading-error-message.patch30
-rw-r--r--patches.drivers/i40e-Fix-of-memory-leak-and-integer-truncation-in-i4.patch79
-rw-r--r--patches.drivers/i40e-Fix-the-typo-in-adding-40GE-KR4-mode.patch32
-rw-r--r--patches.drivers/i40e-Further-implementation-of-LLDP.patch281
-rw-r--r--patches.drivers/i40e-Implement-DDP-support-in-i40e-driver.patch953
-rw-r--r--patches.drivers/i40e-Introduce-recovery-mode-support.patch501
-rw-r--r--patches.drivers/i40e-Limiting-RSS-queues-to-CPUs.patch28
-rw-r--r--patches.drivers/i40e-Memory-leak-in-i40e_config_iwarp_qvlist.patch80
-rw-r--r--patches.drivers/i40e-Queues-are-reserved-despite-Invalid-argument-er.patch45
-rw-r--r--patches.drivers/i40e-Remove-misleading-messages-for-untrusted-VF.patch48
-rw-r--r--patches.drivers/i40e-Remove-umem-from-VSI.patch241
-rw-r--r--patches.drivers/i40e-Report-advertised-link-modes-on-40GBASE_SR4.patch43
-rw-r--r--patches.drivers/i40e-Report-advertised-link-modes-on-40GBase_LR4-CR4.patch59
-rw-r--r--patches.drivers/i40e-Revert-ShadowRAM-checksum-calculation-change.patch65
-rw-r--r--patches.drivers/i40e-Setting-VF-to-VLAN-0-requires-restart.patch36
-rw-r--r--patches.drivers/i40e-ShadowRAM-checksum-calculation-change.patch63
-rw-r--r--patches.drivers/i40e-The-driver-now-prints-the-API-version-in-error-.patch53
-rw-r--r--patches.drivers/i40e-Update-i40e_init_dcb-to-return-correct-error.patch111
-rw-r--r--patches.drivers/i40e-Use-struct_size-in-kzalloc.patch53
-rw-r--r--patches.drivers/i40e-VF-s-promiscuous-attribute-is-not-kept.patch74
-rw-r--r--patches.drivers/i40e-Wrong-truncation-from-u16-to-u8.patch32
-rw-r--r--patches.drivers/i40e-add-new-pci-id-for-X710-XXV710-N3000-cards.patch52
-rw-r--r--patches.drivers/i40e-add-num_vectors-checker-in-iwarp-handler.patch38
-rw-r--r--patches.drivers/i40e-add-tracking-of-AF_XDP-ZC-state-for-each-queue-.patch100
-rw-r--r--patches.drivers/i40e-change-behavior-on-PF-in-response-to-MDD-event.patch52
-rw-r--r--patches.drivers/i40e-check-queue-pairs-num-in-config-queues-handler.patch44
-rw-r--r--patches.drivers/i40e-clean-up-several-indentation-issues.patch63
-rw-r--r--patches.drivers/i40e-don-t-allow-changes-to-HW-VLAN-stripping-on-act.patch42
-rw-r--r--patches.drivers/i40e-fix-i40e_ptp_adjtime-when-given-a-negative-delt.patch52
-rw-r--r--patches.drivers/i40e-fix-misleading-message-about-promisc-setting-on.patch79
-rw-r--r--patches.drivers/i40e-increase-indentation.patch31
-rw-r--r--patches.drivers/i40e-missing-input-validation-on-VF-message-handling.patch168
-rw-r--r--patches.drivers/i40e-move-i40e_xsk_umem-function.patch74
-rw-r--r--patches.drivers/i40e-print-PCI-vendor-and-device-ID-during-probe.patch44
-rw-r--r--patches.drivers/i40e-remove-debugfs-tx_timeout-support.patch61
-rw-r--r--patches.drivers/i40e-remove-error-msg-when-vf-with-port-vlan-tries-t.patch32
-rw-r--r--patches.drivers/i40e-remove-out-of-range-comparisons-in-i40e_validat.patch54
-rw-r--r--patches.drivers/i40e-save-PTP-time-before-a-device-reset.patch171
-rw-r--r--patches.drivers/i40e-update-version-number-d1fc90a9.patch28
-rw-r--r--patches.drivers/i40e-update-version-number.patch28
-rw-r--r--patches.drivers/i40iw-Avoid-panic-when-handling-the-inetdev-event.patch72
-rw-r--r--patches.drivers/i40iw-remove-support-for-ib_get_vector_affinity.patch62
-rw-r--r--patches.drivers/i40iw-remove-use-of-VLAN_TAG_PRESENT.patch55
-rw-r--r--patches.drivers/ice-Add-52-byte-RSS-hash-key-support.patch60
-rw-r--r--patches.drivers/ice-Add-ability-to-update-rx-usecs-high.patch113
-rw-r--r--patches.drivers/ice-Add-code-for-DCB-initialization-part-1-4.patch366
-rw-r--r--patches.drivers/ice-Add-code-for-DCB-initialization-part-2-4.patch1190
-rw-r--r--patches.drivers/ice-Add-code-for-DCB-initialization-part-3-4.patch1409
-rw-r--r--patches.drivers/ice-Add-code-for-DCB-initialization-part-4-4.patch134
-rw-r--r--patches.drivers/ice-Add-code-for-DCB-rebuild.patch139
-rw-r--r--patches.drivers/ice-Add-code-to-control-FW-LLDP-and-DCBX.patch330
-rw-r--r--patches.drivers/ice-Add-code-to-get-DCB-related-statistics.patch191
-rw-r--r--patches.drivers/ice-Add-code-to-process-LLDP-MIB-change-events.patch127
-rw-r--r--patches.drivers/ice-Add-ethtool-private-flag-to-make-forcing-link-do.patch193
-rw-r--r--patches.drivers/ice-Add-ethtool-set_phys_id-handler.patch147
-rw-r--r--patches.drivers/ice-Add-function-to-program-ethertype-based-filter-r.patch201
-rw-r--r--patches.drivers/ice-Add-missing-PHY-type-to-link-settings.patch31
-rw-r--r--patches.drivers/ice-Add-missing-case-in-print_link_msg-for-printing-.patch33
-rw-r--r--patches.drivers/ice-Add-more-validation-in-ice_vc_cfg_irq_map_msg.patch179
-rw-r--r--patches.drivers/ice-Add-priority-information-into-VLAN-header.patch135
-rw-r--r--patches.drivers/ice-Add-reg_idx-variable-in-ice_q_vector-structure.patch297
-rw-r--r--patches.drivers/ice-Add-support-for-PF-VF-promiscuous-mode.patch802
-rw-r--r--patches.drivers/ice-Add-support-for-adaptive-interrupt-moderation.patch274
-rw-r--r--patches.drivers/ice-Add-support-for-new-PHY-types.patch737
-rw-r--r--patches.drivers/ice-Allow-for-software-timestamping.patch28
-rw-r--r--patches.drivers/ice-Always-free-allocate-q_vectors.patch129
-rw-r--r--patches.drivers/ice-Audit-hotpath-structures-with-pahole.patch123
-rw-r--r--patches.drivers/ice-Bump-driver-version.patch28
-rw-r--r--patches.drivers/ice-Bump-version.patch28
-rw-r--r--patches.drivers/ice-Calculate-ITR-increment-based-on-direct-calculat.patch199
-rw-r--r--patches.drivers/ice-Configure-RSS-LUT-and-HASH-KEY-in-rebuild-path.patch33
-rw-r--r--patches.drivers/ice-Create-a-generic-name-for-the-ice_rx_flg64_bits-.patch103
-rw-r--r--patches.drivers/ice-Create-framework-for-VSI-queue-context.patch473
-rw-r--r--patches.drivers/ice-Determine-descriptor-count-and-ring-size-based-o.patch155
-rw-r--r--patches.drivers/ice-Disable-sniffing-VF-traffic-on-PF.patch72
-rw-r--r--patches.drivers/ice-Do-not-bail-out-when-filter-already-exists.patch35
-rw-r--r--patches.drivers/ice-Do-not-set-LB_EN-for-prune-switch-rules.patch38
-rw-r--r--patches.drivers/ice-Do-not-unnecessarily-initialize-local-variable.patch33
-rw-r--r--patches.drivers/ice-Don-t-let-VF-know-that-it-is-untrusted.patch54
-rw-r--r--patches.drivers/ice-Don-t-remove-VLAN-filters-that-were-never-progra.patch59
-rw-r--r--patches.drivers/ice-Enable-LAN_EN-for-the-right-recipes.patch64
-rw-r--r--patches.drivers/ice-Enable-MAC-anti-spoof-by-default.patch44
-rw-r--r--patches.drivers/ice-Enable-link-events-over-the-ARQ.patch180
-rw-r--r--patches.drivers/ice-Ensure-only-valid-bits-are-set-in-ice_aq_set_phy.patch75
-rw-r--r--patches.drivers/ice-Fix-added-in-VSI-supported-nodes-calc.patch42
-rw-r--r--patches.drivers/ice-Fix-broadcast-traffic-in-port-VLAN-mode.patch149
-rw-r--r--patches.drivers/ice-Fix-for-FC-get-rx-tx-pause-params.patch73
-rw-r--r--patches.drivers/ice-Fix-for-adaptive-interrupt-moderation.patch386
-rw-r--r--patches.drivers/ice-Fix-for-allowing-too-many-MDD-events-on-VF.patch71
-rw-r--r--patches.drivers/ice-Fix-incorrect-use-of-abbreviations.patch2333
-rw-r--r--patches.drivers/ice-Fix-issue-reclaiming-resources-back-to-the-pool-.patch70
-rw-r--r--patches.drivers/ice-Fix-issue-reconfiguring-VF-queues.patch246
-rw-r--r--patches.drivers/ice-Fix-issue-when-adding-more-than-allowed-VLANs.patch87
-rw-r--r--patches.drivers/ice-Fix-issue-with-VF-attempt-to-delete-default-MAC-.patch81
-rw-r--r--patches.drivers/ice-Fix-issue-with-VF-reset-and-multiple-VFs-support.patch67
-rw-r--r--patches.drivers/ice-Fix-the-calculation-of-ICE_MAX_MTU.patch34
-rw-r--r--patches.drivers/ice-Fix-typos-in-code-comments.patch83
-rw-r--r--patches.drivers/ice-Gather-the-rx-buf-clean-up-logic-for-better-reus.patch173
-rw-r--r--patches.drivers/ice-Get-VF-VSI-instances-directly-via-PF.patch169
-rw-r--r--patches.drivers/ice-Get-resources-per-function.patch62
-rw-r--r--patches.drivers/ice-Get-rid-of-ice_pull_tail.patch131
-rw-r--r--patches.drivers/ice-Implement-flow-to-reset-VFs-with-PFR-and-other-r.patch59
-rw-r--r--patches.drivers/ice-Implement-getting-and-setting-ethtool-coalesce.patch334
-rw-r--r--patches.drivers/ice-Implement-pci_error_handler-ops.patch204
-rw-r--r--patches.drivers/ice-Implement-support-for-normal-get_eeprom-_len-eth.patch201
-rw-r--r--patches.drivers/ice-Introduce-bulk-update-for-page-count.patch127
-rw-r--r--patches.drivers/ice-Limit-the-ice_add_rx_frag-to-frag-addition.patch275
-rw-r--r--patches.drivers/ice-Move-aggregator-list-into-ice_hw-instance.patch141
-rw-r--r--patches.drivers/ice-Offload-SCTP-checksum.patch66
-rw-r--r--patches.drivers/ice-Preserve-VLAN-Rx-stripping-settings.patch33
-rw-r--r--patches.drivers/ice-Prevent-unintended-multiple-chain-resets.patch45
-rw-r--r--patches.drivers/ice-Pull-out-page-reuse-checks-onto-separate-functio.patch123
-rw-r--r--patches.drivers/ice-Put-__ICE_PREPARED_FOR_RESET-check-in-ice_prepar.patch47
-rw-r--r--patches.drivers/ice-Reduce-scope-of-variable-in-ice_vsi_cfg_rxqs.patch56
-rw-r--r--patches.drivers/ice-Refactor-a-few-Tx-scheduler-functions.patch253
-rw-r--r--patches.drivers/ice-Refactor-getting-setting-coalesce.patch233
-rw-r--r--patches.drivers/ice-Refactor-link-event-flow.patch211
-rw-r--r--patches.drivers/ice-Remove-2-BITS-comment.patch41
-rw-r--r--patches.drivers/ice-Remove-__always_unused-attribute.patch30
-rw-r--r--patches.drivers/ice-Remove-runtime-change-of-PFINT_OICR_ENA-register.patch94
-rw-r--r--patches.drivers/ice-Remove-unnecessary-braces.patch30
-rw-r--r--patches.drivers/ice-Remove-unnecessary-newlines-from-log-messages.patch42
-rw-r--r--patches.drivers/ice-Remove-unnecessary-wait-when-disabling-enabling-.patch47
-rw-r--r--patches.drivers/ice-Remove-unused-function-prototype-10c7e4c5.patch30
-rw-r--r--patches.drivers/ice-Remove-unused-function-prototype.patch46
-rw-r--r--patches.drivers/ice-Remove-unused-vsi_id-field.patch27
-rw-r--r--patches.drivers/ice-Reset-all-VFs-with-VFLR-during-SR-IOV-init-flow.patch35
-rw-r--r--patches.drivers/ice-Resolve-static-analysis-reported-issue.patch58
-rw-r--r--patches.drivers/ice-Restore-VLAN-switch-rule-if-port-VLAN-existed-be.patch33
-rw-r--r--patches.drivers/ice-Retrieve-rx_buf-in-separate-function.patch169
-rw-r--r--patches.drivers/ice-Return-configuration-error-without-queue-to-disa.patch46
-rw-r--r--patches.drivers/ice-Rework-queue-management-code-for-reuse.patch521
-rw-r--r--patches.drivers/ice-Separate-if-conditions-for-ice_set_features.patch46
-rw-r--r--patches.drivers/ice-Set-LAN_EN-for-all-directional-rules.patch50
-rw-r--r--patches.drivers/ice-Set-physical-link-up-down-when-an-interface-is-s.patch184
-rw-r--r--patches.drivers/ice-Suppress-false-positive-style-issues-reported-by.patch33
-rw-r--r--patches.drivers/ice-Update-comment-regarding-the-ITR_GRAN_S.patch30
-rw-r--r--patches.drivers/ice-Update-function-header-for-__ice_vsi_get_qs.patch34
-rw-r--r--patches.drivers/ice-Update-rings-based-on-TC-information.patch130
-rw-r--r--patches.drivers/ice-Use-bitfields-where-possible.patch53
-rw-r--r--patches.drivers/ice-Use-dev_err-when-ice_cfg_vsi_lan-fails.patch43
-rw-r--r--patches.drivers/ice-Use-ice_for_each_q_vector-macro-where-possible.patch101
-rw-r--r--patches.drivers/ice-Use-more-efficient-structures.patch42
-rw-r--r--patches.drivers/ice-Use-pf-instead-of-vsi-back.patch247
-rw-r--r--patches.drivers/ice-Validate-ring-existence-and-its-q_vector-per-VSI.patch33
-rw-r--r--patches.drivers/ice-add-and-use-new-ice_for_each_traffic_class-macro.patch76
-rw-r--r--patches.drivers/ice-add-const-qualifier-to-mac_addr-parameter.patch56
-rw-r--r--patches.drivers/ice-avoid-multiple-unnecessary-de-references-in-prob.patch128
-rw-r--r--patches.drivers/ice-change-VF-VSI-tc-info-along-with-num_queues.patch31
-rw-r--r--patches.drivers/ice-check-for-a-leaf-node-presence.patch71
-rw-r--r--patches.drivers/ice-clear-VF-ARQLEN-register-on-reset.patch52
-rw-r--r--patches.drivers/ice-code-cleanup-in-ice_sched.c.patch86
-rw-r--r--patches.drivers/ice-configure-GLINT_ITR-to-always-have-an-ITR-gran-o.patch137
-rw-r--r--patches.drivers/ice-don-t-spam-VFs-with-link-messages.patch36
-rw-r--r--patches.drivers/ice-enable-VF-admin-queue-interrupts.patch45
-rw-r--r--patches.drivers/ice-fix-ice_remove_rule_internal-vsi_list-handling.patch76
-rw-r--r--patches.drivers/ice-fix-issue-where-host-reboots-on-unload-when-iomm.patch141
-rw-r--r--patches.drivers/ice-fix-numeric-overflow-warning.patch55
-rw-r--r--patches.drivers/ice-fix-overlong-string-update-stats-output.patch117
-rw-r--r--patches.drivers/ice-fix-some-function-prototype-and-signature-style-.patch532
-rw-r--r--patches.drivers/ice-fix-stack-hogs-from-struct-ice_vsi_ctx-structure.patch375
-rw-r--r--patches.drivers/ice-fix-static-analysis-warnings.patch66
-rw-r--r--patches.drivers/ice-fix-the-divide-by-zero-issue.patch42
-rw-r--r--patches.drivers/ice-flush-Tx-pipe-on-disable-queue-timeout.patch72
-rw-r--r--patches.drivers/ice-map-Rx-buffer-pages-with-DMA-attributes.patch97
-rw-r--r--patches.drivers/ice-only-use-the-VF-for-ICE_VSI_VF-in-ice_vsi_releas.patch39
-rw-r--r--patches.drivers/ice-remove-redundant-variable-and-if-condition.patch46
-rw-r--r--patches.drivers/ice-sizeof-type-should-be-avoided.patch328
-rw-r--r--patches.drivers/ice-update-VSI-config-dynamically.patch134
-rw-r--r--patches.drivers/ice-use-absolute-vector-ID-for-VFs.patch42
-rw-r--r--patches.drivers/ice-use-ice_for_each_vsi-macro-when-possible.patch111
-rw-r--r--patches.drivers/ice-use-irq_num-var-in-ice_vsi_req_irq_msix.patch35
-rw-r--r--patches.drivers/ice-use-virt-channel-status-codes.patch929
-rw-r--r--patches.drivers/igb-Bump-version-number.patch29
-rw-r--r--patches.drivers/igb-Exclude-device-from-suspend-direct-complete-opti.patch37
-rw-r--r--patches.drivers/igb-Fix-WARN_ONCE-on-runtime-suspend.patch148
-rw-r--r--patches.drivers/igb-fix-various-indentation-issues.patch30
-rw-r--r--patches.drivers/igb-use-struct_size-helper.patch54
-rw-r--r--patches.drivers/igc-Add-ethtool-support.patch1420
-rw-r--r--patches.drivers/igc-Add-multiple-receive-queues-control-supporting.patch157
-rw-r--r--patches.drivers/igc-Add-support-for-statistics.patch472
-rw-r--r--patches.drivers/igc-Add-support-for-the-ntuple-feature.patch140
-rw-r--r--patches.drivers/igc-Extend-the-ethtool-supporting.patch947
-rw-r--r--patches.drivers/igc-Fix-code-redundancy.patch87
-rw-r--r--patches.drivers/igc-Fix-the-typo-in-igc_base.h-header-definition.patch30
-rw-r--r--patches.drivers/igc-Remove-the-igc_get_phy_id_base-method.patch52
-rw-r--r--patches.drivers/igc-Remove-the-igc_read_mac_addr_base-method.patch49
-rw-r--r--patches.drivers/igc-Remove-unneeded-code.patch57
-rw-r--r--patches.drivers/igc-Remove-unneeded-hw_dbg-prints.patch38
-rw-r--r--patches.drivers/igc-Remove-unreachable-code-from-igc_phy.c-file.patch46
-rw-r--r--patches.drivers/igc-Remove-unused-code.patch49
-rw-r--r--patches.drivers/igc-Use-struct_size-helper.patch65
-rw-r--r--patches.drivers/include-linux-bitops.h-introduce-BITS_PER_TYPE.patch47
-rw-r--r--patches.drivers/infiniband-hfi1-drop-crazy-DEBUGFS_SEQ_FILE_CREATE-m.patch152
-rw-r--r--patches.drivers/infiniband-hfi1-no-need-to-check-return-value-of-deb.patch105
-rw-r--r--patches.drivers/infiniband-qedr-Potential-null-ptr-dereference-of-qp.patch29
-rw-r--r--patches.drivers/intel-correct-return-from-set-features-callback.patch80
-rw-r--r--patches.drivers/iommu-arm-smmu-v3-Abort-all-transactions-if-SMMU-is-.patch2
-rw-r--r--patches.drivers/iommu-arm-smmu-v3-Don-t-disable-SMMU-in-kdump-kernel.patch2
-rw-r--r--patches.drivers/iw_cxgb-kzalloc-the-iwcm-verbs-struct.patch39
-rw-r--r--patches.drivers/iw_cxgb4-Check-for-send-WR-also-while-posting-write-.patch72
-rw-r--r--patches.drivers/iw_cxgb4-Make-function-read_tcb-static.patch32
-rw-r--r--patches.drivers/iw_cxgb4-complete-the-cached-SRQ-buffers.patch303
-rw-r--r--patches.drivers/iw_cxgb4-fix-srqidx-leak-during-connection-abort.patch55
-rw-r--r--patches.drivers/iw_cxgb4-use-listening-ep-tos-when-accepting-new-con.patch51
-rw-r--r--patches.drivers/iw_cxgb4-use-tos-when-finding-ipv6-routes.patch39
-rw-r--r--patches.drivers/iw_cxgb4-use-tos-when-importing-the-endpoint.patch28
-rw-r--r--patches.drivers/ixgbe-Use-struct_size-helper.patch66
-rw-r--r--patches.drivers/ixgbe-fix-mdio-bus-registration.patch58
-rw-r--r--patches.drivers/ixgbe-fix-older-devices-that-do-not-support-IXGBE_MR.patch36
-rw-r--r--patches.drivers/ixgbe-register-a-mdiobus.patch410
-rw-r--r--patches.drivers/ixgbe-remove-magic-constant-in-ixgbe_reset_hw_82599.patch31
-rw-r--r--patches.drivers/ixgbe-use-mii_bus-to-handle-MII-related-ioctls.patch59
-rw-r--r--patches.drivers/libcxgb-fix-incorrect-ppmax-calculation.patch46
-rw-r--r--patches.drivers/mmc-block-Delete-gendisk-before-cleaning-up-the-requ.patch93
-rw-r--r--patches.drivers/net-chelsio-Add-a-missing-check-on-cudg_get_buffer.patch31
-rw-r--r--patches.drivers/net-cxgb4-fix-various-indentation-issues.patch57
-rw-r--r--patches.drivers/net-tls-free-ctx-in-sock-destruct.patch107
-rw-r--r--patches.drivers/nvme-rdma-fix-possible-free-of-a-non-allocated-async.patch5
-rw-r--r--patches.drivers/qed-Add-API-for-SmartAN-query.patch88
-rw-r--r--patches.drivers/qed-Add-iWARP-100g-support.patch111
-rw-r--r--patches.drivers/qed-Add-infrastructure-for-error-detection-and-recov.patch507
-rw-r--r--patches.drivers/qed-Add-llh-ppfid-interface-and-100g-support-for-off.patch1904
-rw-r--r--patches.drivers/qed-Add-qed-devlink-parameters-table.patch173
-rw-r--r--patches.drivers/qed-Change-hwfn-used-for-sb-initialization.patch163
-rw-r--r--patches.drivers/qed-Define-new-MF-bit-for-no_vlan-config.patch66
-rw-r--r--patches.drivers/qed-Delete-redundant-doorbell-recovery-types.patch174
-rw-r--r--patches.drivers/qed-Fix-iWARP-buffer-size-provided-for-syn-packet-pr.patch83
-rw-r--r--patches.drivers/qed-Fix-iWARP-syn-packet-mac-address-validation.patch37
-rw-r--r--patches.drivers/qed-Fix-missing-DORQ-attentions.patch103
-rw-r--r--patches.drivers/qed-Fix-static-checker-warning-8e2ea3ea.patch72
-rw-r--r--patches.drivers/qed-Fix-the-DORQ-s-attentions-handling.patch159
-rw-r--r--patches.drivers/qed-Fix-the-doorbell-address-sanity-check.patch69
-rw-r--r--patches.drivers/qed-Mark-expected-switch-fall-through.patch36
-rw-r--r--patches.drivers/qed-Modify-api-for-performing-a-dmae-to-another-PF.patch385
-rw-r--r--patches.drivers/qed-Modify-offload-protocols-to-use-the-affined-engi.patch926
-rw-r--r--patches.drivers/qed-Read-device-port-count-from-the-shmem.patch222
-rw-r--r--patches.drivers/qed-Reduce-the-severity-of-ptp-debug-message.patch32
-rw-r--r--patches.drivers/qed-Revise-load-sequence-to-avoid-PCI-errors.patch519
-rw-r--r--patches.drivers/qed-Set-the-doorbell-address-correctly.patch99
-rw-r--r--patches.drivers/qed-fix-indentation-issue-with-statements-in-an-if-b.patch38
-rw-r--r--patches.drivers/qed-fix-spelling-mistake-faspath-fastpath.patch28
-rw-r--r--patches.drivers/qed-fix-spelling-mistake-inculde-include.patch32
-rw-r--r--patches.drivers/qed-remove-duplicated-include-from-qed_if.h.patch27
-rw-r--r--patches.drivers/qed-remove-redundant-assignment-to-rc.patch31
-rw-r--r--patches.drivers/qede-Add-ethtool-interface-for-SmartAN-query.patch51
-rw-r--r--patches.drivers/qede-Error-recovery-process.patch636
-rw-r--r--patches.drivers/qede-Fix-internal-loopback-failure-with-jumbo-mtu-co.patch35
-rw-r--r--patches.drivers/qede-Handle-infinite-driver-spinning-for-Tx-timestam.patch159
-rw-r--r--patches.drivers/qede-Populate-mbi-version-in-ethtool-driver-query-da.patch63
-rw-r--r--patches.drivers/qede-fix-write-to-free-d-pointer-error-and-double-fr.patch48
-rw-r--r--patches.drivers/qede-place-ethtool_rx_flow_spec-after-code-after-TC-.patch306
-rw-r--r--patches.drivers/qedr-Change-the-MSI-X-vectors-selection-to-be-based-.patch89
-rw-r--r--patches.drivers/qlcnic-remove-assumption-that-vlan_tci-0.patch49
-rw-r--r--patches.drivers/qlcnic-remove-set-but-not-used-variables-cur_rings-m.patch67
-rw-r--r--patches.drivers/qlcnic-remove-set-but-not-used-variables-op-cmd_op.patch54
-rw-r--r--patches.drivers/rtc-da9063-set-uie_unsupported-when-relevant.patch44
-rw-r--r--patches.drivers/rtc-sh-Fix-invalid-alarm-warning-for-non-enabled-ala.patch48
-rw-r--r--patches.drivers/scsi-hisi_sas-add-host-reset-interface-for-test73
-rw-r--r--patches.drivers/scsi-hisi_sas-add-softreset-in-hisi_sas_i_t_nexus_reset45
-rw-r--r--patches.drivers/scsi-hisi_sas-adjust-the-printk-format-of-functions-hisi_sas_init_device39
-rw-r--r--patches.drivers/scsi-hisi_sas-allocate-different-sas-address-for-directly-attached-situation43
-rw-r--r--patches.drivers/scsi-hisi_sas-don-t-fail-it-nexus-reset-for-open-reject-timeout81
-rw-r--r--patches.drivers/scsi-hisi_sas-don-t-hard-reset-disk-during-controller-reset38
-rw-r--r--patches.drivers/scsi-hisi_sas-fix-for-setting-the-phy-linkrate-when-disconnected237
-rw-r--r--patches.drivers/scsi-hisi_sas-remedy-inconsistent-phy-down-state-in-software47
-rw-r--r--patches.drivers/scsi-hisi_sas-remove-the-check-of-sas_dev-status-in-hisi_sas_i_t_nexus_reset121
-rw-r--r--patches.drivers/scsi-hisi_sas-send-hard-reset-to-clear-the-previous-affiliation-of-stp-target-port172
-rw-r--r--patches.drivers/scsi-hisi_sas-set-phy-linkrate-when-disconnected79
-rw-r--r--patches.drivers/scsi-hisi_sas-some-misc-tidy-up315
-rw-r--r--patches.drivers/scsi-hisi_sas-support-all-ras-events-with-msi-interrupts611
-rw-r--r--patches.drivers/scsi-libsas-do-discovery-on-empty-phy-to-update-phy-info53
-rw-r--r--patches.drivers/scsi-libsas-improve-vague-log-in-sas-rediscovery90
-rw-r--r--patches.drivers/scsi-libsas-inject-revalidate-event-for-root-port-event110
-rw-r--r--patches.drivers/scsi-libsas-print-expander-phy-indexes-in-decimal255
-rw-r--r--patches.drivers/scsi-libsas-stop-hardcoding-sas-address-length152
-rw-r--r--patches.drivers/scsi-libsas-support-sata-phy-connection-rate-unmatch-fixing-during-discovery97
-rw-r--r--patches.drivers/scsi-libsas-try-to-retain-programmed-min-linkrate-for-sata-min-pathway-unmatch-fixing50
-rw-r--r--patches.drivers/scsi-qedf-fixup-bit-operations.patch77
-rw-r--r--patches.drivers/scsi-qedf-fixup-locking-in-qedf_restart_rport.patch42
-rw-r--r--patches.drivers/scsi-qedf-missing-kref_put-in-qedf_xmit.patch37
-rw-r--r--patches.drivers/scsi-qla2xxx-Add-First-Burst-support-for-FC-NVMe-dev.patch181
-rw-r--r--patches.drivers/scsi-qla2xxx-Add-new-FW-dump-template-entry-types.patch535
-rw-r--r--patches.drivers/scsi-qla2xxx-Add-protection-mask-module-parameters.patch82
-rw-r--r--patches.drivers/scsi-qla2xxx-Add-support-for-setting-port-speed.patch288
-rw-r--r--patches.drivers/scsi-qla2xxx-Check-for-FW-started-flag-before-aborti.patch39
-rw-r--r--patches.drivers/scsi-qla2xxx-Declare-local-functions-static.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Fix-DMA-error-when-the-DIF-sg-buffer-cr.patch790
-rw-r--r--patches.drivers/scsi-qla2xxx-Fix-LUN-discovery-if-loop-id-is-not-ass.patch48
-rw-r--r--patches.drivers/scsi-qla2xxx-Fix-code-indentation-for-qla27xx_fwdt_e.patch80
-rw-r--r--patches.drivers/scsi-qla2xxx-Fix-panic-from-use-after-free-in-qla2x0.patch44
-rw-r--r--patches.drivers/scsi-qla2xxx-Fix-unload-when-NVMe-devices-are-config.patch71
-rw-r--r--patches.drivers/scsi-qla2xxx-Improve-several-kernel-doc-headers.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Introduce-a-switch-case-statement-in-ql.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Make-qla2x00_sysfs_write_nvram-easier-t.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Make-sure-that-qlafx00_ioctl_iosb_entry.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Move-debug-messages-before-sending-srb-.patch305
-rw-r--r--patches.drivers/scsi-qla2xxx-Move-marker-request-behind-QPair.patch393
-rw-r--r--patches.drivers/scsi-qla2xxx-NULL-check-before-some-freeing-function.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Prevent-SysFS-access-when-chip-is-down.patch35
-rw-r--r--patches.drivers/scsi-qla2xxx-Prevent-multiple-ADISC-commands-per-ses.patch38
-rw-r--r--patches.drivers/scsi-qla2xxx-Remove-a-set-but-not-used-variable.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Remove-two-arguments-from-qlafx00_error.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Set-remote-port-devloss-timeout-to-0.patch66
-rw-r--r--patches.drivers/scsi-qla2xxx-Split-the-__qla2x00_abort_all_cmds-func.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-Update-driver-version-to-10.00.00.14-k.patch30
-rw-r--r--patches.drivers/scsi-qla2xxx-avoid-printf-format-warning.patch66
-rw-r--r--patches.drivers/scsi-qla2xxx-check-for-kstrtol-failure.patch37
-rw-r--r--patches.drivers/scsi-qla2xxx-fully-convert-to-the-generic-DMA-API.patch2
-rw-r--r--patches.drivers/scsi-qla2xxx-remove-redundant-null-check-on-pointer-.patch63
-rw-r--r--patches.drivers/scsi-qla2xxx-use-lower_32_bits-and-upper_32_bits-ins.patch2
-rw-r--r--patches.drivers/xsk-export-xdp_get_umem_from_qid.patch31
-rw-r--r--patches.drm/0001-drm-vmwgfx-NULL-pointer-dereference-from-vmw_cmd_dx_.patch36
-rw-r--r--patches.drm/0001-fbdev-fix-WARNING-in-__alloc_pages_nodemask-bug.patch54
-rw-r--r--patches.drm/0001-fbdev-fix-divide-error-in-fb_var_to_videomode.patch84
-rw-r--r--patches.drm/0002-drm-i915-gvt-Tiled-Resources-mmios-are-in-context-mm.patch41
-rw-r--r--patches.drm/0003-drm-i915-gvt-add-0x4dfc-to-gen9-save-restore-list.patch30
-rw-r--r--patches.drm/0004-drm-etnaviv-lock-MMU-while-dumping-core.patch51
-rw-r--r--patches.drm/drm-edid-Fix-a-missing-check-bug-in-drm_load_edid_firmware.patch38
-rw-r--r--patches.fixes/0001-Documentation-Add-MDS-vulnerability-documentation.patch353
-rw-r--r--patches.fixes/0001-dt-bindings-clock-r8a7795-Remove-CSIREF-clock.patch38
-rw-r--r--patches.fixes/0001-dt-bindings-clock-r8a7796-Remove-CSIREF-clock.patch38
-rw-r--r--patches.fixes/0001-dt-bindings-net-Add-binding-for-the-external-clock-f.patch48
-rw-r--r--patches.fixes/0001-dt-bindings-rtc-sun6i-rtc-Fix-register-range-in-exam.patch30
-rw-r--r--patches.fixes/0001-keys-safe-concurrent-user-session-uid-_keyring-acces.patch165
-rw-r--r--patches.fixes/0001-mm-hwpoison-fix-thp-split-handing-in-soft_offline_in.patch76
-rw-r--r--patches.fixes/0001-mwifiex-Abort-at-too-short-BSS-descriptor-element.patch89
-rw-r--r--patches.fixes/0001-mwifiex-Fix-heap-overflow-in-mwifiex_uap_parse_tail_.patch115
-rw-r--r--patches.fixes/0001-mwifiex-Fix-possible-buffer-overflows-at-parsing-bss.patch50
-rw-r--r--patches.fixes/0001-of-fix-clang-Wunsequenced-for-be32_to_cpu.patch59
-rw-r--r--patches.fixes/0001-p54-drop-device-reference-count-if-fails-to-enable-d.patch45
-rw-r--r--patches.fixes/0001-xenbus-drop-useless-LIST_HEAD-in-xenbus_write_watch-.patch45
-rw-r--r--patches.fixes/0002-btrfs-qgroup-Check-bg-while-resuming-relocation-to-a.patch93
-rw-r--r--patches.fixes/KVM-s390-fix-memory-overwrites-when-not-using-SCA-en.patch40
-rw-r--r--patches.fixes/KVM-s390-provide-io-interrupt-kvm_stat.patch29
-rw-r--r--patches.fixes/KVM-s390-use-created_vcpus-in-more-places.patch44
-rw-r--r--patches.fixes/KVM-s390-vsie-fix-8k-check-for-the-itdba.patch41
-rw-r--r--patches.fixes/RDMA-rxe-Consider-skb-reserve-space-based-on-netdev-.patch32
-rw-r--r--patches.fixes/bpf-add-map_lookup_elem_sys_only-for-lookups-from-sy.patch51
-rw-r--r--patches.fixes/bpf-lru-avoid-messing-with-eviction-heuristics-upon-.patch104
-rw-r--r--patches.fixes/configfs-Fix-use-after-free-when-accessing-sd-s_dent.patch60
-rw-r--r--patches.fixes/ext4-avoid-panic-during-forced-reboot-due-to-aborted.patch39
-rw-r--r--patches.fixes/ext4-fix-data-corruption-caused-by-overlapping-unali.patch54
-rw-r--r--patches.fixes/ext4-make-sanity-check-in-mballoc-more-strict.patch39
-rw-r--r--patches.fixes/ext4-wait-for-outstanding-dio-during-truncate-in-noj.patch62
-rw-r--r--patches.fixes/fs-prevent-page-refcount-overflow-in-pipe_buf_get.patch161
-rw-r--r--patches.fixes/fs-sync.c-sync_file_range-2-may-use-WB_SYNC_ALL-writ.patch87
-rw-r--r--patches.fixes/fs-writeback.c-use-rcu_barrier-to-wait-for-inflight-.patch75
-rw-r--r--patches.fixes/indirect-call-wrappers-helpers-to-speed-up-indirect-.patch85
-rw-r--r--patches.fixes/jbd2-check-superblock-mapped-prior-to-committing.patch53
-rw-r--r--patches.fixes/mm-add-try_get_page-helper-function.patch53
-rw-r--r--patches.fixes/mm-fix-__gup_device_huge-vs-unmap.patch6
-rw-r--r--patches.fixes/mm-gup-ensure-real-head-page-is-ref-counted-when-using-hugepages.patch101
-rw-r--r--patches.fixes/mm-gup-remove-broken-vm_bug_on_page-compound-check-for-hugepages.patch67
-rw-r--r--patches.fixes/mm-make-page-ref-count-overflow-check-tighter-and-more-explicit.patch49
-rw-r--r--patches.fixes/mm-prevent-get_user_pages-from-overflowing-page-refcount.patch234
-rw-r--r--patches.fixes/mount-copy-the-port-field-into-the-cloned-nfs_server.patch31
-rw-r--r--patches.fixes/net-unbreak-CONFIG_RETPOLINE-n-builds.patch37
-rw-r--r--patches.fixes/net-use-indirect-call-wrappers-at-GRO-network-layer.patch111
-rw-r--r--patches.fixes/net-use-indirect-call-wrappers-at-GRO-transport-laye.patch290
-rw-r--r--patches.fixes/ocfs2-fix-ocfs2-read-inode-data-panic-in-ocfs2_iget.patch184
-rw-r--r--patches.fixes/scsi-qla2xxx-fix-driver-unload-by-shutting-down-chip.patch83
-rw-r--r--patches.fixes/udp-use-indirect-call-wrappers-for-GRO-socket-lookup.patch52
-rw-r--r--patches.fixes/xfs-serialize-unaligned-dio-writes-against-all-other.patch94
-rw-r--r--patches.kernel.org/4.4.164-131-mm-thp-relax-__GFP_THISNODE-for-MADV_HUGEPAGE.patch233
-rw-r--r--patches.suse/0001-btrfs-extent-tree-Fix-a-bug-that-btrfs-is-unable-to-.patch86
-rw-r--r--patches.suse/PCI-Factor-out-pcie_retrain_link-function.patch85
-rw-r--r--patches.suse/PCI-Work-around-Pericom-PCIe-to-PCI-bridge-Retrain-L.patch100
-rw-r--r--patches.suse/PCI-endpoint-Use-EPC-s-device-in-dma_alloc_coherent-.patch82
-rw-r--r--patches.suse/bonding-fix-event-handling-for-stacked-bonds.patch45
-rw-r--r--patches.suse/btrfs-don-t-double-unlock-on-error-in-btrfs_punch_ho.patch42
-rw-r--r--patches.suse/btrfs-fix-fsync-not-persisting-changed-attributes-of.patch99
-rw-r--r--patches.suse/btrfs-fix-race-between-ranged-fsync-and-writeback-of.patch245
-rw-r--r--patches.suse/btrfs-fix-race-updating-log-root-item-during-fsync.patch126
-rw-r--r--patches.suse/btrfs-fix-wrong-ctime-and-mtime-of-a-directory-after.patch85
-rw-r--r--patches.suse/btrfs-reloc-also-queue-orphan-reloc-tree-for-cleanup-to-avoid-bug_on.patch137
-rw-r--r--patches.suse/btrfs-tree-checker-detect-file-extent-items-with-ove.patch114
-rw-r--r--patches.suse/ftrace-x86_64-emulate-call-function-while-updating-in-breakpoint-handler.patch150
-rw-r--r--patches.suse/ipv4-add-sanity-checks-in-ipv4_link_failure.patch152
-rw-r--r--patches.suse/ipv4-ensure-rcu_read_lock-in-ipv4_link_failure.patch86
-rw-r--r--patches.suse/ipv4-ip_do_fragment-Preserve-skb_iif-during-fragment.patch40
-rw-r--r--patches.suse/ipv4-recompile-ip-options-in-ipv4_link_failure.patch40
-rw-r--r--patches.suse/ipv4-set-the-tcp_min_rtt_wlen-range-from-0-to-one-da.patch88
-rw-r--r--patches.suse/kernel-sys.c-prctl-fix-false-positive-in-validate_pr.patch49
-rw-r--r--patches.suse/livepatch-convert-error-about-unsupported-reliable-stacktrace-into-a-warning.patch47
-rw-r--r--patches.suse/livepatch-remove-custom-kobject-state-handling.patch215
-rw-r--r--patches.suse/livepatch-remove-duplicated-code-for-early-initialization.patch127
-rw-r--r--patches.suse/memcg-make-it-work-on-sparse-non-0-node-systems.patch93
-rw-r--r--patches.suse/mlxsw-spectrum-Fix-autoneg-status-in-ethtool.patch42
-rw-r--r--patches.suse/net-atm-Fix-potential-Spectre-v1-vulnerabilities.patch51
-rw-r--r--patches.suse/net-dsa-bcm_sf2-fix-buffer-overflow-doing-set_rxnfc.patch41
-rw-r--r--patches.suse/net-dsa-mv88e6xxx-fix-handling-of-upper-half-of-STAT.patch31
-rw-r--r--patches.suse/net-fou-do-not-use-guehdr-after-iptunnel_pull_offloa.patch47
-rw-r--r--patches.suse/net-mlx5e-ethtool-Remove-unsupported-SFP-EEPROM-high.patch47
-rw-r--r--patches.suse/net-phy-marvell-Fix-buffer-overrun-with-stats-counte.patch49
-rw-r--r--patches.suse/net-rds-exchange-of-8K-and-1M-pool.patch78
-rw-r--r--patches.suse/net-rose-fix-unbound-loop-in-rose_loopback_timer.patch161
-rw-r--r--patches.suse/net-stmmac-move-stmmac_check_ether_addr-to-driver-pr.patch44
-rw-r--r--patches.suse/net-thunderx-don-t-allow-jumbo-frames-with-XDP.patch39
-rw-r--r--patches.suse/net-thunderx-raise-XDP-MTU-to-1508.patch53
-rw-r--r--patches.suse/nvme-flush-scan_work-when-resetting-controller.patch38
-rw-r--r--patches.suse/objtool-fix-function-fallthrough-detection.patch59
-rw-r--r--patches.suse/ptrace-take-into-account-saved_sigmask-in-PTRACE-GET.patch127
-rw-r--r--patches.suse/scsi-zfcp-make-dix-experimental-disabled-and-independent-of-dif81
-rw-r--r--patches.suse/sctp-avoid-running-the-sctp-state-machine-recursivel.patch163
-rw-r--r--patches.suse/signal-Always-notice-exiting-tasks.patch62
-rw-r--r--patches.suse/signal-Better-detection-of-synchronous-signals.patch115
-rw-r--r--patches.suse/signal-Restore-the-stop-PTRACE_EVENT_EXIT.patch55
-rw-r--r--patches.suse/stmmac-pci-Adjust-IOT2000-matching.patch50
-rw-r--r--patches.suse/switchtec-Fix-unintended-mask-of-MRPC-event.patch43
-rw-r--r--patches.suse/tcp-tcp_grow_window-needs-to-respect-tcp_space.patch61
-rw-r--r--patches.suse/team-fix-possible-recursive-locking-when-add-slaves.patch52
-rw-r--r--patches.suse/tipc-missing-entries-in-name-table-of-publications.patch41
-rw-r--r--patches.suse/tracing-fix-buffer_ref-pipe-ops.patch27
-rw-r--r--patches.suse/tracing-fix-partial-reading-of-trace-event-s-id-file.patch77
-rw-r--r--patches.suse/userfaultfd-use-RCU-to-free-the-task-struct-when-for.patch132
-rw-r--r--patches.suse/vhost-reject-zero-size-iova-range.patch36
-rw-r--r--patches.suse/x86_64-add-gap-to-int3-to-allow-for-call-emulation.patch73
-rw-r--r--patches.suse/x86_64-allow-breakpoints-to-emulate-call-instructions.patch91
-rwxr-xr-xscripts/bugzilla-create1
-rw-r--r--scripts/git_sort/README.md13
-rwxr-xr-xscripts/git_sort/git_sort.py15
-rw-r--r--scripts/git_sort/lib.py12
-rwxr-xr-xscripts/git_sort/merge_tool.py3
-rw-r--r--scripts/git_sort/pygit2_wrapper.py30
-rwxr-xr-xscripts/git_sort/qcp.py8
-rwxr-xr-xscripts/git_sort/qdupcheck.py3
-rwxr-xr-xscripts/git_sort/series_insert.py3
-rwxr-xr-xscripts/git_sort/series_sort.py9
-rw-r--r--scripts/git_sort/tests/sle12-sp2/Dockerfile2
-rw-r--r--scripts/git_sort/tests/support.py3
-rwxr-xr-xscripts/git_sort/tests/test_git_sort.py3
-rwxr-xr-xscripts/git_sort/tests/test_quilt_mode.py3
-rwxr-xr-xscripts/git_sort/tests/test_series_insert.py3
-rwxr-xr-xscripts/git_sort/tests/test_series_sort.py3
-rwxr-xr-xscripts/git_sort/update_clone.py3
-rw-r--r--series.conf534
-rw-r--r--supported.conf1
583 files changed, 68297 insertions, 417 deletions
diff --git a/blacklist.conf b/blacklist.conf
index b937c73a6e..657dbeab25 100644
--- a/blacklist.conf
+++ b/blacklist.conf
@@ -1046,3 +1046,22 @@ c8ea3663f7a8e6996d44500ee818c9330ac4fd88 # virt/fsl: no supported platform
6a024330650e24556b8a18cc654ad00cfecf6c6c # virt/fsl: no supported platform
92ff42645028fa6f9b8aa767718457b9264316b4 # ipvlan: reverted in below
918150cbd6103199fe326e8b1462a7f0d81475e4 # ipvlan: reverting the above
+2100e3ca3676e894fa48b8f6f01d01733387fe81 # Kconfig only and our kernels compile
+e5d01196c0428a206f307e9ee5f6842964098ff0 # bug requires e50e5129f384 "ext4: xattr-in-inode support"
+08fc98a4d6424af66eb3ac4e2cedd2fc927ed436 # bug requires e08ac99fa2a2 "ext4: add largedir feature"
+8ea58f1e8b11cca3087b294779bf5959bf89cc10 # not needed. We can happily use just AR.
+0294e6f4a0006856e1f36b8cd8fa088d9e499e98 # kbuild: not a bugfix
+954b4b752a4c4e963b017ed8cef4c453c5ed308d # ARCH_RENESAS = n
+be20bbcb0a8cb5597cc62b3e28d275919f3431df # ARCH_RENESAS = n
+b80a2bfce85e1051056d98d04ecb2d0b55cbbc1c # fixes 2610e8894663 which we don't have
+d8f945bf8096375f458683b5718722a2d5dda2f0 # Temporarily disabling until qla2xxx has been updated
+43a0541e312f7136e081e6bf58f6c8a2e9672688 # We don't build the tegra-smmu driver
+8069053880e0ee3a75fd6d7e0a30293265fe3de4 # sm712fb driver not enabled: fbdev: sm712fb: fix white screen of death on reboot, don't set CR3B-CR3F
+5481115e25e42b9215f2619452aa99c95f08492f # sm712fb driver not enabled: fbdev: sm712fb: fix brightness control on reboot, don't set SR30
+dcf9070595e100942c539e229dde4770aaeaa4e9 # sm712fb driver not enabled: fbdev: sm712fb: fix VRAM detection, don't set SR70/71/74/75
+ec1587d5073f29820e358f3a383850d61601d981 # sm712fb driver not enabled: fbdev: sm712fb: fix boot screen glitch when sm712fb replaces VGA
+9e0e59993df0601cddb95c4f6c61aa3d5e753c00 # sm712fb driver not enabled: fbdev: sm712fb: fix crashes during framebuffer writes by correctly mapping VRAM
+f627caf55b8e735dcec8fa6538e9668632b55276 # sm712fb driver not enabled: fbdev: sm712fb: fix crashes and garbled display during DPMS modesetting
+6053d3a4793e5bde6299ac5388e76a3bf679ff65 # sm712fb driver not enabled: fbdev: sm712fb: fix support for 1024x768-16 mode
+4ed7d2ccb7684510ec5f7a8f7ef534bc6a3d55b2 # sm712fb driver not enabled: fbdev: sm712fb: use 1024x768 by default on non-MIPS, fix garbled display
+2f0799a0ffc033bf3cc82d5032acc3ec633464c2 # this reverts ac5b2c18911f and there is a disagreement over this policy. We want to have ac5b2c18911f applied
diff --git a/config/arm64/default b/config/arm64/default
index e01bfbe2f6..ddb356a667 100644
--- a/config/arm64/default
+++ b/config/arm64/default
@@ -7036,6 +7036,7 @@ CONFIG_ARM_DSU_PMU=m
CONFIG_HISI_PMU=y
CONFIG_QCOM_L2_PMU=y
CONFIG_QCOM_L3_PMU=y
+CONFIG_THUNDERX2_PMU=m
CONFIG_XGENE_PMU=y
CONFIG_ARM_SPE_PMU=m
CONFIG_RAS=y
diff --git a/kabi/severities b/kabi/severities
index 07bae79255..51ff3157f2 100644
--- a/kabi/severities
+++ b/kabi/severities
@@ -59,3 +59,10 @@ fs/ceph PASS
drivers/net/ethernet/chelsio/cxgb4/* PASS
drivers/net/ethernet/chelsio/cxgb4vf/* PASS
drivers/net/ethernet/chelsio/libcxgb/* PASS
+
+# inter-module symbols for qed/qede/qedf/qedi/qedr
+drivers/net/ethernet/qlogic/qed/* PASS
+drivers/net/ethernet/qlogic/qede/* PASS
+drivers/scsi/qedf/* PASS
+drivers/scsi/qedi/* PASS
+drivers/infiniband/hw/qedr/* PASS
diff --git a/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch b/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch
index 15fa23008c..96adb9660e 100644
--- a/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch
+++ b/patches.arch/0002-efi-arm-libstub-add-a-root-memreserve-config-table.patch
@@ -3,7 +3,7 @@ Date: Fri, 21 Sep 2018 09:32:45 -0700
Subject: efi/arm: libstub: add a root memreserve config table
Git-commit: b844470f22061e8cd646cb355e85d2f518b2c913
Patch-mainline: v4.20-rc1
-References: bsc#1111147
+References: bsc#1111147 bsc#1117158 bsc#1134671
Installing UEFI configuration tables can only be done before calling
ExitBootServices(), so if we want to use the new MEMRESRVE config table
diff --git a/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch b/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch
index e78dc375aa..1ee43dc177 100644
--- a/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch
+++ b/patches.arch/0003-efi-add-API-to-reserve-memory-persistently-across-ke.patch
@@ -3,7 +3,7 @@ Date: Fri, 21 Sep 2018 09:32:46 -0700
Subject: efi: add API to reserve memory persistently across kexec reboot
Git-commit: a23d3bb05ccbd815c79293d2207fedede0b3515d
Patch-mainline: v4.20-rc1
-References: bsc#1111147
+References: bsc#1111147 bsc#1117158 bsc#1134671
Add kernel plumbing to reserve memory regions persistently on a EFI
system by adding entries to the MEMRESERVE linked list.
diff --git a/patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch b/patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch
new file mode 100644
index 0000000000..c20b3b865b
--- /dev/null
+++ b/patches.arch/ARM-iop-don-t-use-using-64-bit-DMA-masks.patch
@@ -0,0 +1,154 @@
+From 2125801ccce19249708ca3245d48998e70569ab8 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 25 Mar 2019 16:50:43 +0100
+Subject: [PATCH] ARM: iop: don't use using 64-bit DMA masks
+Git-commit: 2125801ccce19249708ca3245d48998e70569ab8
+Patch-mainline: v5.1-rc4
+References: bsc#1051510
+
+clang warns about statically defined DMA masks from the DMA_BIT_MASK
+macro with length 64:
+
+ arch/arm/mach-iop13xx/setup.c:303:35: error: shift count >= width of type [-Werror,-Wshift-count-overflow]
+ static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
+ ^~~~~~~~~~~~~~~~
+ include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK'
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+ ^ ~~~
+
+The ones in iop shouldn't really be 64 bit masks, so changing them
+to what the driver can support avoids the warning.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ arch/arm/mach-iop13xx/setup.c | 8 ++++----
+ arch/arm/mach-iop13xx/tpmi.c | 10 +++++-----
+ arch/arm/plat-iop/adma.c | 6 +++---
+ 3 files changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
+index 53c316f7301e..fe4932fda01d 100644
+--- a/arch/arm/mach-iop13xx/setup.c
++++ b/arch/arm/mach-iop13xx/setup.c
+@@ -300,7 +300,7 @@ static struct resource iop13xx_adma_2_resources[] = {
+ }
+ };
+
+-static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
++static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(32);
+ static struct iop_adma_platform_data iop13xx_adma_0_data = {
+ .hw_id = 0,
+ .pool_size = PAGE_SIZE,
+@@ -324,7 +324,7 @@ static struct platform_device iop13xx_adma_0_channel = {
+ .resource = iop13xx_adma_0_resources,
+ .dev = {
+ .dma_mask = &iop13xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop13xx_adma_0_data,
+ },
+ };
+@@ -336,7 +336,7 @@ static struct platform_device iop13xx_adma_1_channel = {
+ .resource = iop13xx_adma_1_resources,
+ .dev = {
+ .dma_mask = &iop13xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop13xx_adma_1_data,
+ },
+ };
+@@ -348,7 +348,7 @@ static struct platform_device iop13xx_adma_2_channel = {
+ .resource = iop13xx_adma_2_resources,
+ .dev = {
+ .dma_mask = &iop13xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop13xx_adma_2_data,
+ },
+ };
+diff --git a/arch/arm/mach-iop13xx/tpmi.c b/arch/arm/mach-iop13xx/tpmi.c
+index db511ec2b1df..116feb6b261e 100644
+--- a/arch/arm/mach-iop13xx/tpmi.c
++++ b/arch/arm/mach-iop13xx/tpmi.c
+@@ -152,7 +152,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
+ }
+ };
+
+-u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
++u64 iop13xx_tpmi_mask = DMA_BIT_MASK(32);
+ static struct platform_device iop13xx_tpmi_0_device = {
+ .name = "iop-tpmi",
+ .id = 0,
+@@ -160,7 +160,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
+ .resource = iop13xx_tpmi_0_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -171,7 +171,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
+ .resource = iop13xx_tpmi_1_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -182,7 +182,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
+ .resource = iop13xx_tpmi_2_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+@@ -193,7 +193,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
+ .resource = iop13xx_tpmi_3_resources,
+ .dev = {
+ .dma_mask = &iop13xx_tpmi_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ },
+ };
+
+diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
+index a4d1f8de3b5b..d9612221e484 100644
+--- a/arch/arm/plat-iop/adma.c
++++ b/arch/arm/plat-iop/adma.c
+@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
+ .resource = iop3xx_dma_0_resources,
+ .dev = {
+ .dma_mask = &iop3xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop3xx_dma_0_data,
+ },
+ };
+@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
+ .resource = iop3xx_dma_1_resources,
+ .dev = {
+ .dma_mask = &iop3xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop3xx_dma_1_data,
+ },
+ };
+@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
+ .resource = iop3xx_aau_resources,
+ .dev = {
+ .dma_mask = &iop3xx_adma_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = (void *) &iop3xx_aau_data,
+ },
+ };
+--
+2.16.4
+
diff --git a/patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch b/patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch
new file mode 100644
index 0000000000..68460d2c69
--- /dev/null
+++ b/patches.arch/ARM-orion-don-t-use-using-64-bit-DMA-masks.patch
@@ -0,0 +1,53 @@
+From cd92d74d67c811dc22544430b9ac3029f5bd64c5 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 25 Mar 2019 16:50:42 +0100
+Subject: [PATCH] ARM: orion: don't use using 64-bit DMA masks
+Git-commit: cd92d74d67c811dc22544430b9ac3029f5bd64c5
+Patch-mainline: v5.1-rc4
+References: bsc#1051510
+
+clang warns about statically defined DMA masks from the DMA_BIT_MASK
+macro with length 64:
+
+arch/arm/plat-orion/common.c:625:29: error: shift count >= width of type [-Werror,-Wshift-count-overflow]
+ .coherent_dma_mask = DMA_BIT_MASK(64),
+ ^~~~~~~~~~~~~~~~
+include/linux/dma-mapping.h:141:54: note: expanded from macro 'DMA_BIT_MASK'
+ #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+The ones in orion shouldn't really be 64 bit masks, so changing them
+to what the driver can support avoids the warning.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ arch/arm/plat-orion/common.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
+index a6c81ce00f52..8647cb80a93b 100644
+--- a/arch/arm/plat-orion/common.c
++++ b/arch/arm/plat-orion/common.c
+@@ -622,7 +622,7 @@ static struct platform_device orion_xor0_shared = {
+ .resource = orion_xor0_shared_resources,
+ .dev = {
+ .dma_mask = &orion_xor_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &orion_xor0_pdata,
+ },
+ };
+@@ -683,7 +683,7 @@ static struct platform_device orion_xor1_shared = {
+ .resource = orion_xor1_shared_resources,
+ .dev = {
+ .dma_mask = &orion_xor_dmamask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
++ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .platform_data = &orion_xor1_pdata,
+ },
+ };
+--
+2.16.4
+
diff --git a/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch b/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch
index f6cadadeb0..45dd19c57b 100644
--- a/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch
+++ b/patches.arch/arm64-acpi-fix-alignment-fault-in-accessing-ACPI.patch
@@ -3,7 +3,7 @@ Date: Mon, 23 Jul 2018 10:57:32 +0900
Subject: arm64: acpi: fix alignment fault in accessing ACPI
Git-commit: 09ffcb0d718a0b100f0bed029b830987ecf53fab
Patch-mainline: v4.19-rc1
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
This is a fix against the issue that crash dump kernel may hang up
during booting, which can happen on any ACPI-based system with "ACPI
diff --git a/patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch b/patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch
new file mode 100644
index 0000000000..026e011e2b
--- /dev/null
+++ b/patches.arch/crypto-vmx-ghash-do-nosimd-fallback-manually.patch
@@ -0,0 +1,312 @@
+From 357d065a44cdd77ed5ff35155a989f2a763e96ef Mon Sep 17 00:00:00 2001
+From: Daniel Axtens <dja@axtens.net>
+Date: Fri, 17 May 2019 01:40:02 +1000
+Subject: [PATCH] crypto: vmx - ghash: do nosimd fallback manually
+
+References: bsc#1135661, bsc#1137162
+Patch-mainline: v5.2-rc2
+Git-commit: 357d065a44cdd77ed5ff35155a989f2a763e96ef
+
+VMX ghash was using a fallback that did not support interleaving simd
+and nosimd operations, leading to failures in the extended test suite.
+
+If I understood correctly, Eric's suggestion was to use the same
+data format that the generic code uses, allowing us to call into it
+with the same contexts. I wasn't able to get that to work - I think
+there's a very different key structure and data layout being used.
+
+So instead steal the arm64 approach and perform the fallback
+operations directly if required.
+
+Fixes: cc333cd68dfa ("crypto: vmx - Adding GHASH routines for VMX module")
+Cc: stable@vger.kernel.org # v4.1+
+Reported-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Daniel Axtens <dja@axtens.net>
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Tested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/crypto/vmx/ghash.c | 211 +++++++++++++++----------------------
+ 1 file changed, 86 insertions(+), 125 deletions(-)
+
+--- a/drivers/crypto/vmx/ghash.c
++++ b/drivers/crypto/vmx/ghash.c
+@@ -1,22 +1,14 @@
++// SPDX-License-Identifier: GPL-2.0
+ /**
+ * GHASH routines supporting VMX instructions on the Power 8
+ *
+- * Copyright (C) 2015 International Business Machines Inc.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; version 2 only.
+- *
+- * This program is distributed in the hope that it will be useful,
+- * but WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+- * GNU General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program; if not, write to the Free Software
+- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
++ * Copyright (C) 2015, 2019 International Business Machines Inc.
+ *
+ * Author: Marcelo Henrique Cerri <mhcerri@br.ibm.com>
++ *
++ * Extended by Daniel Axtens <dja@axtens.net> to replace the fallback
++ * mechanism. The new approach is based on arm64 code, which is:
++ * Copyright (C) 2014 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ */
+
+ #include <linux/types.h>
+@@ -39,71 +31,25 @@ void gcm_ghash_p8(u64 Xi[2], const u128
+ const u8 *in, size_t len);
+
+ struct p8_ghash_ctx {
++ /* key used by vector asm */
+ u128 htable[16];
+- struct crypto_shash *fallback;
++ /* key used by software fallback */
++ be128 key;
+ };
+
+ struct p8_ghash_desc_ctx {
+ u64 shash[2];
+ u8 buffer[GHASH_DIGEST_SIZE];
+ int bytes;
+- struct shash_desc fallback_desc;
+ };
+
+-static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
+-{
+- const char *alg = "ghash-generic";
+- struct crypto_shash *fallback;
+- struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+- fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+- if (IS_ERR(fallback)) {
+- printk(KERN_ERR
+- "Failed to allocate transformation for '%s': %ld\n",
+- alg, PTR_ERR(fallback));
+- return PTR_ERR(fallback);
+- }
+-
+- crypto_shash_set_flags(fallback,
+- crypto_shash_get_flags((struct crypto_shash
+- *) tfm));
+-
+- /* Check if the descsize defined in the algorithm is still enough. */
+- if (shash_tfm->descsize < sizeof(struct p8_ghash_desc_ctx)
+- + crypto_shash_descsize(fallback)) {
+- printk(KERN_ERR
+- "Desc size of the fallback implementation (%s) does not match the expected value: %lu vs %u\n",
+- alg,
+- shash_tfm->descsize - sizeof(struct p8_ghash_desc_ctx),
+- crypto_shash_descsize(fallback));
+- return -EINVAL;
+- }
+- ctx->fallback = fallback;
+-
+- return 0;
+-}
+-
+-static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
+-{
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+-
+- if (ctx->fallback) {
+- crypto_free_shash(ctx->fallback);
+- ctx->fallback = NULL;
+- }
+-}
+-
+ static int p8_ghash_init(struct shash_desc *desc)
+ {
+- struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ dctx->bytes = 0;
+ memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+- dctx->fallback_desc.tfm = ctx->fallback;
+- dctx->fallback_desc.flags = desc->flags;
+- return crypto_shash_init(&dctx->fallback_desc);
++ return 0;
+ }
+
+ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
+@@ -121,7 +67,51 @@ static int p8_ghash_setkey(struct crypto
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+- return crypto_shash_setkey(ctx->fallback, key, keylen);
++
++ memcpy(&ctx->key, key, GHASH_BLOCK_SIZE);
++
++ return 0;
++}
++
++static inline void __ghash_block(struct p8_ghash_ctx *ctx,
++ struct p8_ghash_desc_ctx *dctx)
++{
++ if (!IN_INTERRUPT) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
++ gcm_ghash_p8(dctx->shash, ctx->htable,
++ dctx->buffer, GHASH_DIGEST_SIZE);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++ } else {
++ crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE);
++ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++ }
++}
++
++static inline void __ghash_blocks(struct p8_ghash_ctx *ctx,
++ struct p8_ghash_desc_ctx *dctx,
++ const u8 *src, unsigned int srclen)
++{
++ if (!IN_INTERRUPT) {
++ preempt_disable();
++ pagefault_disable();
++ enable_kernel_vsx();
++ gcm_ghash_p8(dctx->shash, ctx->htable,
++ src, srclen);
++ disable_kernel_vsx();
++ pagefault_enable();
++ preempt_enable();
++ } else {
++ while (srclen >= GHASH_BLOCK_SIZE) {
++ crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE);
++ gf128mul_lle((be128 *)dctx->shash, &ctx->key);
++ srclen -= GHASH_BLOCK_SIZE;
++ src += GHASH_BLOCK_SIZE;
++ }
++ }
+ }
+
+ static int p8_ghash_update(struct shash_desc *desc,
+@@ -131,49 +121,33 @@ static int p8_ghash_update(struct shash_
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+- if (IN_INTERRUPT) {
+- return crypto_shash_update(&dctx->fallback_desc, src,
+- srclen);
+- } else {
+- if (dctx->bytes) {
+- if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+- memcpy(dctx->buffer + dctx->bytes, src,
+- srclen);
+- dctx->bytes += srclen;
+- return 0;
+- }
++ if (dctx->bytes) {
++ if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+ memcpy(dctx->buffer + dctx->bytes, src,
+- GHASH_DIGEST_SIZE - dctx->bytes);
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable,
+- dctx->buffer, GHASH_DIGEST_SIZE);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- src += GHASH_DIGEST_SIZE - dctx->bytes;
+- srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+- dctx->bytes = 0;
++ srclen);
++ dctx->bytes += srclen;
++ return 0;
+ }
+- len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+- if (len) {
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- src += len;
+- srclen -= len;
+- }
+- if (srclen) {
+- memcpy(dctx->buffer, src, srclen);
+- dctx->bytes = srclen;
+- }
+- return 0;
++ memcpy(dctx->buffer + dctx->bytes, src,
++ GHASH_DIGEST_SIZE - dctx->bytes);
++
++ __ghash_block(ctx, dctx);
++
++ src += GHASH_DIGEST_SIZE - dctx->bytes;
++ srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
++ dctx->bytes = 0;
++ }
++ len = srclen & ~(GHASH_DIGEST_SIZE - 1);
++ if (len) {
++ __ghash_blocks(ctx, dctx, src, len);
++ src += len;
++ srclen -= len;
+ }
++ if (srclen) {
++ memcpy(dctx->buffer, src, srclen);
++ dctx->bytes = srclen;
++ }
++ return 0;
+ }
+
+ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
+@@ -182,25 +156,14 @@ static int p8_ghash_final(struct shash_d
+ struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+ struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+- if (IN_INTERRUPT) {
+- return crypto_shash_final(&dctx->fallback_desc, out);
+- } else {
+- if (dctx->bytes) {
+- for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+- dctx->buffer[i] = 0;
+- preempt_disable();
+- pagefault_disable();
+- enable_kernel_vsx();
+- gcm_ghash_p8(dctx->shash, ctx->htable,
+- dctx->buffer, GHASH_DIGEST_SIZE);
+- disable_kernel_vsx();
+- pagefault_enable();
+- preempt_enable();
+- dctx->bytes = 0;
+- }
+- memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+- return 0;
++ if (dctx->bytes) {
++ for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
++ dctx->buffer[i] = 0;
++ __ghash_block(ctx, dctx);
++ dctx->bytes = 0;
+ }
++ memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
++ return 0;
+ }
+
+ struct shash_alg p8_ghash_alg = {
+@@ -215,11 +178,9 @@ struct shash_alg p8_ghash_alg = {
+ .cra_name = "ghash",
+ .cra_driver_name = "p8_ghash",
+ .cra_priority = 1000,
+- .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
++ .cra_flags = CRYPTO_ALG_TYPE_SHASH,
+ .cra_blocksize = GHASH_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+ .cra_module = THIS_MODULE,
+- .cra_init = p8_ghash_init_tfm,
+- .cra_exit = p8_ghash_exit_tfm,
+ },
+ };
diff --git a/patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch b/patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch
new file mode 100644
index 0000000000..8ea5c7af21
--- /dev/null
+++ b/patches.arch/crypto-vmx-return-correct-error-code-on-failed-setke.patch
@@ -0,0 +1,112 @@
+From 5749f687b62ea74a42aaf0723da49a18247649db Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Tue, 9 Apr 2019 23:46:35 -0700
+Subject: [PATCH] crypto: vmx - return correct error code on failed setkey
+
+References: bsc#1135661, bsc#1137162
+Patch-mainline: v5.2-rc1
+Git-commit: 694e0db6600c12f8172efb51cd4b4bbade958562
+
+In the VMX implementations of AES and AES modes, return -EINVAL when an
+invalid key length is provided, rather than some unusual error code
+determined via a series of additions. This makes the behavior match the
+other AES implementations in the kernel's crypto API.
+
+Cc: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ drivers/crypto/vmx/aes.c | 7 ++++---
+ drivers/crypto/vmx/aes_cbc.c | 7 ++++---
+ drivers/crypto/vmx/aes_ctr.c | 5 +++--
+ drivers/crypto/vmx/aes_xts.c | 9 +++++----
+ 4 files changed, 16 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
+index b0cd5aff3822..5e85dfca8242 100644
+--- a/drivers/crypto/vmx/aes.c
++++ b/drivers/crypto/vmx/aes.c
+@@ -83,13 +83,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
++ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_cipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 668e285f1a64..bb01e62700af 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -86,13 +86,14 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+- ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
++ ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
+index 386943e65a20..a9bac01ba2fb 100644
+--- a/drivers/crypto/vmx/aes_ctr.c
++++ b/drivers/crypto/vmx/aes_ctr.c
+@@ -88,8 +88,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
+index 16f6c0cef4ac..f9c224192802 100644
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -91,14 +91,15 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
+ pagefault_disable();
+ enable_kernel_vsx();
+ ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key);
+- ret += aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
+- ret += aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
++ ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key);
++ ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key);
+ disable_kernel_vsx();
+ pagefault_enable();
+ preempt_enable();
+
+- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+- return ret;
++ ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen);
++
++ return ret ? -EINVAL : 0;
+ }
+
+ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+--
+2.20.1
+
diff --git a/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch b/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch
index 6767501ce6..44b7c2efaf 100644
--- a/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch
+++ b/patches.arch/efi-arm-Don-t-mark-ACPI-reclaim-memory-as-MEMBLOCK_N.patch
@@ -3,7 +3,7 @@ Date: Fri, 18 Aug 2017 20:49:34 +0100
Subject: efi/arm: Don't mark ACPI reclaim memory as MEMBLOCK_NOMAP
Git-commit: f56ab9a5b73ca2aee777ccdf2d355ae2dd31db5a
Patch-mainline: v4.14-rc1
-References: bsc#1117158 bsc#1115688 bsc#1120566
+References: bsc#1117158 bsc#1115688 bsc#1120566 bsc#1134671
On ARM, regions of memory that are described by UEFI as having special
significance to the firmware itself are omitted from the linear mapping.
diff --git a/patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch b/patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch
new file mode 100644
index 0000000000..8fcb7cb394
--- /dev/null
+++ b/patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch
@@ -0,0 +1,122 @@
+From: Pu Wen <puwen@hygon.cn>
+Date: Sat, 23 Mar 2019 23:42:20 +0800
+Subject: x86/CPU/hygon: Fix phys_proc_id calculation logic for multi-die processors
+Git-commit: e0ceeae708cebf22c990c3d703a4ca187dc837f5
+Patch-mainline: v5.2-rc1
+References: fate#327735
+
+The Hygon family 18h multi-die processor platform supports 1, 2 or
+4-Dies per socket. The topology looks like this:
+
+ System View (with 1-Die 2-Socket):
+ |------------|
+ ------ -----
+ SOCKET0 | D0 | | D1 | SOCKET1
+ ------ -----
+
+ System View (with 2-Die 2-socket):
+ --------------------
+ | -------------|------
+ | | | |
+ ------------ ------------
+ SOCKET0 | D1 -- D0 | | D3 -- D2 | SOCKET1
+ ------------ ------------
+
+ System View (with 4-Die 2-Socket) :
+ --------------------
+ | -------------|------
+ | | | |
+ ------------ ------------
+ | D1 -- D0 | | D7 -- D6 |
+ | | \/ | | | | \/ | |
+ SOCKET0 | | /\ | | | | /\ | | SOCKET1
+ | D2 -- D3 | | D4 -- D5 |
+ ------------ ------------
+ | | | |
+ ------|------------| |
+ --------------------
+
+Currently
+
+ phys_proc_id = initial_apicid >> bits
+
+calculates the physical processor ID from the initial_apicid by shifting
+*bits*.
+
+However, this does not work for 1-Die and 2-Die 2-socket systems.
+
+According to document [1] section 2.1.11.1, the bits is the value of
+CPUID_Fn80000008_ECX[12:15]. The possible values are 4, 5 or 6 which
+mean:
+
+ 4 - 1 die
+ 5 - 2 dies
+ 6 - 3/4 dies.
+
+Hygon programs the initial ApicId the same way as AMD. The ApicId is
+read from CPUID_Fn00000001_EBX (see section 2.1.11.1 of referrence [1])
+and the definition is as below (see section 2.1.10.2.1.3 of [1]):
+
+ -------------------------------------------------
+ Bit | 6 | 5 4 | 3 | 2 1 0 |
+ |-----------|---------|--------|----------------|
+ IDs | Socket ID | Node ID | CCX ID | Core/Thread ID |
+ -------------------------------------------------
+
+So for 3/4-Die configurations, the bits variable is 6, which is the same
+as the ApicID definition field.
+
+For 1-Die and 2-Die configurations, bits is 4 or 5, which will cause the
+right shifted result to not be exactly the value of socket ID.
+
+However, the socket ID should be obtained from ApicId[6]. To fix the
+problem and match the ApicID field definition, set the shift bits to 6
+for all Hygon family 18h multi-die CPUs.
+
+Because AMD doesn't have 2-Socket systems with 1-Die/2-Die processors
+(see reference [2]), this doesn't need to be changed on the AMD side but
+only for Hygon.
+
+Refs:
+[1] https://www.amd.com/system/files/TechDocs/54945_PPR_Family_17h_Models_00h-0Fh.pdf
+[2] https://www.amd.com/en/products/specifications/processors
+
+ [bp: heavily massage commit message. ]
+
+Signed-off-by: Pu Wen <puwen@hygon.cn>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thomas Lendacky <Thomas.Lendacky@amd.com>
+Cc: Yazen Ghannam <yazen.ghannam@amd.com>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/1553355740-19999-1-git-send-email-puwen@hygon.cn
+---
+ arch/x86/kernel/cpu/hygon.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
+index cf25405444ab..415621ddb8a2 100644
+--- a/arch/x86/kernel/cpu/hygon.c
++++ b/arch/x86/kernel/cpu/hygon.c
+@@ -19,6 +19,8 @@
+
+ #include "cpu.h"
+
++#define APICID_SOCKET_ID_BIT 6
++
+ /*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
+@@ -87,6 +89,9 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
+ if (!err)
+ c->x86_coreid_bits = get_count_order(c->x86_max_cores);
+
++ /* Socket ID is ApicId[6] for these processors. */
++ c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
++
+ cacheinfo_hygon_init_llc_id(c, cpu, node_id);
+ } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
+ u64 value;
+
diff --git a/patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch b/patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch
new file mode 100644
index 0000000000..13b705d15b
--- /dev/null
+++ b/patches.drivers/0001-drivers-perf-Add-Cavium-ThunderX2-SoC-UNCORE-PMU-dri.patch
@@ -0,0 +1,928 @@
+From: "Kulkarni, Ganapatrao" <Ganapatrao.Kulkarni@cavium.com>
+Date: Thu, 6 Dec 2018 11:51:31 +0000
+Subject: drivers/perf: Add Cavium ThunderX2 SoC UNCORE PMU driver
+
+Git-commit: 69c32972d59388c041268e8206e8eb1acff29b9a
+Patch-mainline: v5.0-rc1
+References: fate#323052,bsc#1117114
+
+This patch adds a perf driver for the PMU UNCORE devices DDR4 Memory
+Controller(DMC) and Level 3 Cache(L3C). Each PMU supports up to 4
+counters. All counters lack overflow interrupt and are
+sampled periodically.
+
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
+[will: consistent enum cpuhp_state naming]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ drivers/perf/Kconfig | 9
+ drivers/perf/Makefile | 1
+ drivers/perf/thunderx2_pmu.c | 861 +++++++++++++++++++++++++++++++++++++++++++
+ include/linux/cpuhotplug.h | 1
+ 4 files changed, 872 insertions(+)
+ create mode 100644 drivers/perf/thunderx2_pmu.c
+
+--- a/drivers/perf/Kconfig
++++ b/drivers/perf/Kconfig
+@@ -87,6 +87,15 @@ config QCOM_L3_PMU
+ Adds the L3 cache PMU into the perf events subsystem for
+ monitoring L3 cache events.
+
++config THUNDERX2_PMU
++ tristate "Cavium ThunderX2 SoC PMU UNCORE"
++ depends on ARCH_THUNDER2 && ARM64 && ACPI && NUMA
++ default m
++ help
++ Provides support for ThunderX2 UNCORE events.
++ The SoC has PMU support in its L3 cache controller (L3C) and
++ in the DDR4 Memory Controller (DMC).
++
+ config XGENE_PMU
+ depends on ARCH_XGENE
+ bool "APM X-Gene SoC PMU"
+--- a/drivers/perf/Makefile
++++ b/drivers/perf/Makefile
+@@ -6,5 +6,6 @@ obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_ac
+ obj-$(CONFIG_HISI_PMU) += hisilicon/
+ obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
+ obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
++obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
+ obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
+ obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
+--- /dev/null
++++ b/drivers/perf/thunderx2_pmu.c
+@@ -0,0 +1,861 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * CAVIUM THUNDERX2 SoC PMU UNCORE
++ * Copyright (C) 2018 Cavium Inc.
++ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
++ */
++
++#include <linux/acpi.h>
++#include <linux/cpuhotplug.h>
++#include <linux/perf_event.h>
++#include <linux/platform_device.h>
++
++/* Each ThunderX2(TX2) Socket has a L3C and DMC UNCORE PMU device.
++ * Each UNCORE PMU device consists of 4 independent programmable counters.
++ * Counters are 32 bit and do not support overflow interrupt,
++ * they need to be sampled before overflow(i.e, at every 2 seconds).
++ */
++
++#define TX2_PMU_MAX_COUNTERS 4
++#define TX2_PMU_DMC_CHANNELS 8
++#define TX2_PMU_L3_TILES 16
++
++#define TX2_PMU_HRTIMER_INTERVAL (2 * NSEC_PER_SEC)
++#define GET_EVENTID(ev) ((ev->hw.config) & 0x1f)
++#define GET_COUNTERID(ev) ((ev->hw.idx) & 0x3)
++ /* 1 byte per counter(4 counters).
++ * Event id is encoded in bits [5:1] of a byte,
++ */
++#define DMC_EVENT_CFG(idx, val) ((val) << (((idx) * 8) + 1))
++
++#define L3C_COUNTER_CTL 0xA8
++#define L3C_COUNTER_DATA 0xAC
++#define DMC_COUNTER_CTL 0x234
++#define DMC_COUNTER_DATA 0x240
++
++/* L3C event IDs */
++#define L3_EVENT_READ_REQ 0xD
++#define L3_EVENT_WRITEBACK_REQ 0xE
++#define L3_EVENT_INV_N_WRITE_REQ 0xF
++#define L3_EVENT_INV_REQ 0x10
++#define L3_EVENT_EVICT_REQ 0x13
++#define L3_EVENT_INV_N_WRITE_HIT 0x14
++#define L3_EVENT_INV_HIT 0x15
++#define L3_EVENT_READ_HIT 0x17
++#define L3_EVENT_MAX 0x18
++
++/* DMC event IDs */
++#define DMC_EVENT_COUNT_CYCLES 0x1
++#define DMC_EVENT_WRITE_TXNS 0xB
++#define DMC_EVENT_DATA_TRANSFERS 0xD
++#define DMC_EVENT_READ_TXNS 0xF
++#define DMC_EVENT_MAX 0x10
++
++enum tx2_uncore_type {
++ PMU_TYPE_L3C,
++ PMU_TYPE_DMC,
++ PMU_TYPE_INVALID,
++};
++
++/*
++ * pmu on each socket has 2 uncore devices(dmc and l3c),
++ * each device has 4 counters.
++ */
++struct tx2_uncore_pmu {
++ struct hlist_node hpnode;
++ struct list_head entry;
++ struct pmu pmu;
++ char *name;
++ int node;
++ int cpu;
++ u32 max_counters;
++ u32 prorate_factor;
++ u32 max_events;
++ u64 hrtimer_interval;
++ void __iomem *base;
++ DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
++ struct perf_event *events[TX2_PMU_MAX_COUNTERS];
++ struct device *dev;
++ struct hrtimer hrtimer;
++ const struct attribute_group **attr_groups;
++ enum tx2_uncore_type type;
++ void (*init_cntr_base)(struct perf_event *event,
++ struct tx2_uncore_pmu *tx2_pmu);
++ void (*stop_event)(struct perf_event *event);
++ void (*start_event)(struct perf_event *event, int flags);
++};
++
++static LIST_HEAD(tx2_pmus);
++
++static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
++{
++ return container_of(pmu, struct tx2_uncore_pmu, pmu);
++}
++
++PMU_FORMAT_ATTR(event, "config:0-4");
++
++static struct attribute *l3c_pmu_format_attrs[] = {
++ &format_attr_event.attr,
++ NULL,
++};
++
++static struct attribute *dmc_pmu_format_attrs[] = {
++ &format_attr_event.attr,
++ NULL,
++};
++
++static const struct attribute_group l3c_pmu_format_attr_group = {
++ .name = "format",
++ .attrs = l3c_pmu_format_attrs,
++};
++
++static const struct attribute_group dmc_pmu_format_attr_group = {
++ .name = "format",
++ .attrs = dmc_pmu_format_attrs,
++};
++
++/*
++ * sysfs event attributes
++ */
++static ssize_t tx2_pmu_event_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct dev_ext_attribute *eattr;
++
++ eattr = container_of(attr, struct dev_ext_attribute, attr);
++ return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
++}
++
++#define TX2_EVENT_ATTR(name, config) \
++ PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
++ config, tx2_pmu_event_show)
++
++TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
++TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
++TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
++TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
++TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
++TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
++TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
++TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);
++
++static struct attribute *l3c_pmu_events_attrs[] = {
++ &tx2_pmu_event_attr_read_request.attr.attr,
++ &tx2_pmu_event_attr_writeback_request.attr.attr,
++ &tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
++ &tx2_pmu_event_attr_inv_request.attr.attr,
++ &tx2_pmu_event_attr_evict_request.attr.attr,
++ &tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
++ &tx2_pmu_event_attr_inv_hit.attr.attr,
++ &tx2_pmu_event_attr_read_hit.attr.attr,
++ NULL,
++};
++
++TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
++TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
++TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
++TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);
++
++static struct attribute *dmc_pmu_events_attrs[] = {
++ &tx2_pmu_event_attr_cnt_cycles.attr.attr,
++ &tx2_pmu_event_attr_write_txns.attr.attr,
++ &tx2_pmu_event_attr_data_transfers.attr.attr,
++ &tx2_pmu_event_attr_read_txns.attr.attr,
++ NULL,
++};
++
++static const struct attribute_group l3c_pmu_events_attr_group = {
++ .name = "events",
++ .attrs = l3c_pmu_events_attrs,
++};
++
++static const struct attribute_group dmc_pmu_events_attr_group = {
++ .name = "events",
++ .attrs = dmc_pmu_events_attrs,
++};
++
++/*
++ * sysfs cpumask attributes
++ */
++static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
++ return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
++}
++static DEVICE_ATTR_RO(cpumask);
++
++static struct attribute *tx2_pmu_cpumask_attrs[] = {
++ &dev_attr_cpumask.attr,
++ NULL,
++};
++
++static const struct attribute_group pmu_cpumask_attr_group = {
++ .attrs = tx2_pmu_cpumask_attrs,
++};
++
++/*
++ * Per PMU device attribute groups
++ */
++static const struct attribute_group *l3c_pmu_attr_groups[] = {
++ &l3c_pmu_format_attr_group,
++ &pmu_cpumask_attr_group,
++ &l3c_pmu_events_attr_group,
++ NULL
++};
++
++static const struct attribute_group *dmc_pmu_attr_groups[] = {
++ &dmc_pmu_format_attr_group,
++ &pmu_cpumask_attr_group,
++ &dmc_pmu_events_attr_group,
++ NULL
++};
++
++static inline u32 reg_readl(unsigned long addr)
++{
++ return readl((void __iomem *)addr);
++}
++
++static inline void reg_writel(u32 val, unsigned long addr)
++{
++ writel(val, (void __iomem *)addr);
++}
++
++static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
++{
++ int counter;
++
++ counter = find_first_zero_bit(tx2_pmu->active_counters,
++ tx2_pmu->max_counters);
++ if (counter == tx2_pmu->max_counters)
++ return -ENOSPC;
++
++ set_bit(counter, tx2_pmu->active_counters);
++ return counter;
++}
++
++static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
++{
++ clear_bit(counter, tx2_pmu->active_counters);
++}
++
++static void init_cntr_base_l3c(struct perf_event *event,
++ struct tx2_uncore_pmu *tx2_pmu)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ /* counter ctrl/data reg offset at 8 */
++ hwc->config_base = (unsigned long)tx2_pmu->base
++ + L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
++ hwc->event_base = (unsigned long)tx2_pmu->base
++ + L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
++}
++
++static void init_cntr_base_dmc(struct perf_event *event,
++ struct tx2_uncore_pmu *tx2_pmu)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ hwc->config_base = (unsigned long)tx2_pmu->base
++ + DMC_COUNTER_CTL;
++ /* counter data reg offset at 0xc */
++ hwc->event_base = (unsigned long)tx2_pmu->base
++ + DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
++}
++
++static void uncore_start_event_l3c(struct perf_event *event, int flags)
++{
++ u32 val;
++ struct hw_perf_event *hwc = &event->hw;
++
++ /* event id encoded in bits [07:03] */
++ val = GET_EVENTID(event) << 3;
++ reg_writel(val, hwc->config_base);
++ local64_set(&hwc->prev_count, 0);
++ reg_writel(0, hwc->event_base);
++}
++
++static inline void uncore_stop_event_l3c(struct perf_event *event)
++{
++ reg_writel(0, event->hw.config_base);
++}
++
++static void uncore_start_event_dmc(struct perf_event *event, int flags)
++{
++ u32 val;
++ struct hw_perf_event *hwc = &event->hw;
++ int idx = GET_COUNTERID(event);
++ int event_id = GET_EVENTID(event);
++
++ /* enable and start counters.
++ * 8 bits for each counter, bits[05:01] of a counter to set event type.
++ */
++ val = reg_readl(hwc->config_base);
++ val &= ~DMC_EVENT_CFG(idx, 0x1f);
++ val |= DMC_EVENT_CFG(idx, event_id);
++ reg_writel(val, hwc->config_base);
++ local64_set(&hwc->prev_count, 0);
++ reg_writel(0, hwc->event_base);
++}
++
++static void uncore_stop_event_dmc(struct perf_event *event)
++{
++ u32 val;
++ struct hw_perf_event *hwc = &event->hw;
++ int idx = GET_COUNTERID(event);
++
++ /* clear event type(bits[05:01]) to stop counter */
++ val = reg_readl(hwc->config_base);
++ val &= ~DMC_EVENT_CFG(idx, 0x1f);
++ reg_writel(val, hwc->config_base);
++}
++
++static void tx2_uncore_event_update(struct perf_event *event)
++{
++ s64 prev, delta, new = 0;
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++ enum tx2_uncore_type type;
++ u32 prorate_factor;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ type = tx2_pmu->type;
++ prorate_factor = tx2_pmu->prorate_factor;
++
++ new = reg_readl(hwc->event_base);
++ prev = local64_xchg(&hwc->prev_count, new);
++
++ /* handles rollover of 32 bit counter */
++ delta = (u32)(((1UL << 32) - prev) + new);
++
++ /* DMC event data_transfers granularity is 16 Bytes, convert it to 64 */
++ if (type == PMU_TYPE_DMC &&
++ GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
++ delta = delta/4;
++
++ /* L3C and DMC has 16 and 8 interleave channels respectively.
++ * The sampled value is for channel 0 and multiplied with
++ * prorate_factor to get the count for a device.
++ */
++ local64_add(delta * prorate_factor, &event->count);
++}
++
++static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
++{
++ int i = 0;
++ struct acpi_tx2_pmu_device {
++ __u8 id[ACPI_ID_LEN];
++ enum tx2_uncore_type type;
++ } devices[] = {
++ {"CAV901D", PMU_TYPE_L3C},
++ {"CAV901F", PMU_TYPE_DMC},
++ {"", PMU_TYPE_INVALID}
++ };
++
++ while (devices[i].type != PMU_TYPE_INVALID) {
++ if (!strcmp(acpi_device_hid(adev), devices[i].id))
++ break;
++ i++;
++ }
++
++ return devices[i].type;
++}
++
++static bool tx2_uncore_validate_event(struct pmu *pmu,
++ struct perf_event *event, int *counters)
++{
++ if (is_software_event(event))
++ return true;
++ /* Reject groups spanning multiple HW PMUs. */
++ if (event->pmu != pmu)
++ return false;
++
++ *counters = *counters + 1;
++ return true;
++}
++
++/*
++ * Make sure the group of events can be scheduled at once
++ * on the PMU.
++ */
++static bool tx2_uncore_validate_event_group(struct perf_event *event)
++{
++ struct perf_event *sibling, *leader = event->group_leader;
++ int counters = 0;
++
++ if (event->group_leader == event)
++ return true;
++
++ if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
++ return false;
++
++ for_each_sibling_event(sibling, leader) {
++ if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
++ return false;
++ }
++
++ if (!tx2_uncore_validate_event(event->pmu, event, &counters))
++ return false;
++
++ /*
++ * If the group requires more counters than the HW has,
++ * it cannot ever be scheduled.
++ */
++ return counters <= TX2_PMU_MAX_COUNTERS;
++}
++
++
++static int tx2_uncore_event_init(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ /* Test the event attr type check for PMU enumeration */
++ if (event->attr.type != event->pmu->type)
++ return -ENOENT;
++
++ /*
++ * SOC PMU counters are shared across all cores.
++ * Therefore, it does not support per-process mode.
++ * Also, it does not support event sampling mode.
++ */
++ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
++ return -EINVAL;
++
++ /* We have no filtering of any kind */
++ if (event->attr.exclude_user ||
++ event->attr.exclude_kernel ||
++ event->attr.exclude_hv ||
++ event->attr.exclude_idle ||
++ event->attr.exclude_host ||
++ event->attr.exclude_guest)
++ return -EINVAL;
++
++ if (event->cpu < 0)
++ return -EINVAL;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ if (tx2_pmu->cpu >= nr_cpu_ids)
++ return -EINVAL;
++ event->cpu = tx2_pmu->cpu;
++
++ if (event->attr.config >= tx2_pmu->max_events)
++ return -EINVAL;
++
++ /* store event id */
++ hwc->config = event->attr.config;
++
++ /* Validate the group */
++ if (!tx2_uncore_validate_event_group(event))
++ return -EINVAL;
++
++ return 0;
++}
++
++static void tx2_uncore_event_start(struct perf_event *event, int flags)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ hwc->state = 0;
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++
++ tx2_pmu->start_event(event, flags);
++ perf_event_update_userpage(event);
++
++ /* Start timer for first event */
++ if (bitmap_weight(tx2_pmu->active_counters,
++ tx2_pmu->max_counters) == 1) {
++ hrtimer_start(&tx2_pmu->hrtimer,
++ ns_to_ktime(tx2_pmu->hrtimer_interval),
++ HRTIMER_MODE_REL_PINNED);
++ }
++}
++
++static void tx2_uncore_event_stop(struct perf_event *event, int flags)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ if (hwc->state & PERF_HES_UPTODATE)
++ return;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ tx2_pmu->stop_event(event);
++ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
++ hwc->state |= PERF_HES_STOPPED;
++ if (flags & PERF_EF_UPDATE) {
++ tx2_uncore_event_update(event);
++ hwc->state |= PERF_HES_UPTODATE;
++ }
++}
++
++static int tx2_uncore_event_add(struct perf_event *event, int flags)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++
++ /* Allocate a free counter */
++ hwc->idx = alloc_counter(tx2_pmu);
++ if (hwc->idx < 0)
++ return -EAGAIN;
++
++ tx2_pmu->events[hwc->idx] = event;
++ /* set counter control and data registers base address */
++ tx2_pmu->init_cntr_base(event, tx2_pmu);
++
++ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
++ if (flags & PERF_EF_START)
++ tx2_uncore_event_start(event, flags);
++
++ return 0;
++}
++
++static void tx2_uncore_event_del(struct perf_event *event, int flags)
++{
++ struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
++ struct hw_perf_event *hwc = &event->hw;
++
++ tx2_uncore_event_stop(event, PERF_EF_UPDATE);
++
++ /* clear the assigned counter */
++ free_counter(tx2_pmu, GET_COUNTERID(event));
++
++ perf_event_update_userpage(event);
++ tx2_pmu->events[hwc->idx] = NULL;
++ hwc->idx = -1;
++}
++
++static void tx2_uncore_event_read(struct perf_event *event)
++{
++ tx2_uncore_event_update(event);
++}
++
++static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++ int max_counters, idx;
++
++ tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
++ max_counters = tx2_pmu->max_counters;
++
++ if (bitmap_empty(tx2_pmu->active_counters, max_counters))
++ return HRTIMER_NORESTART;
++
++ for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
++ struct perf_event *event = tx2_pmu->events[idx];
++
++ tx2_uncore_event_update(event);
++ }
++ hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
++ return HRTIMER_RESTART;
++}
++
++static int tx2_uncore_pmu_register(
++ struct tx2_uncore_pmu *tx2_pmu)
++{
++ struct device *dev = tx2_pmu->dev;
++ char *name = tx2_pmu->name;
++
++ /* Perf event registration */
++ tx2_pmu->pmu = (struct pmu) {
++ .module = THIS_MODULE,
++ .attr_groups = tx2_pmu->attr_groups,
++ .task_ctx_nr = perf_invalid_context,
++ .event_init = tx2_uncore_event_init,
++ .add = tx2_uncore_event_add,
++ .del = tx2_uncore_event_del,
++ .start = tx2_uncore_event_start,
++ .stop = tx2_uncore_event_stop,
++ .read = tx2_uncore_event_read,
++ };
++
++ tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
++ "%s", name);
++
++ return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
++}
++
++static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
++{
++ int ret, cpu;
++
++ cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
++ cpu_online_mask);
++
++ tx2_pmu->cpu = cpu;
++ hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ tx2_pmu->hrtimer.function = tx2_hrtimer_callback;
++
++ ret = tx2_uncore_pmu_register(tx2_pmu);
++ if (ret) {
++ dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
++ tx2_pmu->name);
++ return -ENODEV;
++ }
++
++ /* register hotplug callback for the pmu */
++ ret = cpuhp_state_add_instance(
++ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
++ &tx2_pmu->hpnode);
++ if (ret) {
++ dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
++ return ret;
++ }
++
++ /* Add to list */
++ list_add(&tx2_pmu->entry, &tx2_pmus);
++
++ dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
++ tx2_pmu->pmu.name);
++ return ret;
++}
++
++static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
++ acpi_handle handle, struct acpi_device *adev, u32 type)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++ void __iomem *base;
++ struct resource res;
++ struct resource_entry *rentry;
++ struct list_head list;
++ int ret;
++
++ INIT_LIST_HEAD(&list);
++ ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
++ if (ret <= 0) {
++ dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
++ return NULL;
++ }
++
++ list_for_each_entry(rentry, &list, node) {
++ if (resource_type(rentry->res) == IORESOURCE_MEM) {
++ res = *rentry->res;
++ break;
++ }
++ }
++
++ if (!rentry->res)
++ return NULL;
++
++ acpi_dev_free_resource_list(&list);
++ base = devm_ioremap_resource(dev, &res);
++ if (IS_ERR(base)) {
++ dev_err(dev, "PMU type %d: Fail to map resource\n", type);
++ return NULL;
++ }
++
++ tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
++ if (!tx2_pmu)
++ return NULL;
++
++ tx2_pmu->dev = dev;
++ tx2_pmu->type = type;
++ tx2_pmu->base = base;
++ tx2_pmu->node = dev_to_node(dev);
++ INIT_LIST_HEAD(&tx2_pmu->entry);
++
++ switch (tx2_pmu->type) {
++ case PMU_TYPE_L3C:
++ tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
++ tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
++ tx2_pmu->max_events = L3_EVENT_MAX;
++ tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
++ tx2_pmu->attr_groups = l3c_pmu_attr_groups;
++ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
++ "uncore_l3c_%d", tx2_pmu->node);
++ tx2_pmu->init_cntr_base = init_cntr_base_l3c;
++ tx2_pmu->start_event = uncore_start_event_l3c;
++ tx2_pmu->stop_event = uncore_stop_event_l3c;
++ break;
++ case PMU_TYPE_DMC:
++ tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
++ tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
++ tx2_pmu->max_events = DMC_EVENT_MAX;
++ tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
++ tx2_pmu->attr_groups = dmc_pmu_attr_groups;
++ tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
++ "uncore_dmc_%d", tx2_pmu->node);
++ tx2_pmu->init_cntr_base = init_cntr_base_dmc;
++ tx2_pmu->start_event = uncore_start_event_dmc;
++ tx2_pmu->stop_event = uncore_stop_event_dmc;
++ break;
++ case PMU_TYPE_INVALID:
++ devm_kfree(dev, tx2_pmu);
++ return NULL;
++ }
++
++ return tx2_pmu;
++}
++
++static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
++ void *data, void **return_value)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++ struct acpi_device *adev;
++ enum tx2_uncore_type type;
++
++ if (acpi_bus_get_device(handle, &adev))
++ return AE_OK;
++ if (acpi_bus_get_status(adev) || !adev->status.present)
++ return AE_OK;
++
++ type = get_tx2_pmu_type(adev);
++ if (type == PMU_TYPE_INVALID)
++ return AE_OK;
++
++ tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
++ handle, adev, type);
++
++ if (!tx2_pmu)
++ return AE_ERROR;
++
++ if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
++ /* Can't add the PMU device, abort */
++ return AE_ERROR;
++ }
++ return AE_OK;
++}
++
++static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
++ struct hlist_node *hpnode)
++{
++ struct tx2_uncore_pmu *tx2_pmu;
++
++ tx2_pmu = hlist_entry_safe(hpnode,
++ struct tx2_uncore_pmu, hpnode);
++
++ /* Pick this CPU, If there is no CPU/PMU association and both are
++ * from same node.
++ */
++ if ((tx2_pmu->cpu >= nr_cpu_ids) &&
++ (tx2_pmu->node == cpu_to_node(cpu)))
++ tx2_pmu->cpu = cpu;
++
++ return 0;
++}
++
++static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
++ struct hlist_node *hpnode)
++{
++ int new_cpu;
++ struct tx2_uncore_pmu *tx2_pmu;
++ struct cpumask cpu_online_mask_temp;
++
++ tx2_pmu = hlist_entry_safe(hpnode,
++ struct tx2_uncore_pmu, hpnode);
++
++ if (cpu != tx2_pmu->cpu)
++ return 0;
++
++ hrtimer_cancel(&tx2_pmu->hrtimer);
++ cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
++ cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
++ new_cpu = cpumask_any_and(
++ cpumask_of_node(tx2_pmu->node),
++ &cpu_online_mask_temp);
++
++ tx2_pmu->cpu = new_cpu;
++ if (new_cpu >= nr_cpu_ids)
++ return 0;
++ perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
++
++ return 0;
++}
++
++static const struct acpi_device_id tx2_uncore_acpi_match[] = {
++ {"CAV901C", 0},
++ {},
++};
++MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);
++
++static int tx2_uncore_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ acpi_handle handle;
++ acpi_status status;
++
++ set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));
++
++ if (!has_acpi_companion(dev))
++ return -ENODEV;
++
++ handle = ACPI_HANDLE(dev);
++ if (!handle)
++ return -EINVAL;
++
++ /* Walk through the tree for all PMU UNCORE devices */
++ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
++ tx2_uncore_pmu_add,
++ NULL, dev, NULL);
++ if (ACPI_FAILURE(status)) {
++ dev_err(dev, "failed to probe PMU devices\n");
++ return_ACPI_STATUS(status);
++ }
++
++ dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
++ return 0;
++}
++
++static int tx2_uncore_remove(struct platform_device *pdev)
++{
++ struct tx2_uncore_pmu *tx2_pmu, *temp;
++ struct device *dev = &pdev->dev;
++
++ if (!list_empty(&tx2_pmus)) {
++ list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
++ if (tx2_pmu->node == dev_to_node(dev)) {
++ cpuhp_state_remove_instance_nocalls(
++ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
++ &tx2_pmu->hpnode);
++ perf_pmu_unregister(&tx2_pmu->pmu);
++ list_del(&tx2_pmu->entry);
++ }
++ }
++ }
++ return 0;
++}
++
++static struct platform_driver tx2_uncore_driver = {
++ .driver = {
++ .name = "tx2-uncore-pmu",
++ .acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
++ },
++ .probe = tx2_uncore_probe,
++ .remove = tx2_uncore_remove,
++};
++
++static int __init tx2_uncore_driver_init(void)
++{
++ int ret;
++
++ ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
++ "perf/tx2/uncore:online",
++ tx2_uncore_pmu_online_cpu,
++ tx2_uncore_pmu_offline_cpu);
++ if (ret) {
++ pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret);
++ return ret;
++ }
++ ret = platform_driver_register(&tx2_uncore_driver);
++ if (ret)
++ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
++
++ return ret;
++}
++module_init(tx2_uncore_driver_init);
++
++static void __exit tx2_uncore_driver_exit(void)
++{
++ platform_driver_unregister(&tx2_uncore_driver);
++ cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
++}
++module_exit(tx2_uncore_driver_exit);
++
++MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -145,6 +145,7 @@ enum cpuhp_state {
+ CPUHP_AP_PERF_ARM_L2X0_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
++ CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+ CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
+ CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
diff --git a/patches.arch/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch b/patches.drivers/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch
index 4dcac36f8c..bda4242c73 100644
--- a/patches.arch/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch
+++ b/patches.drivers/0001-efi-honour-memory-reservations-passed-via-a-linux-sp.patch
@@ -4,7 +4,7 @@ Subject: efi: honour memory reservations passed via a linux specific config
table
Git-commit: 71e0940d52e107748b270213a01d3b1546657d74
Patch-mainline: v4.20-rc1
-References: bsc#1111147
+References: bsc#1111147 bsc#1117158 bsc#1134671
In order to allow the OS to reserve memory persistently across a
kexec, introduce a Linux-specific UEFI configuration table that
@@ -34,7 +34,7 @@ Signed-off-by: Matthias Brugger <mbrugger@suse.com>
};
EXPORT_SYMBOL(efi);
-@@ -463,6 +464,7 @@ static __initdata efi_config_table_type_
+@@ -462,6 +463,7 @@ static __initdata efi_config_table_type_
{EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
{EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
{LINUX_EFI_RANDOM_SEED_TABLE_GUID, "RNG", &efi.rng_seed},
@@ -42,7 +42,7 @@ Signed-off-by: Matthias Brugger <mbrugger@suse.com>
{NULL_GUID, NULL, NULL},
};
-@@ -567,6 +569,29 @@ int __init efi_config_parse_tables(void
+@@ -566,6 +568,29 @@ int __init efi_config_parse_tables(void
early_memunmap(tbl, sizeof(*tbl));
}
diff --git a/patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch b/patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch
new file mode 100644
index 0000000000..5603f3cef6
--- /dev/null
+++ b/patches.drivers/0002-Documentation-perf-Add-documentation-for-ThunderX2-P.patch
@@ -0,0 +1,72 @@
+From: "Kulkarni, Ganapatrao" <Ganapatrao.Kulkarni@cavium.com>
+Date: Thu, 6 Dec 2018 11:51:27 +0000
+Subject: Documentation: perf: Add documentation for ThunderX2 PMU uncore
+ driver
+
+Git-commit: d6310a3f3396e004bdb7a76787a2a3bbc643d0b7
+Patch-mainline: v5.0-rc1
+References: fate#323052,bsc#1117114
+
+The SoC has PMU support in its L3 cache controller (L3C) and in the
+DDR4 Memory Controller (DMC).
+
+Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
+[will: minor spelling and format fixes, dropped events list]
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+
+Signed-off-by: Mian Yousaf Kaukab <yousaf.kaukab@suse.com>
+---
+ Documentation/perf/thunderx2-pmu.txt | 41 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 41 insertions(+)
+ create mode 100644 Documentation/perf/thunderx2-pmu.txt
+
+diff --git a/Documentation/perf/thunderx2-pmu.txt b/Documentation/perf/thunderx2-pmu.txt
+new file mode 100644
+index 000000000000..dffc57143736
+--- /dev/null
++++ b/Documentation/perf/thunderx2-pmu.txt
+@@ -0,0 +1,41 @@
++Cavium ThunderX2 SoC Performance Monitoring Unit (PMU UNCORE)
++=============================================================
++
++The ThunderX2 SoC PMU consists of independent, system-wide, per-socket
++PMUs such as the Level 3 Cache (L3C) and DDR4 Memory Controller (DMC).
++
++The DMC has 8 interleaved channels and the L3C has 16 interleaved tiles.
++Events are counted for the default channel (i.e. channel 0) and prorated
++to the total number of channels/tiles.
++
++The DMC and L3C support up to 4 counters. Counters are independently
++programmable and can be started and stopped individually. Each counter
++can be set to a different event. Counters are 32-bit and do not support
++an overflow interrupt; they are read every 2 seconds.
++
++PMU UNCORE (perf) driver:
++
++The thunderx2_pmu driver registers per-socket perf PMUs for the DMC and
++L3C devices. Each PMU can be used to count up to 4 events
++simultaneously. The PMUs provide a description of their available events
++and configuration options under sysfs, see
++/sys/devices/uncore_<l3c_S/dmc_S/>; S is the socket id.
++
++The driver does not support sampling, therefore "perf record" will not
++work. Per-task perf sessions are also not supported.
++
++Examples:
++
++# perf stat -a -e uncore_dmc_0/cnt_cycles/ sleep 1
++
++# perf stat -a -e \
++uncore_dmc_0/cnt_cycles/,\
++uncore_dmc_0/data_transfers/,\
++uncore_dmc_0/read_txns/,\
++uncore_dmc_0/write_txns/ sleep 1
++
++# perf stat -a -e \
++uncore_l3c_0/read_request/,\
++uncore_l3c_0/read_hit/,\
++uncore_l3c_0/inv_request/,\
++uncore_l3c_0/inv_hit/ sleep 1
+--
+2.11.0
+
diff --git a/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch b/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch
index 9f726c1d09..40a12351c1 100644
--- a/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch
+++ b/patches.drivers/ACPI-fix-menuconfig-presentation-of-ACPI-submenu.patch
@@ -3,7 +3,7 @@ Date: Tue, 21 Aug 2018 22:37:33 +0200
Subject: ACPI: fix menuconfig presentation of ACPI submenu
Git-commit: f5d707ede37a962bc3cb9b3f8531a870dae29e46
Patch-mainline: v4.19-rc1
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
My fix for a recursive Kconfig dependency caused another issue where the
ACPI specific options end up in the top-level menu in 'menuconfig'. This
diff --git a/patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch b/patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch
new file mode 100644
index 0000000000..99a74ace30
--- /dev/null
+++ b/patches.drivers/ALSA-hda-realtek-Enable-micmute-LED-for-Huawei-lapto.patch
@@ -0,0 +1,66 @@
+From 0fbf21c3b36a9921467aa7525d2768b07f9f8fbb Mon Sep 17 00:00:00 2001
+From: Ayman Bagabas <ayman.bagabas@gmail.com>
+Date: Thu, 23 May 2019 05:30:11 -0400
+Subject: [PATCH] ALSA: hda/realtek - Enable micmute LED for Huawei laptops
+Git-commit: 0fbf21c3b36a9921467aa7525d2768b07f9f8fbb
+Patch-mainline: v5.2-rc3
+References: bsc#1051510
+
+Since this LED is found on all Huawei laptops, we can hook it to
+huawei-wmi platform driver to control it.
+
+Also, some renames have been made to use product name instead of common
+name to avoid confusions.
+
+Fixes: 8ac51bbc4cfe ("ALSA: hda: fix front speakers on Huawei MBXP")
+Signed-off-by: Ayman Bagabas <ayman.bagabas@gmail.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1ca2a83b65cd..f1bac03e954b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5752,7 +5752,7 @@ enum {
+ ALC298_FIXUP_TPT470_DOCK,
+ ALC255_FIXUP_DUMMY_LINEOUT_VERB,
+ ALC255_FIXUP_DELL_HEADSET_MIC,
+- ALC256_FIXUP_HUAWEI_MBXP_PINS,
++ ALC256_FIXUP_HUAWEI_MACH_WX9_PINS,
+ ALC295_FIXUP_HP_X360,
+ ALC221_FIXUP_HP_HEADSET_MIC,
+ ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+@@ -6043,7 +6043,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC
+ },
+- [ALC256_FIXUP_HUAWEI_MBXP_PINS] = {
++ [ALC256_FIXUP_HUAWEI_MACH_WX9_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ {0x12, 0x90a60130},
+@@ -7068,9 +7068,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+- SND_PCI_QUIRK(0x19e5, 0x3200, "Huawei MBX", ALC255_FIXUP_MIC_MUTE_LED),
+- SND_PCI_QUIRK(0x19e5, 0x3201, "Huawei MBX", ALC255_FIXUP_MIC_MUTE_LED),
+- SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
++ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
+
+ #if 0
+@@ -7129,6 +7127,7 @@ static const struct snd_pci_quirk alc269_fixup_vendor_tbl[] = {
+ SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
+ SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", ALC269_FIXUP_THINKPAD_ACPI),
++ SND_PCI_QUIRK_VENDOR(0x19e5, "Huawei Matebook", ALC255_FIXUP_MIC_MUTE_LED),
+ {}
+ };
+
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch b/patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch
new file mode 100644
index 0000000000..8ccf8c2537
--- /dev/null
+++ b/patches.drivers/ALSA-hda-realtek-Improve-the-headset-mic-for-Acer-As.patch
@@ -0,0 +1,72 @@
+From 9cb40eb184c4220d244a532bd940c6345ad9dbd9 Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Wed, 29 May 2019 12:41:38 +0800
+Subject: [PATCH] ALSA: hda/realtek - Improve the headset mic for Acer Aspire laptops
+Git-commit: 9cb40eb184c4220d244a532bd940c6345ad9dbd9
+Patch-mainline: v5.2-rc3
+References: bsc#1051510
+
+We met another Acer Aspire laptop which has the problem on the
+headset-mic, the Pin 0x19 is not set the corret configuration for a
+mic and the pin presence can't be detected too after plugging a
+headset. Kailang suggested that we should set the coeff to enable the
+mic and apply the ALC269_FIXUP_LIFEBOOK_EXTMIC. After doing that,
+both headset-mic presence and headset-mic work well.
+
+The existing ALC255_FIXUP_ACER_MIC_NO_PRESENCE set the headset-mic
+jack to be a phantom jack. Now since the jack can support presence
+unsol event, let us imporve it to set the jack to be a normal jack.
+
+https://bugs.launchpad.net/bugs/1821269
+
+Fixes: 5824ce8de7b1c ("ALSA: hda/realtek - Add support for Acer Aspire E5-475 headset mic")
+Cc: Chris Chiu <chiu@endlessm.com>
+Cc: Daniel Drake <drake@endlessm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f1bac03e954b..18cb48054e54 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6223,13 +6223,15 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI,
+ },
+ [ALC255_FIXUP_ACER_MIC_NO_PRESENCE] = {
+- .type = HDA_FIXUP_PINS,
+- .v.pins = (const struct hda_pintbl[]) {
+- { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+- { }
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* Enable the Mic */
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++ {}
+ },
+ .chained = true,
+- .chain_id = ALC255_FIXUP_HEADSET_MODE
++ .chain_id = ALC269_FIXUP_LIFEBOOK_EXTMIC
+ },
+ [ALC255_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+@@ -7273,6 +7275,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x18, 0x02a11030},
+ {0x19, 0x0181303F},
+ {0x21, 0x0221102f}),
++ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
++ {0x12, 0x90a60140},
++ {0x14, 0x90170120},
++ {0x21, 0x02211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0255, 0x1025, "Acer", ALC255_FIXUP_ACER_MIC_NO_PRESENCE,
+ {0x12, 0x90a601c0},
+ {0x14, 0x90171120},
+--
+2.16.4
+
diff --git a/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072 b/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072
index 35b4c452b4..2ca1565c25 100644
--- a/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072
+++ b/patches.drivers/ASoC-Intel-Add-machine-driver-for-Cherrytrail-CX2072
@@ -1,23 +1,34 @@
-From 5a62f24d17554da9cf1c292aa31329237bd982f1 Mon Sep 17 00:00:00 2001
-From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
-Date: Fri, 9 Dec 2016 07:49:28 -0600
-Subject: [PATCH] ASoC: Intel: Add machine driver for Cherrytrail-CX2072X
+From 3917da94f787e6c907e440653ead0c666a71379e Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 21 May 2019 08:26:53 +0200
+Subject: [PATCH] ASoC: Intel: Add machine driver for CX2072X on BYT/CHT
+ platforms
+Git-commit: 3917da94f787e6c907e440653ead0c666a71379e
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+Patch-mainline: Queued in subsystem maintainer repository
References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-Machine driver needed for Conexant CX2072X codec.
+This is an implementation of a machine driver needed for Conexant
+CX2072X codec on Intel Baytrail and Cherrytrail platforms. The
+current patch is based on the initial work by Pierre-Louis Bossart and
+the other Intel machine drivers.
-A couple of fixmes related to PLL
-Jack detection needs to be re-added
+The jack detection support (driven via the standard GPIO) was added on
+top of the original work.
-Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Tested with ASUS E200HA laptop.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=115531
+Acked-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
---
sound/soc/intel/Kconfig | 13 +
+ sound/soc/intel/atom/sst/sst_acpi.c | 4
sound/soc/intel/boards/Makefile | 2
- sound/soc/intel/boards/cht_cx2072x.c | 273 +++++++++++++++++++++++++++++++++++
- 3 files changed, 288 insertions(+)
+ sound/soc/intel/boards/cht_cx2072x.c | 337 +++++++++++++++++++++++++++++++++++
+ 4 files changed, 356 insertions(+)
create mode 100644 sound/soc/intel/boards/cht_cx2072x.c
--- a/sound/soc/intel/Kconfig
@@ -62,7 +73,7 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH) += snd-soc-sst-cht-bsw-max98090_ti.o
--- /dev/null
+++ b/sound/soc/intel/boards/cht_cx2072x.c
-@@ -0,0 +1,273 @@
+@@ -0,0 +1,337 @@
+/*
+ * cht_cx207x.c - ASoc DPCM Machine driver for CherryTrail w/ CX2072x
+ *
@@ -135,10 +146,49 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ return 0;
+}
+
++static struct snd_soc_jack cht_cx_headset;
++
++/* Headset jack detection DAPM pins */
++static struct snd_soc_jack_pin cht_cx_headset_pins[] = {
++ {
++ .pin = "Headset Mic",
++ .mask = SND_JACK_MICROPHONE,
++ },
++ {
++ .pin = "Headphone",
++ .mask = SND_JACK_HEADPHONE,
++ },
++};
++
++static const struct acpi_gpio_params headset_gpios = { 0, 0, false };
++
++static const struct acpi_gpio_mapping acpi_cht_cx2072x_gpios[] = {
++ { "headset-gpios", &headset_gpios, 1 },
++ {},
++};
++
++static int cht_cx_jack_status_check(void *data)
++{
++ return cx2072x_get_jack_state(data);
++}
++
++static struct snd_soc_jack_gpio cht_cx_gpio = {
++ .name = "headset",
++ .report = SND_JACK_HEADSET | SND_JACK_BTN_0,
++ .debounce_time = 150,
++ .wake = true,
++ .jack_status_check = cht_cx_jack_status_check,
++};
++
+static int cht_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ int ret;
+ struct snd_soc_card *card = rtd->card;
++ struct snd_soc_codec *codec = rtd->codec;
++
++ if (devm_acpi_dev_add_driver_gpios(codec->dev,
++ acpi_cht_cx2072x_gpios))
++ dev_warn(rtd->dev, "Unable to add GPIO mapping table\n");
+
+ card->dapm.idle_bias_off = true;
+
@@ -150,6 +200,24 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ return ret;
+ }
+
++ ret = snd_soc_card_jack_new(card, "Headset",
++ SND_JACK_HEADSET | SND_JACK_BTN_0,
++ &cht_cx_headset,
++ cht_cx_headset_pins,
++ ARRAY_SIZE(cht_cx_headset_pins));
++ if (ret)
++ return ret;
++
++ cht_cx_gpio.gpiod_dev = codec->dev;
++ cht_cx_gpio.data = codec;
++ ret = snd_soc_jack_add_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
++ if (ret) {
++ dev_err(rtd->dev, "Adding jack GPIO failed\n");
++ return ret;
++ }
++
++ cx2072x_enable_detect(codec);
++
+ return ret;
+}
+
@@ -324,11 +392,18 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ return devm_snd_soc_register_card(&pdev->dev, &chtcx2072x_card);
+}
+
++static int snd_cht_mc_remove(struct platform_device *pdev)
++{
++ snd_soc_jack_free_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
++ return 0;
++}
++
+static struct platform_driver snd_cht_mc_driver = {
+ .driver = {
+ .name = "cht-cx2072x",
+ },
+ .probe = snd_cht_mc_probe,
++ .remove = snd_cht_mc_remove,
+};
+module_platform_driver(snd_cht_mc_driver);
+
@@ -336,3 +411,23 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+MODULE_AUTHOR("Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:cht-cx2072x");
+--- a/sound/soc/intel/atom/sst/sst_acpi.c
++++ b/sound/soc/intel/atom/sst/sst_acpi.c
+@@ -503,6 +503,8 @@ static struct sst_acpi_mach sst_acpi_byt
+ &byt_rvp_platform_data },
+ {"10EC5648", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
+ &byt_rvp_platform_data },
++ {"14F10720", "cht-cx2072x", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
++ &byt_rvp_platform_data },
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+ * This is always last in the table so that it is selected only when
+@@ -541,6 +543,8 @@ static struct sst_acpi_mach sst_acpi_chv
+ /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
+ {"10EC5651", "bytcr_rt5651", "intel/fw_sst_22a8.bin", "bytcr_rt5651", NULL,
+ &chv_platform_data },
++ {"14F10720", "cht-cx2072x", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
++ &chv_platform_data },
+ #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
+ /*
+ * This is always last in the table so that it is selected only when
diff --git a/patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver b/patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver
deleted file mode 100644
index 0b5c9c493e..0000000000
--- a/patches.drivers/ASoC-Intel-add-support-for-CX2072x-machine-driver
+++ /dev/null
@@ -1,36 +0,0 @@
-From f6737dd82168f5a378fd035882b8ec67b3f7dba8 Mon Sep 17 00:00:00 2001
-From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
-Date: Fri, 9 Dec 2016 09:10:39 -0600
-Subject: [PATCH] ASoC: Intel: add support for CX2072x machine driver
-References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-
-Add ACPI reference to load machine driver
-
-Signed-off-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-
----
- sound/soc/intel/atom/sst/sst_acpi.c | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/sound/soc/intel/atom/sst/sst_acpi.c
-+++ b/sound/soc/intel/atom/sst/sst_acpi.c
-@@ -503,6 +503,8 @@ static struct sst_acpi_mach sst_acpi_byt
- &byt_rvp_platform_data },
- {"10EC5648", "cht-bsw-rt5645", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
- &byt_rvp_platform_data },
-+ {"14F10720", "cht-cx2072x", "intel/fw_sst_0f28.bin", "cht-bsw", NULL,
-+ &byt_rvp_platform_data },
- #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
- /*
- * This is always last in the table so that it is selected only when
-@@ -541,6 +543,8 @@ static struct sst_acpi_mach sst_acpi_chv
- /* some CHT-T platforms rely on RT5651, use Baytrail machine driver */
- {"10EC5651", "bytcr_rt5651", "intel/fw_sst_22a8.bin", "bytcr_rt5651", NULL,
- &chv_platform_data },
-+ {"14F10720", "cht-cx2072x", "intel/fw_sst_22a8.bin", "cht-bsw", NULL,
-+ &chv_platform_data },
- #if IS_ENABLED(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH)
- /*
- * This is always last in the table so that it is selected only when
diff --git a/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC b/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC
index 4908964b43..84a7521746 100644
--- a/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC
+++ b/patches.drivers/ASoC-add-support-for-Conexant-CX2072X-CODEC
@@ -1,9 +1,11 @@
-From e376dc97ffd2dbbc40174b08818cec8d6b6b30aa Mon Sep 17 00:00:00 2001
+From a497a4363706b3eb208c64e66e5b485bb3b186ac Mon Sep 17 00:00:00 2001
From: Simon Ho <simon.ho@conexant.com>
-Date: Wed, 5 Apr 2017 17:07:14 +0800
-Subject: [PATCH] ASoC: add support for Conexant CX2072X CODEC
+Date: Tue, 21 May 2019 08:26:52 +0200
+Subject: [PATCH] ASoC: Add support for Conexant CX2072X CODEC
+Git-commit: a497a4363706b3eb208c64e66e5b485bb3b186ac
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
+Patch-mainline: Queued in subsystem maintainer repository
References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
Initial commit of the Conexant CX2072X CODEC driver. Some features are
not present.
@@ -29,21 +31,55 @@ Featues of CX2072X codec:
-TDM stream supports up to 4 channels.
* AEC loopback support.
-[Fixed by tiwai:
- * missing declarations of jack detection helpers
- * missing DAPM entry definitions
- * missing power hooks
- * Workaround for the jack detection during cache-only]
+Further fixes by tiwai:
+ * Rebase to 5.2+
+ * Missing DAPM entry definitions
+ * Missing power hooks
+ * Fix uninitialized variable warning
+ * Rewrite jack detection stuff to use set_jack callback
+ * Plumbing jack detection code for Intel ASoC
+ * Move clk management into runtime PM
+ * Drop incorrect regcache usages
+ * Drop untested stuff: OF table, EQ/DRC handling
+ * Lots of code cleanups and minor refactoring
+The OF code was dropped due to the lack of testability.
+It should be easy to re-add once if someone can test it.
+
+v1->v2: No change
+v2->v3: Move register tables to appropriate place
+ Remove some confusing codes
+ Set snd_ctl_boolean_* helpers directly
+ Fix EQ put callback
+ Rename to "DAC1 Switch" from "DAC1 Mute Switch"
+ Drop superfluous regmap calls at shutdown
+ Avoid regmap_register_patch()
+ Add missing register definitions
+ Fix register access on big-endian machine
+ Remove regcache messes
+v3->v4: Fix the wrong endianess conversion in reg write
+ Minor code cleanups
+v4->v5: Move clk management to runtime PM
+ Sparse warning fixes
+ Some more code simplification
+ Drop tricky regcache fiddling
+ Apply mutex locks around possible racy sequences
+ Move exported jack detection stuff into set_jack callback
+v5->v6: Drop buggy&untested EQ and DRC codes
+ Lots of code reduction/cleanup
+ Add more comments about platform-specific stuff
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=115531
Signed-off-by: Simon Ho <simon.ho@conexant.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Mark Brown <broonie@kernel.org>
---
sound/soc/codecs/Kconfig | 5
sound/soc/codecs/Makefile | 2
- sound/soc/codecs/cx2072x.c | 2266 +++++++++++++++++++++++++++++++++++++++++++++
+ sound/soc/codecs/cx2072x.c | 2254 +++++++++++++++++++++++++++++++++++++++++++++
sound/soc/codecs/cx2072x.h | 320 ++++++
- 4 files changed, 2593 insertions(+)
+ 4 files changed, 2581 insertions(+)
create mode 100644 sound/soc/codecs/cx2072x.c
create mode 100644 sound/soc/codecs/cx2072x.h
@@ -88,7 +124,7 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
obj-$(CONFIG_SND_SOC_DA7218) += snd-soc-da7218.o
--- /dev/null
+++ b/sound/soc/codecs/cx2072x.c
-@@ -0,0 +1,2266 @@
+@@ -0,0 +1,2254 @@
+/*
+ * ALSA SoC CX20721/CX20723 codec driver
+ *
@@ -108,13 +144,10 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
-+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
-+#include <linux/firmware.h>
+#include <linux/regmap.h>
-+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
@@ -2323,14 +2356,6 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+};
+MODULE_DEVICE_TABLE(i2c, cx2072x_i2c_id);
+
-+static const struct of_device_id cx2072x_of_match[] = {
-+ { .compatible = "cnxt,cx20721", },
-+ { .compatible = "cnxt,cx20723", },
-+ { .compatible = "cnxt,cx7601", },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, cx2072x_of_match);
-+
+#ifdef CONFIG_ACPI
+static struct acpi_device_id cx2072x_acpi_match[] = {
+ { "14F10720", 0 },
@@ -2345,7 +2370,6 @@ Signed-off-by: Takashi Iwai <tiwai@suse.de>
+ .id_table = cx2072x_i2c_id,
+ .driver = {
+ .name = "cx2072x",
-+ .of_match_table = cx2072x_of_match,
+ .acpi_match_table = ACPI_PTR(cx2072x_acpi_match),
+ },
+};
diff --git a/patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX207 b/patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX207
deleted file mode 100644
index ca10c65ae4..0000000000
--- a/patches.drivers/ASoC-cx2072x-Add-DT-bingings-documentation-for-CX207
+++ /dev/null
@@ -1,57 +0,0 @@
-From a1cf13479084b988fa9ef536b8256d9908fc6e30 Mon Sep 17 00:00:00 2001
-From: Simon Ho <simon.ho@conexant.com>
-Date: Wed, 5 Apr 2017 17:07:13 +0800
-Subject: [PATCH] ASoC: cx2072x: Add DT bingings documentation for CX2072X
- CODEC
-References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-
-Initial version of CX2072X device tree bindings document.
-
-Signed-off-by: Simon Ho <simon.ho@conexant.com>
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-
----
- Documentation/devicetree/bindings/sound/cx2072x.txt | 36 ++++++++++++++++++++
- 1 file changed, 36 insertions(+)
- create mode 100644 Documentation/devicetree/bindings/sound/cx2072x.txt
-
---- /dev/null
-+++ b/Documentation/devicetree/bindings/sound/cx2072x.txt
-@@ -0,0 +1,36 @@
-+Conexant CX20721/CX20723/CX7601 audio CODEC
-+
-+The devices support I2C only.
-+
-+Required properties:
-+
-+ - compatible : One of "cnxt,cx20721", "cnxt,cx20723", "cnxt,cx7601".
-+
-+ - reg : the I2C address of the device for I2C, it should be <0x33>
-+
-+Optional properties:
-+
-+ - clocks : phandle and clock specifier for codec MCLK.
-+ - clock-names : Clock name string for 'clocks' attribute, should be "mclk".
-+
-+CODEC output pins:
-+ "PORTA" - Headphone
-+ "PORTG" - Class-D output
-+ "PORTE" - Line out
-+
-+CODEC output pins for Conexant DSP chip:
-+ "AEC REF" - AEC reference signal
-+
-+CODEC input pins:
-+ "PORTB" - Analog mic
-+ "PORTC" - Digital mic
-+ "PORTD" - Headset mic
-+
-+Example:
-+
-+codec: cx20721@33 {
-+ compatible = "cnxt,cx20721";
-+ reg = <0x33>;
-+ clocks = <&sco>;
-+ clock-names = "mclk";
-+};
diff --git a/patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x b/patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x
deleted file mode 100644
index a35989a676..0000000000
--- a/patches.drivers/ASoC-intel-Add-headset-jack-support-to-cht-cx2072x
+++ /dev/null
@@ -1,111 +0,0 @@
-From 8f5df7e07f68efe5ee5da4b95f6596138e3ff736 Mon Sep 17 00:00:00 2001
-From: Takashi Iwai <tiwai@suse.de>
-Date: Tue, 11 Apr 2017 15:51:02 +0200
-Subject: [PATCH] ASoC: intel: Add headset jack support to cht-cx2072x
-References: bsc#1068546
-Patch-mainline: Submitted, alsa-devel ML
-
-This patch adds plumbing up the jack detection via the standard gpio.
-
-Signed-off-by: Takashi Iwai <tiwai@suse.de>
-
----
- sound/soc/intel/boards/cht_cx2072x.c | 64 +++++++++++++++++++++++++++++++++++
- 1 file changed, 64 insertions(+)
-
---- a/sound/soc/intel/boards/cht_cx2072x.c
-+++ b/sound/soc/intel/boards/cht_cx2072x.c
-@@ -70,10 +70,49 @@ static int cht_aif1_hw_params(struct snd
- return 0;
- }
-
-+static struct snd_soc_jack cht_cx_headset;
-+
-+/* Headset jack detection DAPM pins */
-+static struct snd_soc_jack_pin cht_cx_headset_pins[] = {
-+ {
-+ .pin = "Headset Mic",
-+ .mask = SND_JACK_MICROPHONE,
-+ },
-+ {
-+ .pin = "Headphone",
-+ .mask = SND_JACK_HEADPHONE,
-+ },
-+};
-+
-+static const struct acpi_gpio_params headset_gpios = { 0, 0, false };
-+
-+static const struct acpi_gpio_mapping acpi_cht_cx2072x_gpios[] = {
-+ { "headset-gpios", &headset_gpios, 1 },
-+ {},
-+};
-+
-+static int cht_cx_jack_status_check(void *data)
-+{
-+ return cx2072x_get_jack_state(data);
-+}
-+
-+static struct snd_soc_jack_gpio cht_cx_gpio = {
-+ .name = "headset",
-+ .report = SND_JACK_HEADSET | SND_JACK_BTN_0,
-+ .debounce_time = 150,
-+ .wake = true,
-+ .jack_status_check = cht_cx_jack_status_check,
-+};
-+
- static int cht_codec_init(struct snd_soc_pcm_runtime *rtd)
- {
- int ret;
- struct snd_soc_card *card = rtd->card;
-+ struct snd_soc_codec *codec = rtd->codec;
-+
-+ if (devm_acpi_dev_add_driver_gpios(codec->dev,
-+ acpi_cht_cx2072x_gpios))
-+ dev_warn(rtd->dev, "Unable to add GPIO mapping table\n");
-
- card->dapm.idle_bias_off = true;
-
-@@ -85,6 +124,24 @@ static int cht_codec_init(struct snd_soc
- return ret;
- }
-
-+ ret = snd_soc_card_jack_new(card, "Headset",
-+ SND_JACK_HEADSET | SND_JACK_BTN_0,
-+ &cht_cx_headset,
-+ cht_cx_headset_pins,
-+ ARRAY_SIZE(cht_cx_headset_pins));
-+ if (ret)
-+ return ret;
-+
-+ cht_cx_gpio.gpiod_dev = codec->dev;
-+ cht_cx_gpio.data = codec;
-+ ret = snd_soc_jack_add_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
-+ if (ret) {
-+ dev_err(rtd->dev, "Adding jack GPIO failed\n");
-+ return ret;
-+ }
-+
-+ cx2072x_enable_detect(codec);
-+
- return ret;
- }
-
-@@ -259,11 +316,18 @@ static int snd_cht_mc_probe(struct platf
- return devm_snd_soc_register_card(&pdev->dev, &chtcx2072x_card);
- }
-
-+static int snd_cht_mc_remove(struct platform_device *pdev)
-+{
-+ snd_soc_jack_free_gpios(&cht_cx_headset, 1, &cht_cx_gpio);
-+ return 0;
-+}
-+
- static struct platform_driver snd_cht_mc_driver = {
- .driver = {
- .name = "cht-cx2072x",
- },
- .probe = snd_cht_mc_probe,
-+ .remove = snd_cht_mc_remove,
- };
- module_platform_driver(snd_cht_mc_driver);
-
diff --git a/patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch b/patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch
new file mode 100644
index 0000000000..a922436067
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch
@@ -0,0 +1,118 @@
+From: Josh Collier <josh.d.collier@intel.com>
+Date: Thu, 11 Apr 2019 07:07:42 -0700
+Subject: IB/hfi1: Add debugfs to control expansion ROM write protect
+Patch-mainline: v5.2-rc1
+Git-commit: 07c5ba912401b2ae3f13e3ce214158aec723c3fd
+References: jsc#SLE-4925
+
+Some kernels now enable CONFIG_IO_STRICT_DEVMEM which prevents multiple
+handles to PCI resource0. In order to continue to support expansion ROM
+updates while the driver is loaded, the driver must now provide an
+interface to control the expansion ROM write protection.
+
+This patch adds an exprom_wp debugfs interface that allows the hfi1_eprom
+user tool to disable the expansion ROM write protection by opening the
+file and writing a '1'. The write protection is released when writing a
+'0' or automatically re-enabled when the file handle is closed. The
+current implementation will only allow one handle to be opened at a time
+across all hfi1 devices.
+
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Josh Collier <josh.d.collier@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/debugfs.c | 74 +++++++++++++++++++++++++++++++++++
+ 1 file changed, 74 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/debugfs.c
++++ b/drivers/infiniband/hw/hfi1/debugfs.c
+@@ -1080,6 +1080,77 @@ static int qsfp2_debugfs_release(struct
+ return __qsfp_debugfs_release(in, fp, 1);
+ }
+
++#define EXPROM_WRITE_ENABLE BIT_ULL(14)
++
++static bool exprom_wp_disabled;
++
++static int exprom_wp_set(struct hfi1_devdata *dd, bool disable)
++{
++ u64 gpio_val = 0;
++
++ if (disable) {
++ gpio_val = EXPROM_WRITE_ENABLE;
++ exprom_wp_disabled = true;
++ dd_dev_info(dd, "Disable Expansion ROM Write Protection\n");
++ } else {
++ exprom_wp_disabled = false;
++ dd_dev_info(dd, "Enable Expansion ROM Write Protection\n");
++ }
++
++ write_csr(dd, ASIC_GPIO_OUT, gpio_val);
++ write_csr(dd, ASIC_GPIO_OE, gpio_val);
++
++ return 0;
++}
++
++static ssize_t exprom_wp_debugfs_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ return 0;
++}
++
++static ssize_t exprom_wp_debugfs_write(struct file *file,
++ const char __user *buf, size_t count,
++ loff_t *ppos)
++{
++ struct hfi1_pportdata *ppd = private2ppd(file);
++ char cdata;
++
++ if (count != 1)
++ return -EINVAL;
++ if (get_user(cdata, buf))
++ return -EFAULT;
++ if (cdata == '0')
++ exprom_wp_set(ppd->dd, false);
++ else if (cdata == '1')
++ exprom_wp_set(ppd->dd, true);
++ else
++ return -EINVAL;
++
++ return 1;
++}
++
++static unsigned long exprom_in_use;
++
++static int exprom_wp_debugfs_open(struct inode *in, struct file *fp)
++{
++ if (test_and_set_bit(0, &exprom_in_use))
++ return -EBUSY;
++
++ return 0;
++}
++
++static int exprom_wp_debugfs_release(struct inode *in, struct file *fp)
++{
++ struct hfi1_pportdata *ppd = private2ppd(fp);
++
++ if (exprom_wp_disabled)
++ exprom_wp_set(ppd->dd, false);
++ clear_bit(0, &exprom_in_use);
++
++ return 0;
++}
++
+ #define DEBUGFS_OPS(nm, readroutine, writeroutine) \
+ { \
+ .name = nm, \
+@@ -1119,6 +1190,9 @@ static const struct counter_info port_cn
+ qsfp1_debugfs_open, qsfp1_debugfs_release),
+ DEBUGFS_XOPS("qsfp2", qsfp2_debugfs_read, qsfp2_debugfs_write,
+ qsfp2_debugfs_open, qsfp2_debugfs_release),
++ DEBUGFS_XOPS("exprom_wp", exprom_wp_debugfs_read,
++ exprom_wp_debugfs_write, exprom_wp_debugfs_open,
++ exprom_wp_debugfs_release),
+ DEBUGFS_OPS("asic_flags", asic_flags_read, asic_flags_write),
+ DEBUGFS_OPS("dc8051_memory", dc8051_memory_read, NULL),
+ DEBUGFS_OPS("lcb", debugfs_lcb_read, debugfs_lcb_write),
diff --git a/patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch b/patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch
new file mode 100644
index 0000000000..11a83a8b86
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Add-selected-Rcv-counters.patch
@@ -0,0 +1,56 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Thu, 11 Apr 2019 07:17:10 -0700
+Subject: IB/hfi1: Add selected Rcv counters
+Patch-mainline: v5.2-rc1
+Git-commit: a9c62e007878ba88b703369c1cd9e26682453665
+References: jsc#SLE-4925
+
+These counters are required for error analysis and debug.
+
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip.c | 3 +++
+ drivers/infiniband/hw/hfi1/chip.h | 3 +++
+ drivers/infiniband/hw/hfi1/chip_registers.h | 3 +++
+ 3 files changed, 9 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -4104,6 +4104,9 @@ def_access_ibp_counter(seq_naks);
+
+ static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
+ [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
++[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
++[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
++[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
+ [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
+ CNTR_NORMAL),
+ [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
+--- a/drivers/infiniband/hw/hfi1/chip.h
++++ b/drivers/infiniband/hw/hfi1/chip.h
+@@ -858,6 +858,9 @@ static inline int idx_from_vl(int vl)
+ /* Per device counter indexes */
+ enum {
+ C_RCV_OVF = 0,
++ C_RX_LEN_ERR,
++ C_RX_ICRC_ERR,
++ C_RX_EBP,
+ C_RX_TID_FULL,
+ C_RX_TID_INVALID,
+ C_RX_TID_FLGMS,
+--- a/drivers/infiniband/hw/hfi1/chip_registers.h
++++ b/drivers/infiniband/hw/hfi1/chip_registers.h
+@@ -380,6 +380,9 @@
+ #define DC_LCB_PRF_TX_FLIT_CNT (DC_LCB_CSRS + 0x000000000418)
+ #define DC_LCB_STS_LINK_TRANSFER_ACTIVE (DC_LCB_CSRS + 0x000000000468)
+ #define DC_LCB_STS_ROUND_TRIP_LTP_CNT (DC_LCB_CSRS + 0x0000000004B0)
++#define RCV_LENGTH_ERR_CNT 0
++#define RCV_ICRC_ERR_CNT 6
++#define RCV_EBP_CNT 9
+ #define RCV_BUF_OVFL_CNT 10
+ #define RCV_CONTEXT_EGR_STALL 22
+ #define RCV_DATA_PKT_CNT 0
diff --git a/patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch b/patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch
new file mode 100644
index 0000000000..738a24107a
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Close-VNIC-sdma_progress-sleep-window.patch
@@ -0,0 +1,83 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Wed, 28 Nov 2018 10:32:48 -0800
+Subject: IB/hfi1: Close VNIC sdma_progress sleep window
+Patch-mainline: v5.0-rc1
+Git-commit: 18912c4524385dd6532c682cb9d4f6aa39ba8d47
+References: jsc#SLE-4925
+
+The call to sdma_progress() is called outside the wait lock.
+
+In this case, there is a race condition where sdma_progress() can return
+false and the sdma_engine can idle. If that happens, there will be no
+more sdma interrupts to cause the wakeup and the vnic_sdma xmit will hang.
+
+Fix by moving the lock to enclose the sdma_progress() call.
+
+Also, delete the tx_retry. The need for this was removed by:
+commit bcad29137a97 ("IB/hfi1: Serve the most starved iowait entry first")
+
+Fixes: 64551ede6cd1 ("IB/hfi1: VNIC SDMA support")
+Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/vnic_sdma.c | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -57,7 +57,6 @@
+
+ #define HFI1_VNIC_TXREQ_NAME_LEN 32
+ #define HFI1_VNIC_SDMA_DESC_WTRMRK 64
+-#define HFI1_VNIC_SDMA_RETRY_COUNT 1
+
+ /*
+ * struct vnic_txreq - VNIC transmit descriptor
+@@ -67,7 +66,6 @@
+ * @pad: pad buffer
+ * @plen: pad length
+ * @pbc_val: pbc value
+- * @retry_count: tx retry count
+ */
+ struct vnic_txreq {
+ struct sdma_txreq txreq;
+@@ -77,8 +75,6 @@ struct vnic_txreq {
+ unsigned char pad[HFI1_VNIC_MAX_PAD];
+ u16 plen;
+ __le64 pbc_val;
+-
+- u32 retry_count;
+ };
+
+ static void vnic_sdma_complete(struct sdma_txreq *txreq,
+@@ -196,7 +192,6 @@ int hfi1_vnic_send_dma(struct hfi1_devda
+ ret = build_vnic_tx_desc(sde, tx, pbc);
+ if (unlikely(ret))
+ goto free_desc;
+- tx->retry_count = 0;
+
+ ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
+ &tx->txreq, vnic_sdma->pkts_sent);
+@@ -238,14 +233,14 @@ static int hfi1_vnic_sdma_sleep(struct s
+ struct hfi1_vnic_sdma *vnic_sdma =
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
+ struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
+- struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
+
+- if (sdma_progress(sde, seq, txreq))
+- if (tx->retry_count++ < HFI1_VNIC_SDMA_RETRY_COUNT)
+- return -EAGAIN;
++ write_seqlock(&dev->iowait_lock);
++ if (sdma_progress(sde, seq, txreq)) {
++ write_sequnlock(&dev->iowait_lock);
++ return -EAGAIN;
++ }
+
+ vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
+- write_seqlock(&dev->iowait_lock);
+ if (list_empty(&vnic_sdma->wait.list))
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ write_sequnlock(&dev->iowait_lock);
diff --git a/patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch b/patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch
new file mode 100644
index 0000000000..3c688e3372
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Consider-LMC-in-16B-bypass-ingress-packet-ch.patch
@@ -0,0 +1,31 @@
+From: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Date: Wed, 28 Nov 2018 10:19:47 -0800
+Subject: IB/hfi1: Consider LMC in 16B/bypass ingress packet check
+Patch-mainline: v5.0-rc1
+Git-commit: ff8b67fccdb65402df78a1695c38be805252cf8e
+References: jsc#SLE-4925
+
+Ingress packet check for 16B/bypass packets should consider the port
+LMC. Not doing this will result in packets sent to the LMC LIDs getting
+dropped. The check is implemented in HW for 9B packets.
+
+Reviewed-by: Mike Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/driver.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -1427,7 +1427,7 @@ static int hfi1_bypass_ingress_pkt_check
+ if ((!(hfi1_is_16B_mcast(packet->dlid))) &&
+ (packet->dlid !=
+ opa_get_lid(be32_to_cpu(OPA_LID_PERMISSIVE), 16B))) {
+- if (packet->dlid != ppd->lid)
++ if ((packet->dlid & ~((1 << ppd->lmc) - 1)) != ppd->lid)
+ return -EINVAL;
+ }
+
diff --git a/patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch b/patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch
new file mode 100644
index 0000000000..b79b8a91bf
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Correctly-process-FECN-and-BECN-in-packets.patch
@@ -0,0 +1,459 @@
+From: Mitko Haralanov <mitko.haralanov@intel.com>
+Date: Wed, 28 Nov 2018 10:19:15 -0800
+Subject: IB/hfi1: Correctly process FECN and BECN in packets
+Patch-mainline: v5.0-rc1
+Git-commit: fe4dd4239277486ca3a468e7bbeafd7ef3a5634e
+References: jsc#SLE-4925
+
+A CA is supposed to ignore FECN bits in multicast, ACK, and CNP
+packets. This patch corrects the behavior of the HFI1 driver in this
+regard by ignoring FECNs in those packet types.
+
+While fixing the above behavior, fix the extraction of the FECN and BECN
+bits from the packet headers for both 9B and 16B packets.
+
+Furthermore, this patch corrects the driver's response to a FECN in RDMA
+READ RESPONSE packets. Instead of sending an "empty" ACK, the driver now
+sends a CNP packet. While editing that code path, add the missing trace
+for CNP packets.
+
+Fixes: 88733e3b8450 ("IB/hfi1: Add 16B UD support")
+Fixes: f59fb9e05109 ("IB/hfi1: Fix handling of FECN marked multicast packet")
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/driver.c | 70 ++++++++++++++++++++++++------------
+ drivers/infiniband/hw/hfi1/hfi.h | 35 ++++++++++++------
+ drivers/infiniband/hw/hfi1/rc.c | 30 +++++----------
+ drivers/infiniband/hw/hfi1/uc.c | 2 -
+ drivers/infiniband/hw/hfi1/ud.c | 33 ++++++++++------
+ 5 files changed, 104 insertions(+), 66 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -430,40 +430,60 @@ static const hfi1_handle_cnp hfi1_handle
+ [HFI1_PKT_TYPE_16B] = &return_cnp_16B
+ };
+
+-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp)
++/**
++ * hfi1_process_ecn_slowpath - Process FECN or BECN bits
++ * @qp: The packet's destination QP
++ * @pkt: The packet itself.
++ * @prescan: Is the caller the RXQ prescan
++ *
++ * Process the packet's FECN or BECN bits. By now, the packet
++ * has already been evaluated whether processing of those bit should
++ * be done.
++ * The significance of the @prescan argument is that if the caller
++ * is the RXQ prescan, a CNP will be send out instead of waiting for the
++ * normal packet processing to send an ACK with BECN set (or a CNP).
++ */
++bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
++ bool prescan)
+ {
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_other_headers *ohdr = pkt->ohdr;
+ struct ib_grh *grh = pkt->grh;
+- u32 rqpn = 0, bth1;
++ u32 rqpn = 0;
+ u16 pkey;
+ u32 rlid, slid, dlid = 0;
+- u8 hdr_type, sc, svc_type;
+- bool is_mcast = false;
++ u8 hdr_type, sc, svc_type, opcode;
++ bool is_mcast = false, ignore_fecn = false, do_cnp = false,
++ fecn, becn;
+
+ /* can be called from prescan */
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+- is_mcast = hfi1_is_16B_mcast(dlid);
+ pkey = hfi1_16B_get_pkey(pkt->hdr);
+ sc = hfi1_16B_get_sc(pkt->hdr);
+ dlid = hfi1_16B_get_dlid(pkt->hdr);
+ slid = hfi1_16B_get_slid(pkt->hdr);
++ is_mcast = hfi1_is_16B_mcast(dlid);
++ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_16B;
++ fecn = hfi1_16B_get_fecn(pkt->hdr);
++ becn = hfi1_16B_get_becn(pkt->hdr);
+ } else {
+- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
+ pkey = ib_bth_get_pkey(ohdr);
+ sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+- dlid = ib_get_dlid(pkt->hdr);
++ dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
++ ppd->lid;
+ slid = ib_get_slid(pkt->hdr);
++ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
++ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
++ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_9B;
++ fecn = ib_bth_get_fecn(ohdr);
++ becn = ib_bth_get_becn(ohdr);
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+- dlid = ppd->lid;
+ rlid = slid;
+ rqpn = ib_get_sqpn(pkt->ohdr);
+ svc_type = IB_CC_SVCTYPE_UD;
+@@ -485,22 +505,31 @@ void hfi1_process_ecn_slowpath(struct rv
+ svc_type = IB_CC_SVCTYPE_RC;
+ break;
+ default:
+- return;
++ return false;
+ }
+
+- bth1 = be32_to_cpu(ohdr->bth[1]);
++ ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
++ (opcode == IB_OPCODE_RC_ACKNOWLEDGE);
++ /*
++ * ACKNOWLEDGE packets do not get a CNP but this will be
++ * guarded by ignore_fecn above.
++ */
++ do_cnp = prescan ||
++ (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
++ opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE);
++
+ /* Call appropriate CNP handler */
+- if (do_cnp && (bth1 & IB_FECN_SMASK))
++ if (!ignore_fecn && do_cnp && fecn)
+ hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
+ dlid, rlid, sc, grh);
+
+- if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
+- u32 lqpn = bth1 & RVT_QPN_MASK;
++ if (becn) {
++ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ u8 sl = ibp->sc_to_sl[sc];
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
+ }
+-
++ return !ignore_fecn && fecn;
+ }
+
+ struct ps_mdata {
+@@ -599,7 +628,6 @@ static void __prescan_rxq(struct hfi1_pa
+ struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
+ u64 rhf = rhf_to_cpu(rhf_addr);
+ u32 etype = rhf_rcv_type(rhf), qpn, bth1;
+- int is_ecn = 0;
+ u8 lnh;
+
+ if (ps_done(&mdata, rhf, rcd))
+@@ -625,12 +653,10 @@ static void __prescan_rxq(struct hfi1_pa
+ goto next; /* just in case */
+ }
+
+- bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+- is_ecn = !!(bth1 & (IB_FECN_SMASK | IB_BECN_SMASK));
+-
+- if (!is_ecn)
++ if (!hfi1_may_ecn(packet))
+ goto next;
+
++ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+ qpn = bth1 & RVT_QPN_MASK;
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
+@@ -640,7 +666,7 @@ static void __prescan_rxq(struct hfi1_pa
+ goto next;
+ }
+
+- process_ecn(qp, packet, true);
++ hfi1_process_ecn_slowpath(qp, packet, true);
+ rcu_read_unlock();
+
+ /* turn off BECN, FECN */
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1804,13 +1804,20 @@ static inline struct hfi1_ibport *rcd_to
+ return &rcd->ppd->ibport_data;
+ }
+
+-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp);
+-static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp)
++/**
++ * hfi1_may_ecn - Check whether FECN or BECN processing should be done
++ * @pkt: the packet to be evaluated
++ *
++ * Check whether the FECN or BECN bits in the packet's header are
++ * enabled, depending on packet type.
++ *
++ * This function only checks for FECN and BECN bits. Additional checks
++ * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
++ * ensure correct handling.
++ */
++static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
+ {
+- bool becn;
+- bool fecn;
++ bool fecn, becn;
+
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ fecn = hfi1_16B_get_fecn(pkt->hdr);
+@@ -1819,10 +1826,18 @@ static inline bool process_ecn(struct rv
+ fecn = ib_bth_get_fecn(pkt->ohdr);
+ becn = ib_bth_get_becn(pkt->ohdr);
+ }
+- if (unlikely(fecn || becn)) {
+- hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
+- return fecn;
+- }
++ return fecn || becn;
++}
++
++bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
++ bool prescan);
++static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
++{
++ bool do_work;
++
++ do_work = hfi1_may_ecn(pkt);
++ if (unlikely(do_work))
++ return hfi1_process_ecn_slowpath(qp, pkt, false);
+ return false;
+ }
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -2049,8 +2049,7 @@ void hfi1_rc_rcv(struct hfi1_packet *pac
+ struct ib_reth *reth;
+ unsigned long flags;
+ int ret;
+- bool is_fecn = false;
+- bool copy_last = false;
++ bool copy_last = false, fecn;
+ u32 rkey;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
+
+@@ -2059,7 +2058,7 @@ void hfi1_rc_rcv(struct hfi1_packet *pac
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+- is_fecn = process_ecn(qp, packet, false);
++ fecn = process_ecn(qp, packet);
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+@@ -2070,8 +2069,6 @@ void hfi1_rc_rcv(struct hfi1_packet *pac
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ rc_rcv_resp(packet);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2347,11 +2344,11 @@ send_last:
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
++ if (fecn)
++ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2413,11 +2410,11 @@ send_last:
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
++ if (fecn)
++ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2430,16 +2427,9 @@ send_last:
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
+- if (psn & IB_BTH_REQ_ACK) {
+- if (packet->numpkt == 0) {
+- rc_cancel_ack(qp);
+- goto send_ack;
+- }
+- if (qp->r_adefered >= HFI1_PSN_CREDIT) {
+- rc_cancel_ack(qp);
+- goto send_ack;
+- }
+- if (unlikely(is_fecn)) {
++ if (psn & IB_BTH_REQ_ACK || fecn) {
++ if (packet->numpkt == 0 || fecn ||
++ qp->r_adefered >= HFI1_PSN_CREDIT) {
+ rc_cancel_ack(qp);
+ goto send_ack;
+ }
+@@ -2480,7 +2470,7 @@ nack_acc:
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ send_ack:
+- hfi1_send_rc_ack(packet, is_fecn);
++ hfi1_send_rc_ack(packet, fecn);
+ }
+
+ void hfi1_rc_hdrerr(
+--- a/drivers/infiniband/hw/hfi1/uc.c
++++ b/drivers/infiniband/hw/hfi1/uc.c
+@@ -321,7 +321,7 @@ void hfi1_uc_rcv(struct hfi1_packet *pac
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+- process_ecn(qp, packet, true);
++ process_ecn(qp, packet);
+
+ psn = ib_bth_get_psn(ohdr);
+ /* Compare the PSN verses the expected PSN. */
+--- a/drivers/infiniband/hw/hfi1/ud.c
++++ b/drivers/infiniband/hw/hfi1/ud.c
+@@ -51,6 +51,7 @@
+ #include "hfi.h"
+ #include "mad.h"
+ #include "verbs_txreq.h"
++#include "trace_ibhdrs.h"
+ #include "qp.h"
+
+ /* We support only two types - 9B and 16B for now */
+@@ -656,18 +657,19 @@ void return_cnp_16B(struct hfi1_ibport *
+ u32 bth0, plen, vl, hwords = 7;
+ u16 len;
+ u8 l4;
+- struct hfi1_16b_header hdr;
++ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 nwords;
+
++ hdr.hdr_type = HFI1_PKT_TYPE_16B;
+ /* Populate length */
+ nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
+ SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
+ if (old_grh) {
+- struct ib_grh *grh = &hdr.u.l.grh;
++ struct ib_grh *grh = &hdr.opah.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+@@ -675,11 +677,11 @@ void return_cnp_16B(struct hfi1_ibport *
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+- ohdr = &hdr.u.l.oth;
++ ohdr = &hdr.opah.u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+- ohdr = &hdr.u.oth;
++ ohdr = &hdr.opah.u.oth;
+ l4 = OPA_16B_L4_IB_LOCAL;
+ }
+
+@@ -693,7 +695,7 @@ void return_cnp_16B(struct hfi1_ibport *
+
+ /* Convert dwords to flits */
+ len = (hwords + nwords) >> 1;
+- hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);
++ hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);
+
+ plen = 2 /* PBC */ + hwords + nwords;
+ pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+@@ -701,9 +703,11 @@ void return_cnp_16B(struct hfi1_ibport *
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+- if (pbuf)
++ if (pbuf) {
++ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
++ }
+ }
+ }
+
+@@ -715,14 +719,15 @@ void return_cnp(struct hfi1_ibport *ibp,
+ u32 bth0, plen, vl, hwords = 5;
+ u16 lrh0;
+ u8 sl = ibp->sc_to_sl[sc5];
+- struct ib_header hdr;
++ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
++ hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ if (old_grh) {
+- struct ib_grh *grh = &hdr.u.l.grh;
++ struct ib_grh *grh = &hdr.ibh.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+@@ -730,11 +735,11 @@ void return_cnp(struct hfi1_ibport *ibp,
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+- ohdr = &hdr.u.l.oth;
++ ohdr = &hdr.ibh.u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+- ohdr = &hdr.u.oth;
++ ohdr = &hdr.ibh.u.oth;
+ lrh0 = HFI1_LRH_BTH;
+ }
+
+@@ -746,16 +751,18 @@ void return_cnp(struct hfi1_ibport *ibp,
+ ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
+ ohdr->bth[2] = 0; /* PSN 0 */
+
+- hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
++ hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
+ plen = 2 /* PBC */ + hwords;
+ pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+- if (pbuf)
++ if (pbuf) {
++ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
++ }
+ }
+ }
+
+@@ -912,7 +919,7 @@ void hfi1_ud_rcv(struct hfi1_packet *pac
+ src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
+ }
+
+- process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
++ process_ecn(qp, packet);
+ /*
+ * Get the number of bytes the message was padded by
+ * and drop incomplete packets.
diff --git a/patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch b/patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch
new file mode 100644
index 0000000000..2cdcc39d3e
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Dump-pio-info-for-non-user-send-contexts.patch
@@ -0,0 +1,142 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Wed, 28 Nov 2018 10:14:32 -0800
+Subject: IB/hfi1: Dump pio info for non-user send contexts
+Patch-mainline: v5.0-rc1
+Git-commit: 937488a85986faa743d12456970a0cbe83e3b04e
+References: jsc#SLE-4925
+
+This patch dumps the pio info for non-user send contexts to assist
+debugging in the field.
+
+Reviewed-by: Mike Marciniczyn <mike.marciniszyn@intel.com>
+Reviewed-by: Mike Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip_registers.h | 4 ++
+ drivers/infiniband/hw/hfi1/debugfs.c | 49 ++++++++++++++++++++++++++++
+ drivers/infiniband/hw/hfi1/pio.c | 25 ++++++++++++++
+ drivers/infiniband/hw/hfi1/pio.h | 3 +
+ 4 files changed, 81 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/chip_registers.h
++++ b/drivers/infiniband/hw/hfi1/chip_registers.h
+@@ -935,6 +935,10 @@
+ #define SEND_CTXT_CREDIT_CTRL_THRESHOLD_MASK 0x7FFull
+ #define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SHIFT 0
+ #define SEND_CTXT_CREDIT_CTRL_THRESHOLD_SMASK 0x7FFull
++#define SEND_CTXT_CREDIT_STATUS (TXE + 0x000000100018)
++#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK 0x7FFull
++#define SEND_CTXT_CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT 32
++#define SEND_CTXT_CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK 0x7FFull
+ #define SEND_CTXT_CREDIT_FORCE (TXE + 0x000000100028)
+ #define SEND_CTXT_CREDIT_FORCE_FORCE_RETURN_SMASK 0x1ull
+ #define SEND_CTXT_CREDIT_RETURN_ADDR (TXE + 0x000000100020)
+--- a/drivers/infiniband/hw/hfi1/debugfs.c
++++ b/drivers/infiniband/hw/hfi1/debugfs.c
+@@ -407,6 +407,54 @@ DEBUGFS_SEQ_FILE_OPS(rcds);
+ DEBUGFS_SEQ_FILE_OPEN(rcds)
+ DEBUGFS_FILE_OPS(rcds);
+
++static void *_pios_seq_start(struct seq_file *s, loff_t *pos)
++{
++ struct hfi1_ibdev *ibd;
++ struct hfi1_devdata *dd;
++
++ ibd = (struct hfi1_ibdev *)s->private;
++ dd = dd_from_dev(ibd);
++ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
++ return NULL;
++ return pos;
++}
++
++static void *_pios_seq_next(struct seq_file *s, void *v, loff_t *pos)
++{
++ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
++ struct hfi1_devdata *dd = dd_from_dev(ibd);
++
++ ++*pos;
++ if (!dd->send_contexts || *pos >= dd->num_send_contexts)
++ return NULL;
++ return pos;
++}
++
++static void _pios_seq_stop(struct seq_file *s, void *v)
++{
++}
++
++static int _pios_seq_show(struct seq_file *s, void *v)
++{
++ struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
++ struct hfi1_devdata *dd = dd_from_dev(ibd);
++ struct send_context_info *sci;
++ loff_t *spos = v;
++ loff_t i = *spos;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dd->sc_lock, flags);
++ sci = &dd->send_contexts[i];
++ if (sci && sci->type != SC_USER && sci->allocated && sci->sc)
++ seqfile_dump_sci(s, i, sci);
++ spin_unlock_irqrestore(&dd->sc_lock, flags);
++ return 0;
++}
++
++DEBUGFS_SEQ_FILE_OPS(pios);
++DEBUGFS_SEQ_FILE_OPEN(pios)
++DEBUGFS_FILE_OPS(pios);
++
+ /* read the per-device counters */
+ static ssize_t dev_counters_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+@@ -1143,6 +1191,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibd
+ DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(rcds, ibd->hfi1_ibdev_dbg, ibd);
++ DEBUGFS_SEQ_FILE_CREATE(pios, ibd->hfi1_ibdev_dbg, ibd);
+ DEBUGFS_SEQ_FILE_CREATE(sdma_cpu_list, ibd->hfi1_ibdev_dbg, ibd);
+ /* dev counter files */
+ for (i = 0; i < ARRAY_SIZE(cntr_ops); i++)
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -2137,3 +2137,28 @@ void free_credit_return(struct hfi1_devd
+ kfree(dd->cr_base);
+ dd->cr_base = NULL;
+ }
++
++void seqfile_dump_sci(struct seq_file *s, u32 i,
++ struct send_context_info *sci)
++{
++ struct send_context *sc = sci->sc;
++ u64 reg;
++
++ seq_printf(s, "SCI %u: type %u base %u credits %u\n",
++ i, sci->type, sci->base, sci->credits);
++ seq_printf(s, " flags 0x%x sw_inx %u hw_ctxt %u grp %u\n",
++ sc->flags, sc->sw_index, sc->hw_context, sc->group);
++ seq_printf(s, " sr_size %u credits %u sr_head %u sr_tail %u\n",
++ sc->sr_size, sc->credits, sc->sr_head, sc->sr_tail);
++ seq_printf(s, " fill %lu free %lu fill_wrap %u alloc_free %lu\n",
++ sc->fill, sc->free, sc->fill_wrap, sc->alloc_free);
++ seq_printf(s, " credit_intr_count %u credit_ctrl 0x%llx\n",
++ sc->credit_intr_count, sc->credit_ctrl);
++ reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CREDIT_STATUS));
++ seq_printf(s, " *hw_free %llu CurrentFree %llu LastReturned %llu\n",
++ (le64_to_cpu(*sc->hw_free) & CR_COUNTER_SMASK) >>
++ CR_COUNTER_SHIFT,
++ (reg >> SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_SHIFT)) &
++ SC(CREDIT_STATUS_CURRENT_FREE_COUNTER_MASK),
++ reg & SC(CREDIT_STATUS_LAST_RETURNED_COUNTER_SMASK));
++}
+--- a/drivers/infiniband/hw/hfi1/pio.h
++++ b/drivers/infiniband/hw/hfi1/pio.h
+@@ -329,4 +329,7 @@ void seg_pio_copy_start(struct pio_buf *
+ void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes);
+ void seg_pio_copy_end(struct pio_buf *pbuf);
+
++void seqfile_dump_sci(struct seq_file *s, u32 i,
++ struct send_context_info *sci);
++
+ #endif /* _PIO_H */
diff --git a/patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch b/patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch
new file mode 100644
index 0000000000..441d3b9f70
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch
@@ -0,0 +1,47 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:55:39 -0700
+Subject: IB/hfi1: Eliminate opcode tests on mr deref
+Patch-mainline: v5.1-rc5
+Git-commit: a8639a79e85c18c16c10089edd589c7948f19bbd
+References: jsc#SLE-4925
+
+When an old ack_queue entry is used to store an incoming request, it may
+need to clean up the old entry if it is still referencing the
+MR. Originally only RDMA READ request needed to reference MR on the
+responder side and therefore the opcode was tested when cleaning up the
+old entry. The introduction of tid rdma specific operations in the
+ack_queue makes the specific opcode tests wrong. Multiple opcodes (RDMA
+READ, TID RDMA READ, and TID RDMA WRITE) may need MR ref cleanup.
+
+Remove the opcode specific tests associated with the ack_queue.
+
+Fixes: f48ad614c100 ("IB/hfi1: Move driver out of staging")
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/rc.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -2302,7 +2302,7 @@ send_last:
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
++ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
+@@ -2376,7 +2376,7 @@ send_last:
+ update_ack_queue(qp, next);
+ }
+ e = &qp->s_ack_queue[qp->r_head_ack_queue];
+- if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
++ if (e->rdma_sge.mr) {
+ rvt_put_mr(e->rdma_sge.mr);
+ e->rdma_sge.mr = NULL;
+ }
diff --git a/patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch b/patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch
new file mode 100644
index 0000000000..16c34a14e2
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Failed-to-drain-send-queue-when-QP-is-put-in.patch
@@ -0,0 +1,58 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:55:19 -0700
+Subject: IB/hfi1: Failed to drain send queue when QP is put into error state
+Patch-mainline: v5.1-rc5
+Git-commit: 662d66466637862ef955f7f6e78a286d8cf0ebef
+References: jsc#SLE-4925
+
+When a QP is put into error state, all pending requests in the send work
+queue should be drained. The following sequence of events could lead to a
+failure, causing a request to hang:
+
+(1) The QP builds a packet and tries to send through SDMA engine.
+ However, PIO engine is still busy. Consequently, this packet is put on
+ the QP's tx list and the QP is put on the PIO waiting list. The field
+ qp->s_flags is set with HFI1_S_WAIT_PIO_DRAIN;
+
+(2) The QP is put into error state by the user application and
+ notify_error_qp() is called, which removes the QP from the PIO waiting
+ list and the packet from the QP's tx list. In addition, qp->s_flags is
+ cleared of RVT_S_ANY_WAIT_IO bits, which does not include
+ HFI1_S_WAIT_PIO_DRAIN bit;
+
+(3) The hfi1_schdule_send() function is called to drain the QP's send
+ queue. Subsequently, hfi1_do_send() is called. Since the flag bit
+ HFI1_S_WAIT_PIO_DRAIN is set in qp->s_flags, hfi1_send_ok() fails. As
+ a result, hfi1_do_send() bails out without draining any request from
+ the send queue;
+
+(4) The PIO engine completes the sending and tries to wake up any QP on
+ its waiting list. But the QP has been removed from the PIO waiting
+ list and therefore is kept in sleep forever.
+
+The fix is to clear qp->s_flags of HFI1_S_ANY_WAIT_IO bits in step (2).
+HFI1_S_ANY_WAIT_IO includes RVT_S_ANY_WAIT_IO and HFI1_S_WAIT_PIO_DRAIN.
+
+Fixes: 2e2ba09e48b7 ("IB/rdmavt, IB/hfi1: Create device dependent s_flags")
+Cc: <stable@vger.kernel.org> # 4.19.x+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Alex Estrin <alex.estrin@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/qp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/qp.c
++++ b/drivers/infiniband/hw/hfi1/qp.c
+@@ -833,7 +833,7 @@ void notify_error_qp(struct rvt_qp *qp)
+ write_seqlock(lock);
+ if (!list_empty(&priv->s_iowait.list) &&
+ !(qp->s_flags & RVT_S_BUSY)) {
+- qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
++ qp->s_flags &= ~HFI1_S_ANY_WAIT_IO;
+ list_del_init(&priv->s_iowait.list);
+ priv->s_iowait.lock = NULL;
+ rvt_put_qp(qp);
diff --git a/patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch b/patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch
new file mode 100644
index 0000000000..b99e63edd1
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Fix-WQ_MEM_RECLAIM-warning.patch
@@ -0,0 +1,57 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Mon, 18 Mar 2019 09:55:09 -0700
+Subject: IB/hfi1: Fix WQ_MEM_RECLAIM warning
+Patch-mainline: v5.2-rc1
+Git-commit: 4c4b1996b5db688e2dcb8242b0a3bf7b1e845e42
+References: jsc#SLE-4925
+
+The work_item cancels that occur when a QP is destroyed can elicit the
+following trace:
+
+ workqueue: WQ_MEM_RECLAIM ipoib_wq:ipoib_cm_tx_reap [ib_ipoib] is flushing !WQ_MEM_RECLAIM hfi0_0:_hfi1_do_send [hfi1]
+ WARNING: CPU: 7 PID: 1403 at kernel/workqueue.c:2486 check_flush_dependency+0xb1/0x100
+ Call Trace:
+ __flush_work.isra.29+0x8c/0x1a0
+ ? __switch_to_asm+0x40/0x70
+ __cancel_work_timer+0x103/0x190
+ ? schedule+0x32/0x80
+ iowait_cancel_work+0x15/0x30 [hfi1]
+ rvt_reset_qp+0x1f8/0x3e0 [rdmavt]
+ rvt_destroy_qp+0x65/0x1f0 [rdmavt]
+ ? _cond_resched+0x15/0x30
+ ib_destroy_qp+0xe9/0x230 [ib_core]
+ ipoib_cm_tx_reap+0x21c/0x560 [ib_ipoib]
+ process_one_work+0x171/0x370
+ worker_thread+0x49/0x3f0
+ kthread+0xf8/0x130
+ ? max_active_store+0x80/0x80
+ ? kthread_bind+0x10/0x10
+ ret_from_fork+0x35/0x40
+
+Since QP destruction frees memory, hfi1_wq should have the WQ_MEM_RECLAIM.
+
+The hfi1_wq does not allocate memory with GFP_KERNEL or otherwise become
+entangled with memory reclaim, so this flag is appropriate.
+
+Fixes: 0a226edd203f ("staging/rdma/hfi1: Use parallel workqueue for SDMA engines")
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/init.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -804,7 +804,8 @@ static int create_workqueues(struct hfi1
+ ppd->hfi1_wq =
+ alloc_workqueue(
+ "hfi%d_%d",
+- WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
++ WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
++ WQ_MEM_RECLAIM,
+ HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
+ dd->unit, pidx);
+ if (!ppd->hfi1_wq)
diff --git a/patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch b/patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch
new file mode 100644
index 0000000000..bf80d506ff
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch
@@ -0,0 +1,96 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:55:49 -0700
+Subject: IB/hfi1: Fix the allocation of RSM table
+Patch-mainline: v5.1-rc5
+Git-commit: d0294344470e6b52d097aa7369173f32d11f2f52
+References: jsc#SLE-4925
+
+The receive side mapping (RSM) on hfi1 hardware is a special
+matching mechanism to direct an incoming packet to a given
+hardware receive context. It has 4 instances of matching capabilities
+(RSM0 - RSM3) that share the same RSM table (RMT). The RMT has a total of
+256 entries, each of which points to a receive context.
+
+Currently, three instances of RSM have been used:
+1. RSM0 by QOS;
+2. RSM1 by PSM FECN;
+3. RSM2 by VNIC.
+
+Each RSM instance should reserve enough entries in RMT to function
+properly. Since both PSM and VNIC could allocate any receive context
+between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts, PSM FECN must
+reserve enough RMT entries to cover the entire receive context index
+range (dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt) instead of only
+the user receive contexts allocated for PSM
+(dd->num_user_contexts). Consequently, the sizing of
+dd->num_user_contexts in set_up_context_variables is incorrect.
+
+Fixes: 2280740f01ae ("IB/hfi1: Virtual Network Interface Controller (VNIC) HW support")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip.c | 26 +++++++++++++++++++-------
+ 1 file changed, 19 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -13222,7 +13222,7 @@ static int set_up_context_variables(stru
+ int total_contexts;
+ int ret;
+ unsigned ngroups;
+- int qos_rmt_count;
++ int rmt_count;
+ int user_rmt_reduced;
+ u32 n_usr_ctxts;
+ u32 send_contexts = chip_send_contexts(dd);
+@@ -13284,10 +13284,20 @@ static int set_up_context_variables(stru
+ n_usr_ctxts = rcv_contexts - total_contexts;
+ }
+
+- /* each user context requires an entry in the RMT */
+- qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
+- if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
+- user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
++ /*
++ * The RMT entries are currently allocated as shown below:
++ * 1. QOS (0 to 128 entries);
++ * 2. FECN for PSM (num_user_contexts + num_vnic_contexts);
++ * 3. VNIC (num_vnic_contexts).
++ * It should be noted that PSM FECN oversubscribe num_vnic_contexts
++ * entries of RMT because both VNIC and PSM could allocate any receive
++ * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
++ * and PSM FECN must reserve an RMT entry for each possible PSM receive
++ * context.
++ */
++ rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
++ if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
++ user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
+ dd_dev_err(dd,
+ "RMT size is reducing the number of user receive contexts from %u to %d\n",
+ n_usr_ctxts,
+@@ -14275,9 +14285,11 @@ static void init_user_fecn_handling(stru
+ u64 reg;
+ int i, idx, regoff, regidx;
+ u8 offset;
++ u32 total_cnt;
+
+ /* there needs to be enough room in the map table */
+- if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
++ total_cnt = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
++ if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
+ dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
+ return;
+ }
+@@ -14331,7 +14343,7 @@ static void init_user_fecn_handling(stru
+ /* add rule 1 */
+ add_rsm_rule(dd, RSM_INS_FECN, &rrd);
+
+- rmt->used += dd->num_user_contexts;
++ rmt->used += total_cnt;
+ }
+
+ /* Initialize RSM for VNIC */
diff --git a/patches.drivers/IB-hfi1-Fix-two-format-strings.patch b/patches.drivers/IB-hfi1-Fix-two-format-strings.patch
new file mode 100644
index 0000000000..55a84637ad
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Fix-two-format-strings.patch
@@ -0,0 +1,57 @@
+From: Bart Van Assche <bvanassche@acm.org>
+Date: Wed, 27 Mar 2019 16:50:50 -0700
+Subject: IB/hfi1: Fix two format strings
+Patch-mainline: v5.2-rc1
+Git-commit: 920d10e45844d1448d4d279d07fa91e5a7cee4f1
+References: jsc#SLE-4925
+
+Enable format string checking for hfi1_cdbg() and fix the resulting
+compiler warnings.
+
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/init.c | 4 ++--
+ drivers/infiniband/hw/hfi1/trace_dbg.h | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -468,7 +468,7 @@ int hfi1_create_ctxtdata(struct hfi1_ppo
+ if (rcd->egrbufs.size < hfi1_max_mtu) {
+ rcd->egrbufs.size = __roundup_pow_of_two(hfi1_max_mtu);
+ hfi1_cdbg(PROC,
+- "ctxt%u: eager bufs size too small. Adjusting to %zu\n",
++ "ctxt%u: eager bufs size too small. Adjusting to %u\n",
+ rcd->ctxt, rcd->egrbufs.size);
+ }
+ rcd->egrbufs.rcvtid_size = HFI1_MAX_EAGER_BUFFER_SIZE;
+@@ -2069,7 +2069,7 @@ int hfi1_setup_eagerbufs(struct hfi1_ctx
+ rcd->egrbufs.size = alloced_bytes;
+
+ hfi1_cdbg(PROC,
+- "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %zuKB\n",
++ "ctxt%u: Alloced %u rcv tid entries @ %uKB, total %uKB\n",
+ rcd->ctxt, rcd->egrbufs.alloced,
+ rcd->egrbufs.rcvtid_size / 1024, rcd->egrbufs.size / 1024);
+
+--- a/drivers/infiniband/hw/hfi1/trace_dbg.h
++++ b/drivers/infiniband/hw/hfi1/trace_dbg.h
+@@ -86,14 +86,14 @@ DECLARE_EVENT_CLASS(hfi1_trace_template,
+ * actual function to work and can not be in a macro.
+ */
+ #define __hfi1_trace_def(lvl) \
+-void __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
++void __printf(2, 3) __hfi1_trace_##lvl(const char *funct, char *fmt, ...); \
+ \
+ DEFINE_EVENT(hfi1_trace_template, hfi1_ ##lvl, \
+ TP_PROTO(const char *function, struct va_format *vaf), \
+ TP_ARGS(function, vaf))
+
+ #define __hfi1_trace_fn(lvl) \
+-void __hfi1_trace_##lvl(const char *func, char *fmt, ...) \
++void __printf(2, 3) __hfi1_trace_##lvl(const char *func, char *fmt, ...)\
+ { \
+ struct va_format vaf = { \
+ .fmt = fmt, \
diff --git a/patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch b/patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch
new file mode 100644
index 0000000000..ef9f7d53e5
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Ignore-LNI-errors-before-DC8051-transitions-.patch
@@ -0,0 +1,115 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Wed, 28 Nov 2018 10:19:04 -0800
+Subject: IB/hfi1: Ignore LNI errors before DC8051 transitions to Polling state
+Patch-mainline: v5.0-rc1
+Git-commit: c1a797c0818e0122c7ec8422edd971cfec9b15ea
+References: jsc#SLE-4925
+
+When it is requested to change its physical state back to Offline while in
+the process to go up, DC8051 will set the ERROR field in the
+DC8051_DBG_ERR_INFO_SET_BY_8051 register. This ERROR field will remain
+until the next time when DC8051 transitions from Offline to Polling.
+Subsequently, when the host requests DC8051 to change its physical state
+to Polling again, it may receive a DC8051 interrupt with the stale ERROR
+field still in DC8051_DBG_ERR_INFO_SET_BY_8051. If the host link state has
+been changed to Polling, this stale ERROR will force the host to
+transition to Offline state, resulting in a vicious cycle of Polling
+->Offline->Polling->Offline. On the other hand, if the host link state is
+still Offline when the stale ERROR is received, the stale ERROR will be
+ignored, and the link will come up correctly. This patch implements the
+correct behavior by changing host link state to Polling only after DC8051
+changes its physical state to Polling.
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Krzysztof Goreczny <krzysztof.goreczny@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/chip.c | 47 +++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 46 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -1072,6 +1072,8 @@ static void log_state_transition(struct
+ static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
+ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
+ int msecs);
++static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
++ int msecs);
+ static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
+ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
+ static void handle_temp_err(struct hfi1_devdata *dd);
+@@ -10771,13 +10773,15 @@ int set_link_state(struct hfi1_pportdata
+ break;
+
+ ppd->port_error_action = 0;
+- ppd->host_link_state = HLS_DN_POLL;
+
+ if (quick_linkup) {
+ /* quick linkup does not go into polling */
+ ret = do_quick_linkup(dd);
+ } else {
+ ret1 = set_physical_link_state(dd, PLS_POLLING);
++ if (!ret1)
++ ret1 = wait_phys_link_out_of_offline(ppd,
++ 3000);
+ if (ret1 != HCMD_SUCCESS) {
+ dd_dev_err(dd,
+ "Failed to transition to Polling link state, return 0x%x\n",
+@@ -10785,6 +10789,14 @@ int set_link_state(struct hfi1_pportdata
+ ret = -EINVAL;
+ }
+ }
++
++ /*
++ * Change the host link state after requesting DC8051 to
++ * change its physical state so that we can ignore any
++ * interrupt with stale LNI(XX) error, which will not be
++ * cleared until DC8051 transitions to Polling state.
++ */
++ ppd->host_link_state = HLS_DN_POLL;
+ ppd->offline_disabled_reason =
+ HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
+ /*
+@@ -12924,6 +12936,39 @@ static int wait_phys_link_offline_substa
+ read_state, msecs);
+ return -ETIMEDOUT;
+ }
++ usleep_range(1950, 2050); /* sleep 2ms-ish */
++ }
++
++ log_state_transition(ppd, read_state);
++ return read_state;
++}
++
++/*
++ * wait_phys_link_out_of_offline - wait for any out of offline state
++ * @ppd: port device
++ * @msecs: the number of milliseconds to wait
++ *
++ * Wait up to msecs milliseconds for any out of offline physical link
++ * state change to occur.
++ * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
++ */
++static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
++ int msecs)
++{
++ u32 read_state;
++ unsigned long timeout;
++
++ timeout = jiffies + msecs_to_jiffies(msecs);
++ while (1) {
++ read_state = read_physical_state(ppd->dd);
++ if ((read_state & 0xF0) != PLS_OFFLINE)
++ break;
++ if (time_after(jiffies, timeout)) {
++ dd_dev_err(ppd->dd,
++ "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
++ read_state, msecs);
++ return -ETIMEDOUT;
++ }
+ usleep_range(1950, 2050); /* sleep 2ms-ish */
+ }
+
diff --git a/patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch b/patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch
new file mode 100644
index 0000000000..42271784b0
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Incorrect-sizing-of-sge-for-PIO-will-OOPs.patch
@@ -0,0 +1,53 @@
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Wed, 28 Nov 2018 10:19:36 -0800
+Subject: IB/hfi1: Incorrect sizing of sge for PIO will OOPs
+Patch-mainline: v5.0-rc1
+Git-commit: dbc2970caef74e8ff41923d302aa6fb5a4812d0e
+References: jsc#SLE-4925
+
+An incorrect sge sizing in the HFI PIO path will cause an OOPs similar to
+this:
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+IP: [] hfi1_verbs_send_pio+0x3d8/0x530 [hfi1]
+PGD 0
+Oops: 0000 1 SMP
+ Call Trace:
+ ? hfi1_verbs_send_dma+0xad0/0xad0 [hfi1]
+ hfi1_verbs_send+0xdf/0x250 [hfi1]
+ ? make_rc_ack+0xa80/0xa80 [hfi1]
+ hfi1_do_send+0x192/0x430 [hfi1]
+ hfi1_do_send_from_rvt+0x10/0x20 [hfi1]
+ rvt_post_send+0x369/0x820 [rdmavt]
+ ib_uverbs_post_send+0x317/0x570 [ib_uverbs]
+ ib_uverbs_write+0x26f/0x420 [ib_uverbs]
+ ? security_file_permission+0x21/0xa0
+ vfs_write+0xbd/0x1e0
+ ? mntput+0x24/0x40
+ SyS_write+0x7f/0xe0
+ system_call_fastpath+0x16/0x1b
+
+Fix by adding the missing sizing check to correctly determine the sge
+length.
+
+Fixes: 7724105686e7 ("IB/hfi1: add driver files")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/verbs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -919,6 +919,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *q
+
+ if (slen > len)
+ slen = len;
++ if (slen > ss->sge.sge_length)
++ slen = ss->sge.sge_length;
+ rvt_update_sge(ss, slen, false);
+ seg_pio_copy_mid(pbuf, addr, slen);
+ len -= slen;
diff --git a/patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch b/patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch
new file mode 100644
index 0000000000..0ac9d39cb4
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Limit-VNIC-use-of-SDMA-engines-to-the-availa.patch
@@ -0,0 +1,43 @@
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Wed, 28 Nov 2018 10:19:25 -0800
+Subject: IB/hfi1: Limit VNIC use of SDMA engines to the available count
+Patch-mainline: v5.0-rc1
+Git-commit: dd6c6a5a2e1e7be615c81ca6d44c2e89e22cb463
+References: jsc#SLE-4925
+
+VNIC assumes that all SDMA engines have been configured for use. This is
+not necessarily true (i.e. if the count was constrained by the module
+parameter).
+
+Update VNICs usage to use the configured count, rather than the hardware
+count.
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Gary Leshner <gary.s.leshner@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/vnic_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/vnic_main.c
++++ b/drivers/infiniband/hw/hfi1/vnic_main.c
+@@ -816,14 +816,14 @@ struct net_device *hfi1_vnic_alloc_rn(st
+
+ size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
+ netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
+- chip_sdma_engines(dd), dd->num_vnic_contexts);
++ dd->num_sdma, dd->num_vnic_contexts);
+ if (!netdev)
+ return ERR_PTR(-ENOMEM);
+
+ rn = netdev_priv(netdev);
+ vinfo = opa_vnic_dev_priv(netdev);
+ vinfo->dd = dd;
+- vinfo->num_tx_q = chip_sdma_engines(dd);
++ vinfo->num_tx_q = dd->num_sdma;
+ vinfo->num_rx_q = dd->num_vnic_contexts;
+ vinfo->netdev = netdev;
+ rn->free_rdma_netdev = hfi1_vnic_free_rn;
diff --git a/patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch b/patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch
new file mode 100644
index 0000000000..afa2636d6c
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Reduce-lock-contention-on-iowait_lock-for-sd.patch
@@ -0,0 +1,287 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Wed, 28 Nov 2018 10:33:00 -0800
+Subject: IB/hfi1: Reduce lock contention on iowait_lock for sdma and pio
+Patch-mainline: v5.0-rc1
+Git-commit: 9aefcabe579bca06325ad9e577a36816f57386ff
+References: jsc#SLE-4925
+
+Commit 4e045572e2c2 ("IB/hfi1: Add unique txwait_lock for txreq events")
+laid the ground work to support per resource waiting locking.
+
+This patch adds that with a lock unique to each sdma engine and pio
+sendcontext and makes necessary changes for verbs, PSM, and vnic to use
+the new locks.
+
+This is particularly beneficial for smaller messages that will exhaust
+resources at a faster rate.
+
+Fixes: 7724105686e7 ("IB/hfi1: add driver files")
+Reviewed-by: Gary Leshner <Gary.S.Leshner@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/pio.c | 6 +++---
+ drivers/infiniband/hw/hfi1/pio.h | 2 ++
+ drivers/infiniband/hw/hfi1/qp.c | 20 ++++++++------------
+ drivers/infiniband/hw/hfi1/sdma.c | 10 +++++-----
+ drivers/infiniband/hw/hfi1/sdma.h | 1 +
+ drivers/infiniband/hw/hfi1/user_sdma.c | 5 ++---
+ drivers/infiniband/hw/hfi1/verbs.c | 7 +++----
+ drivers/infiniband/hw/hfi1/vnic_sdma.c | 7 +++----
+ 8 files changed, 27 insertions(+), 31 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -742,6 +742,7 @@ struct send_context *sc_alloc(struct hfi
+ spin_lock_init(&sc->alloc_lock);
+ spin_lock_init(&sc->release_lock);
+ spin_lock_init(&sc->credit_ctrl_lock);
++ seqlock_init(&sc->waitlock);
+ INIT_LIST_HEAD(&sc->piowait);
+ INIT_WORK(&sc->halt_work, sc_halted);
+ init_waitqueue_head(&sc->halt_wait);
+@@ -1593,7 +1594,6 @@ void hfi1_sc_wantpiobuf_intr(struct send
+ static void sc_piobufavail(struct send_context *sc)
+ {
+ struct hfi1_devdata *dd = sc->dd;
+- struct hfi1_ibdev *dev = &dd->verbs_dev;
+ struct list_head *list;
+ struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];
+ struct rvt_qp *qp;
+@@ -1612,7 +1612,7 @@ static void sc_piobufavail(struct send_c
+ * could end up with QPs on the wait list with the interrupt
+ * disabled.
+ */
+- write_seqlock_irqsave(&dev->iowait_lock, flags);
++ write_seqlock_irqsave(&sc->waitlock, flags);
+ while (!list_empty(list)) {
+ struct iowait *wait;
+
+@@ -1636,7 +1636,7 @@ static void sc_piobufavail(struct send_c
+ if (!list_empty(list))
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
+- write_sequnlock_irqrestore(&dev->iowait_lock, flags);
++ write_sequnlock_irqrestore(&sc->waitlock, flags);
+
+ /* Wake up the most starved one first */
+ if (n)
+--- a/drivers/infiniband/hw/hfi1/pio.h
++++ b/drivers/infiniband/hw/hfi1/pio.h
+@@ -127,6 +127,8 @@ struct send_context {
+ volatile __le64 *hw_free; /* HW free counter */
+ /* list for PIO waiters */
+ struct list_head piowait ____cacheline_aligned_in_smp;
++ seqlock_t waitlock;
++
+ spinlock_t credit_ctrl_lock ____cacheline_aligned_in_smp;
+ u32 credit_intr_count; /* count of credit intr users */
+ u64 credit_ctrl; /* cache for credit control */
+--- a/drivers/infiniband/hw/hfi1/qp.c
++++ b/drivers/infiniband/hw/hfi1/qp.c
+@@ -375,20 +375,18 @@ bool _hfi1_schedule_send(struct rvt_qp *
+
+ static void qp_pio_drain(struct rvt_qp *qp)
+ {
+- struct hfi1_ibdev *dev;
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ if (!priv->s_sendcontext)
+ return;
+- dev = to_idev(qp->ibqp.device);
+ while (iowait_pio_pending(&priv->s_iowait)) {
+- write_seqlock_irq(&dev->iowait_lock);
++ write_seqlock_irq(&priv->s_sendcontext->waitlock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
+- write_sequnlock_irq(&dev->iowait_lock);
++ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
+ iowait_pio_drain(&priv->s_iowait);
+- write_seqlock_irq(&dev->iowait_lock);
++ write_seqlock_irq(&priv->s_sendcontext->waitlock);
+ hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
+- write_sequnlock_irq(&dev->iowait_lock);
++ write_sequnlock_irq(&priv->s_sendcontext->waitlock);
+ }
+ }
+
+@@ -459,7 +457,6 @@ static int iowait_sleep(
+ struct hfi1_qp_priv *priv;
+ unsigned long flags;
+ int ret = 0;
+- struct hfi1_ibdev *dev;
+
+ qp = tx->qp;
+ priv = qp->priv;
+@@ -472,9 +469,8 @@ static int iowait_sleep(
+ * buffer and undoing the side effects of the copy.
+ */
+ /* Make a common routine? */
+- dev = &sde->dd->verbs_dev;
+ list_add_tail(&stx->list, &wait->tx_head);
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ if (sdma_progress(sde, seq, stx))
+ goto eagain;
+ if (list_empty(&priv->s_iowait.list)) {
+@@ -485,11 +481,11 @@ static int iowait_sleep(
+ qp->s_flags |= RVT_S_WAIT_DMA_DESC;
+ iowait_queue(pkts_sent, &priv->s_iowait,
+ &sde->dmawait);
+- priv->s_iowait.lock = &dev->iowait_lock;
++ priv->s_iowait.lock = &sde->waitlock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
+ rvt_get_qp(qp);
+ }
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ hfi1_qp_unbusy(qp, wait);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ ret = -EBUSY;
+@@ -499,7 +495,7 @@ static int iowait_sleep(
+ }
+ return ret;
+ eagain:
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ list_del_init(&stx->list);
+ return -EAGAIN;
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -1424,6 +1424,7 @@ int sdma_init(struct hfi1_devdata *dd, u
+ seqlock_init(&sde->head_lock);
+ spin_lock_init(&sde->senddmactrl_lock);
+ spin_lock_init(&sde->flushlist_lock);
++ seqlock_init(&sde->waitlock);
+ /* insure there is always a zero bit */
+ sde->ahg_bits = 0xfffffffe00000000ULL;
+
+@@ -1757,7 +1758,6 @@ static void sdma_desc_avail(struct sdma_
+ struct iowait *wait, *nw;
+ struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
+ uint i, n = 0, seq, max_idx = 0;
+- struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
+ u8 max_starved_cnt = 0;
+
+ #ifdef CONFIG_SDMA_VERBOSITY
+@@ -1767,10 +1767,10 @@ static void sdma_desc_avail(struct sdma_
+ #endif
+
+ do {
+- seq = read_seqbegin(&dev->iowait_lock);
++ seq = read_seqbegin(&sde->waitlock);
+ if (!list_empty(&sde->dmawait)) {
+ /* at least one item */
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ /* Harvest waiters wanting DMA descriptors */
+ list_for_each_entry_safe(
+ wait,
+@@ -1793,10 +1793,10 @@ static void sdma_desc_avail(struct sdma_
+ list_del_init(&wait->list);
+ waits[n++] = wait;
+ }
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ break;
+ }
+- } while (read_seqretry(&dev->iowait_lock, seq));
++ } while (read_seqretry(&sde->waitlock, seq));
+
+ /* Schedule the most starved one first */
+ if (n)
+--- a/drivers/infiniband/hw/hfi1/sdma.h
++++ b/drivers/infiniband/hw/hfi1/sdma.h
+@@ -382,6 +382,7 @@ struct sdma_engine {
+ u64 progress_int_cnt;
+
+ /* private: */
++ seqlock_t waitlock;
+ struct list_head dmawait;
+
+ /* CONFIG SDMA for now, just blindly duplicate */
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -130,7 +130,6 @@ static int defer_packet_queue(
+ {
+ struct hfi1_user_sdma_pkt_q *pq =
+ container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
+- struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
+ struct user_sdma_txreq *tx =
+ container_of(txreq, struct user_sdma_txreq, txreq);
+
+@@ -144,10 +143,10 @@ static int defer_packet_queue(
+ * it is supposed to be enqueued.
+ */
+ xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ if (list_empty(&pq->busy.list))
+ iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ eagain:
+ return -EAGAIN;
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -765,7 +765,6 @@ static int pio_wait(struct rvt_qp *qp,
+ {
+ struct hfi1_qp_priv *priv = qp->priv;
+ struct hfi1_devdata *dd = sc->dd;
+- struct hfi1_ibdev *dev = &dd->verbs_dev;
+ unsigned long flags;
+ int ret = 0;
+
+@@ -777,7 +776,7 @@ static int pio_wait(struct rvt_qp *qp,
+ */
+ spin_lock_irqsave(&qp->s_lock, flags);
+ if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sc->waitlock);
+ list_add_tail(&ps->s_txreq->txreq.list,
+ &ps->wait->tx_head);
+ if (list_empty(&priv->s_iowait.list)) {
+@@ -790,14 +789,14 @@ static int pio_wait(struct rvt_qp *qp,
+ was_empty = list_empty(&sc->piowait);
+ iowait_queue(ps->pkts_sent, &priv->s_iowait,
+ &sc->piowait);
+- priv->s_iowait.lock = &dev->iowait_lock;
++ priv->s_iowait.lock = &sc->waitlock;
+ trace_hfi1_qpsleep(qp, RVT_S_WAIT_PIO);
+ rvt_get_qp(qp);
+ /* counting: only call wantpiobuf_intr if first user */
+ if (was_empty)
+ hfi1_sc_wantpiobuf_intr(sc, 1);
+ }
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sc->waitlock);
+ hfi1_qp_unbusy(qp, ps->wait);
+ ret = -EBUSY;
+ }
+--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
++++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
+@@ -232,18 +232,17 @@ static int hfi1_vnic_sdma_sleep(struct s
+ {
+ struct hfi1_vnic_sdma *vnic_sdma =
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
+- struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
+
+- write_seqlock(&dev->iowait_lock);
++ write_seqlock(&sde->waitlock);
+ if (sdma_progress(sde, seq, txreq)) {
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ return -EAGAIN;
+ }
+
+ vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
+ if (list_empty(&vnic_sdma->wait.list))
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+- write_sequnlock(&dev->iowait_lock);
++ write_sequnlock(&sde->waitlock);
+ return -EBUSY;
+ }
+
diff --git a/patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch b/patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch
new file mode 100644
index 0000000000..2a7f0aadec
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch
@@ -0,0 +1,40 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Mon, 18 Mar 2019 09:59:00 -0700
+Subject: IB/hfi1: Remove WARN_ON when freeing expected receive groups
+Patch-mainline: v5.2-rc1
+Git-commit: 8da0f0f26f80612efadc23beb72d5b66a498a386
+References: jsc#SLE-4925
+
+When PSM user receive context is freed, the expected receive groups
+allocated by the receive context will also been freed. However, if there
+are still TID entries in use, the receive groups rcd->tid_full_list or
+rcd->tid_used_list will not be empty, and thus triggering the WARN_ONs in
+the function hfi1_free_ctxt_rcv_groups(). Even if the two lists may not
+be empty, the hfi1 driver will free all TID entries and receive groups
+associated with the receive context to prevent any resource leakage. Since
+a clean user application exit is not controlled by the hfi1 driver, this
+patch will remove the WARN_ONs in hfi1_free_ctxt_rcv_groups().
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/exp_rcv.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/exp_rcv.c
++++ b/drivers/infiniband/hw/hfi1/exp_rcv.c
+@@ -112,9 +112,6 @@ int hfi1_alloc_ctxt_rcv_groups(struct hf
+ */
+ void hfi1_free_ctxt_rcv_groups(struct hfi1_ctxtdata *rcd)
+ {
+- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_full_list));
+- WARN_ON(!EXP_TID_SET_EMPTY(rcd->tid_used_list));
+-
+ kfree(rcd->groups);
+ rcd->groups = NULL;
+ hfi1_exp_tid_group_init(rcd);
diff --git a/patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch b/patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch
new file mode 100644
index 0000000000..ae2c29b865
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Remove-overly-conservative-VM_EXEC-flag-chec.patch
@@ -0,0 +1,44 @@
+From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
+Date: Thu, 17 Jan 2019 12:42:04 -0800
+Subject: IB/hfi1: Remove overly conservative VM_EXEC flag check
+Patch-mainline: v5.0-rc5
+Git-commit: 7709b0dc265f28695487712c45f02bbd1f98415d
+References: jsc#SLE-4925
+
+Applications that use the stack for execution purposes cause userspace PSM
+jobs to fail during mmap().
+
+Both Fortran (non-standard format parsing) and C (callback functions
+located in the stack) applications can be written such that stack
+execution is required. The linker notes this via the gnu_stack ELF flag.
+
+This causes READ_IMPLIES_EXEC to be set which forces all PROT_READ mmaps
+to have PROT_EXEC for the process.
+
+Checking for VM_EXEC bit and failing the request with EPERM is overly
+conservative and will break any PSM application using executable stacks.
+
+Cc: <stable@vger.kernel.org> #v4.14+
+Fixes: 12220267645c ("IB/hfi: Protect against writable mmap")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Reviewed-by: Ira Weiny <ira.weiny@intel.com>
+Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/file_ops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *f
+ vmf = 1;
+ break;
+ case STATUS:
+- if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) {
++ if (flags & VM_WRITE) {
+ ret = -EPERM;
+ goto done;
+ }
diff --git a/patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch b/patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch
new file mode 100644
index 0000000000..051e64350b
--- /dev/null
+++ b/patches.drivers/IB-hfi1-Unreserve-a-reserved-request-when-it-is-comp.patch
@@ -0,0 +1,41 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Wed, 28 Nov 2018 10:22:09 -0800
+Subject: IB/hfi1: Unreserve a reserved request when it is completed
+Patch-mainline: v5.0-rc1
+Git-commit: ca95f802ef5139722acc8d30aeaab6fe5bbe939e
+References: jsc#SLE-4925
+
+Currently, When a reserved operation is completed, its entry in the send
+queue will not be unreserved, which leads to the miscalculation of
+qp->s_avail and thus the triggering of a WARN_ON call trace. This patch
+fixes the problem by unreserving the reserved operation when it is
+completed.
+
+Fixes: 856cc4c237ad ("IB/hfi1: Add the capability for reserved operations")
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/rc.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -1157,6 +1157,7 @@ void hfi1_rc_send_complete(struct rvt_qp
+ if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
+ cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
+ break;
++ rvt_qp_wqe_unreserve(qp, wqe);
+ s_last = qp->s_last;
+ trace_hfi1_qp_send_completion(qp, wqe, s_last);
+ if (++s_last >= qp->s_size)
+@@ -1209,6 +1210,7 @@ static struct rvt_swqe *do_rc_completion
+ u32 s_last;
+
+ rvt_put_swqe(wqe);
++ rvt_qp_wqe_unreserve(qp, wqe);
+ s_last = qp->s_last;
+ trace_hfi1_qp_send_completion(qp, wqe, s_last);
+ if (++s_last >= qp->s_size)
diff --git a/patches.drivers/IB-hw-Remove-unneeded-semicolons.patch b/patches.drivers/IB-hw-Remove-unneeded-semicolons.patch
new file mode 100644
index 0000000000..60675cfddf
--- /dev/null
+++ b/patches.drivers/IB-hw-Remove-unneeded-semicolons.patch
@@ -0,0 +1,102 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 18 Jan 2019 11:09:00 +0800
+Subject: IB/hw: Remove unneeded semicolons
+Patch-mainline: v5.1-rc1
+Git-commit: 790b57f686e29b93616b3d13b38043f5ec29fa0a
+References: bsc#1136456 jsc#SLE-4689
+
+Remove unneeded semicolons.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ drivers/infiniband/hw/qedr/qedr_iw_cm.c | 2 +-
+ drivers/infiniband/hw/qedr/verbs.c | 14 +++++++-------
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4568,7 +4568,7 @@ static int hns_roce_v2_aeq_int(struct hn
+ dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
+ event_type, eq->eqn, eq->cons_index);
+ break;
+- };
++ }
+
+ eq->event_type = event_type;
+ eq->sub_type = sub_type;
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -349,7 +349,7 @@ qedr_iw_event_handler(void *context, str
+ default:
+ DP_NOTICE(dev, "Unknown event received %d\n", params->event);
+ break;
+- };
++ }
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -2128,7 +2128,7 @@ static int qedr_update_qp_state(struct q
+ default:
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_INIT:
+ switch (new_state) {
+@@ -2149,7 +2149,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_RTR:
+ /* RTR->XXX */
+@@ -2162,7 +2162,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_RTS:
+ /* RTS->XXX */
+@@ -2175,7 +2175,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_SQD:
+ /* SQD->XXX */
+@@ -2187,7 +2187,7 @@ static int qedr_update_qp_state(struct q
+ /* Invalid state change. */
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ case QED_ROCE_QP_STATE_ERR:
+ /* ERR->XXX */
+@@ -2205,12 +2205,12 @@ static int qedr_update_qp_state(struct q
+ default:
+ status = -EINVAL;
+ break;
+- };
++ }
+ break;
+ default:
+ status = -EINVAL;
+ break;
+- };
++ }
+
+ return status;
+ }
diff --git a/patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch b/patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch
new file mode 100644
index 0000000000..5b543aeba1
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch
@@ -0,0 +1,59 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Thu, 17 Jan 2019 12:41:43 -0800
+Subject: IB/rdmavt: Add wc_flags and wc_immdata to cq entry trace
+Patch-mainline: v5.1-rc1
+Git-commit: 14e517e4b444a01d871893b1ea817790ee13dc0b
+References: jsc#SLE-4925
+
+These fields were missing from the trace. Add them.
+
+Fixes: c6ad9482fcb8 ("IB/rdmavt: Add tracing for cq entry and poll")
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/trace_cq.h | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/sw/rdmavt/trace_cq.h
++++ b/drivers/infiniband/sw/rdmavt/trace_cq.h
+@@ -105,7 +105,7 @@ DEFINE_EVENT(rvt_cq_template, rvt_create
+ TP_ARGS(cq, attr));
+
+ #define CQ_PRN \
+-"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x"
++"[%s] idx %u wr_id %llx status %u opcode %u,%s length %u qpn %x flags %x imm %x"
+
+ DECLARE_EVENT_CLASS(
+ rvt_cq_entry_template,
+@@ -119,6 +119,8 @@ DECLARE_EVENT_CLASS(
+ __field(u32, qpn)
+ __field(u32, length)
+ __field(u32, idx)
++ __field(u32, flags)
++ __field(u32, imm)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(cq->rdi)
+@@ -128,6 +130,8 @@ DECLARE_EVENT_CLASS(
+ __entry->length = wc->byte_len;
+ __entry->qpn = wc->qp->qp_num;
+ __entry->idx = idx;
++ __entry->flags = wc->wc_flags;
++ __entry->imm = be32_to_cpu(wc->ex.imm_data);
+ ),
+ TP_printk(
+ CQ_PRN,
+@@ -137,7 +141,9 @@ DECLARE_EVENT_CLASS(
+ __entry->status,
+ __entry->opcode, show_wc_opcode(__entry->opcode),
+ __entry->length,
+- __entry->qpn
++ __entry->qpn,
++ __entry->flags,
++ __entry->imm
+ )
+ );
+
diff --git a/patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch b/patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch
new file mode 100644
index 0000000000..50dba7113c
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch
@@ -0,0 +1,80 @@
+From: Josh Collier <josh.d.collier@intel.com>
+Date: Mon, 15 Apr 2019 11:34:22 -0700
+Subject: IB/rdmavt: Fix frwr memory registration
+Patch-mainline: v5.1-rc7
+Git-commit: 7c39f7f671d2acc0a1f39ebbbee4303ad499bbfa
+References: jsc#SLE-4925
+
+Current implementation was not properly handling frwr memory
+registrations. This was uncovered by commit 27f26cec761das ("xprtrdma:
+Plant XID in on-the-wire RDMA offset (FRWR)") in which xprtrdma, which is
+used for NFS over RDMA, started failing as it was the first ULP to modify
+the ib_mr iova resulting in the NFS server getting REMOTE ACCESS ERROR
+when attempting to perform RDMA Writes to the client.
+
+The fix is to properly capture the true iova, offset, and length in the
+call to ib_map_mr_sg, and then update the iova when processing the
+IB_WR_REG_MEM on the send queue.
+
+Fixes: a41081aa5936 ("IB/rdmavt: Add support for ib_map_mr_sg")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Josh Collier <josh.d.collier@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/mr.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/sw/rdmavt/mr.c
++++ b/drivers/infiniband/sw/rdmavt/mr.c
+@@ -609,11 +609,6 @@ static int rvt_set_page(struct ib_mr *ib
+ if (unlikely(mapped_segs == mr->mr.max_segs))
+ return -ENOMEM;
+
+- if (mr->mr.length == 0) {
+- mr->mr.user_base = addr;
+- mr->mr.iova = addr;
+- }
+-
+ m = mapped_segs / RVT_SEGSZ;
+ n = mapped_segs % RVT_SEGSZ;
+ mr->mr.map[m]->segs[n].vaddr = (void *)addr;
+@@ -631,17 +626,24 @@ static int rvt_set_page(struct ib_mr *ib
+ * @sg_nents: number of entries in sg
+ * @sg_offset: offset in bytes into sg
+ *
++ * Overwrite rvt_mr length with mr length calculated by ib_sg_to_pages.
++ *
+ * Return: number of sg elements mapped to the memory region
+ */
+ int rvt_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+ {
+ struct rvt_mr *mr = to_imr(ibmr);
++ int ret;
+
+ mr->mr.length = 0;
+ mr->mr.page_shift = PAGE_SHIFT;
+- return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
+- rvt_set_page);
++ ret = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rvt_set_page);
++ mr->mr.user_base = ibmr->iova;
++ mr->mr.iova = ibmr->iova;
++ mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
++ mr->mr.length = (size_t)ibmr->length;
++ return ret;
+ }
+
+ /**
+@@ -672,6 +674,7 @@ int rvt_fast_reg_mr(struct rvt_qp *qp, s
+ ibmr->rkey = key;
+ mr->mr.lkey = key;
+ mr->mr.access_flags = access;
++ mr->mr.iova = ibmr->iova;
+ atomic_set(&mr->mr.lkey_invalid, 0);
+
+ return 0;
diff --git a/patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch b/patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch
new file mode 100644
index 0000000000..3475e75e69
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch
@@ -0,0 +1,73 @@
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Date: Tue, 26 Feb 2019 08:45:16 -0800
+Subject: IB/rdmavt: Fix loopback send with invalidate ordering
+Patch-mainline: v5.1-rc1
+Git-commit: 38bbc9f0381550d1d227fc57afa08436e36b32fc
+References: jsc#SLE-4925
+
+The IBTA spec notes:
+
+o9-5.2.1: For any HCA which supports SEND with Invalidate, upon receiving
+an IETH, the Invalidate operation must not take place until after the
+normal transport header validation checks have been successfully
+completed.
+
+The rdmavt loopback code does the validation after the invalidate.
+
+Fix by relocating the operation specific logic for all SEND variants until
+after the validity checks.
+
+Cc: <stable@vger.kernel.org> #v4.20+
+Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/qp.c | 26 ++++++++++++++++----------
+ 1 file changed, 16 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/sw/rdmavt/qp.c
++++ b/drivers/infiniband/sw/rdmavt/qp.c
+@@ -2898,18 +2898,8 @@ again:
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
+- wc.wc_flags = IB_WC_WITH_INVALIDATE;
+- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
+- }
+- goto send;
+-
+ case IB_WR_SEND_WITH_IMM:
+- wc.wc_flags = IB_WC_WITH_IMM;
+- wc.ex.imm_data = wqe->wr.ex.imm_data;
+- /* FALLTHROUGH */
+ case IB_WR_SEND:
+-send:
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto op_err;
+@@ -2917,6 +2907,22 @@ send:
+ goto rnr_nak;
+ if (wqe->length > qp->r_len)
+ goto inv_err;
++ switch (wqe->wr.opcode) {
++ case IB_WR_SEND_WITH_INV:
++ if (!rvt_invalidate_rkey(qp,
++ wqe->wr.ex.invalidate_rkey)) {
++ wc.wc_flags = IB_WC_WITH_INVALIDATE;
++ wc.ex.invalidate_rkey =
++ wqe->wr.ex.invalidate_rkey;
++ }
++ break;
++ case IB_WR_SEND_WITH_IMM:
++ wc.wc_flags = IB_WC_WITH_IMM;
++ wc.ex.imm_data = wqe->wr.ex.imm_data;
++ break;
++ default:
++ break;
++ }
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
diff --git a/patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch b/patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch
new file mode 100644
index 0000000000..085db2a1ec
--- /dev/null
+++ b/patches.drivers/IB-rdmavt-hfi1-Miscellaneous-comment-fixes.patch
@@ -0,0 +1,40 @@
+From: Kaike Wan <kaike.wan@intel.com>
+Date: Thu, 11 Apr 2019 07:15:49 -0700
+Subject: IB/{rdmavt, hfi1): Miscellaneous comment fixes
+Patch-mainline: v5.2-rc1
+Git-commit: ea752bc5e50a03e337dfa5c8940d357c62300f8a
+References: jsc#SLE-4925
+
+This patch fixes miscellaneous comment errors.
+
+Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hfi1/ruc.c | 2 +-
+ include/rdma/rdmavt_qp.h | 1 -
+ 2 files changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hfi1/ruc.c
++++ b/drivers/infiniband/hw/hfi1/ruc.c
+@@ -524,7 +524,7 @@ void _hfi1_do_send(struct work_struct *w
+
+ /**
+ * hfi1_do_send - perform a send on a QP
+- * @work: contains a pointer to the QP
++ * @qp: a pointer to the QP
+ * @in_thread: true if in a workqueue thread
+ *
+ * Process entries in the send work queue until credit or queue is
+--- a/include/rdma/rdmavt_qp.h
++++ b/include/rdma/rdmavt_qp.h
+@@ -83,7 +83,6 @@
+ * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
+ * next send completion entry not via send DMA
+ * RVT_S_WAIT_PIO - waiting for a send buffer to be available
+- * RVT_S_WAIT_PIO_DRAIN - waiting for a qp to drain pio packets
+ * RVT_S_WAIT_TX - waiting for a struct verbs_txreq to be available
+ * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
+ * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
diff --git a/patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch b/patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch
new file mode 100644
index 0000000000..c9871f2cc1
--- /dev/null
+++ b/patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch
@@ -0,0 +1,46 @@
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Thu, 28 Mar 2019 11:49:44 -0500
+Subject: RDMA/cxbg: Use correct sizing on buffers holding page DMA addresses
+Patch-mainline: v5.2-rc1
+Git-commit: 5f818d676ac455bbc812ffaaf5bf780be5465114
+References: bsc#1136348 jsc#SLE-4684
+
+The PBL array that hold the page DMA address is sized off umem->nmap.
+This can potentially cause out of bound accesses on the PBL array when
+iterating the umem DMA-mapped SGL. This is because if umem pages are
+combined, umem->nmap can be much lower than the number of system pages
+in umem.
+
+Use ib_umem_num_pages() to size this array.
+
+Cc: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb3/iwch_provider.c | 2 +-
+ drivers/infiniband/hw/cxgb4/mem.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
++++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
+@@ -549,7 +549,7 @@ static struct ib_mr *iwch_reg_user_mr(st
+
+ shift = mhp->umem->page_shift;
+
+- n = mhp->umem->nmap;
++ n = ib_umem_num_pages(mhp->umem);
+
+ err = iwch_alloc_pbl(mhp, n);
+ if (err)
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -543,7 +543,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib
+
+ shift = mhp->umem->page_shift;
+
+- n = mhp->umem->nmap;
++ n = ib_umem_num_pages(mhp->umem);
+ err = alloc_pbl(mhp, n);
+ if (err)
+ goto err_umem_release;
diff --git a/patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch b/patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch
new file mode 100644
index 0000000000..ddfd755781
--- /dev/null
+++ b/patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch
@@ -0,0 +1,33 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Sat, 13 Apr 2019 17:00:26 +0100
+Subject: RDMA/cxgb4: Fix null pointer dereference on alloc_skb failure
+Patch-mainline: v5.2-rc1
+Git-commit: a6d2a5a92e67d151c98886babdc86d530d27111c
+References: bsc#1136348 jsc#SLE-4684
+
+Currently if alloc_skb fails to allocate the skb a null skb is passed to
+t4_set_arp_err_handler and this ends up dereferencing the null skb. Avoid
+the NULL pointer dereference by checking for a NULL skb and returning
+early.
+
+Addresses-Coverity: ("Dereference null return")
+Fixes: b38a0ad8ec11 ("RDMA/cxgb4: Set arp error handler for PASS_ACCEPT_RPL messages")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Acked-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -459,6 +459,8 @@ static struct sk_buff *get_skb(struct sk
+ skb_reset_transport_header(skb);
+ } else {
+ skb = alloc_skb(len, gfp);
++ if (!skb)
++ return NULL;
+ }
+ t4_set_arp_err_handler(skb, NULL, NULL);
+ return skb;
diff --git a/patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch b/patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch
new file mode 100644
index 0000000000..c86e1b16f6
--- /dev/null
+++ b/patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch
@@ -0,0 +1,27 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 16 Apr 2019 15:38:04 +0100
+Subject: RDMA/cxgb4: Fix spelling mistake "immedate" -> "immediate"
+Patch-mainline: v5.2-rc1
+Git-commit: ff5eefe6d3a3a2cd93b71165741ebdeda6d58e1d
+References: bsc#1136348 jsc#SLE-4684
+
+There is a spelling mistake in a module parameter description. Fix it.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/qp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -56,7 +56,7 @@ MODULE_PARM_DESC(db_coalescing_threshold
+
+ static int max_fr_immd = T4_MAX_FR_IMMD;
+ module_param(max_fr_immd, int, 0644);
+-MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate");
++MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
+
+ static int alloc_ird(struct c4iw_dev *dev, u32 ird)
+ {
diff --git a/patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch b/patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch
new file mode 100644
index 0000000000..3af9c69696
--- /dev/null
+++ b/patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch
@@ -0,0 +1,112 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 12 Feb 2019 20:39:15 +0200
+Subject: RDMA/cxgb4: Remove kref accounting for sync operation
+Patch-mainline: v5.1-rc1
+Git-commit: cfe876d8e6b0491170d44c8040c518b121957104
+References: bsc#1136348 jsc#SLE-4684
+
+Ucontext allocation and release aren't async events and don't need kref
+accounting. The common layer of RDMA subsystem ensures that dealloc
+ucontext will be called after all other objects are released.
+
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Steve Wise <swise@opengridcomputing.com>
+Tested-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 13 -------------
+ drivers/infiniband/hw/cxgb4/provider.c | 16 +++-------------
+ drivers/infiniband/hw/cxgb4/qp.c | 3 ---
+ 3 files changed, 3 insertions(+), 29 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
++++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+@@ -590,7 +590,6 @@ struct c4iw_ucontext {
+ u32 key;
+ spinlock_t mmap_lock;
+ struct list_head mmaps;
+- struct kref kref;
+ bool is_32b_cqe;
+ };
+
+@@ -599,18 +598,6 @@ static inline struct c4iw_ucontext *to_c
+ return container_of(c, struct c4iw_ucontext, ibucontext);
+ }
+
+-void _c4iw_free_ucontext(struct kref *kref);
+-
+-static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+-{
+- kref_put(&ucontext->kref, _c4iw_free_ucontext);
+-}
+-
+-static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+-{
+- kref_get(&ucontext->kref);
+-}
+-
+ struct c4iw_mm_entry {
+ struct list_head entry;
+ u64 addr;
+--- a/drivers/infiniband/hw/cxgb4/provider.c
++++ b/drivers/infiniband/hw/cxgb4/provider.c
+@@ -58,28 +58,19 @@ static int fastreg_support = 1;
+ module_param(fastreg_support, int, 0644);
+ MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
+
+-void _c4iw_free_ucontext(struct kref *kref)
++static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+ {
+- struct c4iw_ucontext *ucontext;
++ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_dev *rhp;
+ struct c4iw_mm_entry *mm, *tmp;
+
+- ucontext = container_of(kref, struct c4iw_ucontext, kref);
++ pr_debug("context %p\n", context);
+ rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+- pr_debug("ucontext %p\n", ucontext);
+ list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
+ kfree(mm);
+ c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
+ kfree(ucontext);
+-}
+-
+-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+-{
+- struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+-
+- pr_debug("context %p\n", context);
+- c4iw_put_ucontext(ucontext);
+ return 0;
+ }
+
+@@ -102,7 +93,6 @@ static struct ib_ucontext *c4iw_alloc_uc
+ c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
+ INIT_LIST_HEAD(&context->mmaps);
+ spin_lock_init(&context->mmap_lock);
+- kref_init(&context->kref);
+
+ if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
+ pr_err_once("Warning - downlevel libcxgb4 (non-fatal), device status page disabled\n");
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -903,8 +903,6 @@ static void free_qp_work(struct work_str
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
+
+- if (ucontext)
+- c4iw_put_ucontext(ucontext);
+ c4iw_put_wr_wait(qhp->wr_waitp);
+ kfree(qhp);
+ }
+@@ -2338,7 +2336,6 @@ struct ib_qp *c4iw_create_qp(struct ib_p
+ insert_mmap(ucontext, ma_sync_key_mm);
+ }
+
+- c4iw_get_ucontext(ucontext);
+ qhp->ucontext = ucontext;
+ }
+ if (!attrs->srq) {
diff --git a/patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch b/patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch
new file mode 100644
index 0000000000..39e88293ae
--- /dev/null
+++ b/patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch
@@ -0,0 +1,90 @@
+From: Kangjie Lu <kjlu@umn.edu>
+Date: Fri, 15 Mar 2019 01:57:14 -0500
+Subject: RDMA/i40iw: Handle workqueue allocation failure
+Patch-mainline: v5.2-rc1
+Git-commit: e2a438bd7116889af36304903b92e56d0f347228
+References: jsc#SLE-4793
+
+alloc_ordered_workqueue may fail and return NULL. The fix captures the
+failure and handles it properly to avoid potential NULL pointer
+dereferences.
+
+Signed-off-by: Kangjie Lu <kjlu@umn.edu>
+Reviewed-by: Shiraz, Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/i40iw/i40iw.h | 2 +-
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 18 +++++++++++++++---
+ drivers/infiniband/hw/i40iw/i40iw_main.c | 5 ++++-
+ 3 files changed, 20 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/i40iw/i40iw.h
++++ b/drivers/infiniband/hw/i40iw/i40iw.h
+@@ -552,7 +552,7 @@ enum i40iw_status_code i40iw_obj_aligned
+
+ void i40iw_request_reset(struct i40iw_device *iwdev);
+ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev);
+-void i40iw_setup_cm_core(struct i40iw_device *iwdev);
++int i40iw_setup_cm_core(struct i40iw_device *iwdev);
+ void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core);
+ void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq);
+ void i40iw_process_aeq(struct i40iw_device *);
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -3237,7 +3237,7 @@ void i40iw_receive_ilq(struct i40iw_sc_v
+ * core
+ * @iwdev: iwarp device structure
+ */
+-void i40iw_setup_cm_core(struct i40iw_device *iwdev)
++int i40iw_setup_cm_core(struct i40iw_device *iwdev)
+ {
+ struct i40iw_cm_core *cm_core = &iwdev->cm_core;
+
+@@ -3257,9 +3257,19 @@ void i40iw_setup_cm_core(struct i40iw_de
+
+ cm_core->event_wq = alloc_ordered_workqueue("iwewq",
+ WQ_MEM_RECLAIM);
++ if (!cm_core->event_wq)
++ goto error;
+
+ cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq",
+ WQ_MEM_RECLAIM);
++ if (!cm_core->disconn_wq)
++ goto error;
++
++ return 0;
++error:
++ i40iw_cleanup_cm_core(&iwdev->cm_core);
++
++ return -ENOMEM;
+ }
+
+ /**
+@@ -3279,8 +3289,10 @@ void i40iw_cleanup_cm_core(struct i40iw_
+ del_timer_sync(&cm_core->tcp_timer);
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+
+- destroy_workqueue(cm_core->event_wq);
+- destroy_workqueue(cm_core->disconn_wq);
++ if (cm_core->event_wq)
++ destroy_workqueue(cm_core->event_wq);
++ if (cm_core->disconn_wq)
++ destroy_workqueue(cm_core->disconn_wq);
+ }
+
+ /**
+--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
+@@ -1641,7 +1641,10 @@ static int i40iw_open(struct i40e_info *
+ iwdev = &hdl->device;
+ iwdev->hdl = hdl;
+ dev = &iwdev->sc_dev;
+- i40iw_setup_cm_core(iwdev);
++ if (i40iw_setup_cm_core(iwdev)) {
++ kfree(iwdev->hdl);
++ return -ENOMEM;
++ }
+
+ dev->back_dev = (void *)iwdev;
+ iwdev->ldev = &hdl->ldev;
diff --git a/patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch b/patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch
new file mode 100644
index 0000000000..d50ffb990d
--- /dev/null
+++ b/patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch
@@ -0,0 +1,39 @@
+From: Potnuri Bharat Teja <bharat@chelsio.com>
+Date: Tue, 2 Apr 2019 14:46:11 +0530
+Subject: RDMA/iw_cxgb4: Always disconnect when QP is transitioning to
+ TERMINATE state
+Patch-mainline: v5.2-rc1
+Git-commit: d2c33370ae73105c7c7df8f7048d20653991b4cb
+References: bsc#1136348 jsc#SLE-4684
+
+On receiving a TERM from the peer, Host moves the QP to TERMINATE state
+and then moves the adapter out of RDMA mode. After issuing a TERM, peer
+issues a CLOSE and at this point of time if the connectivity between peer
+and host is lost for a significant amount of time, the QP remains in
+TERMINATE state.
+
+Therefore c4iw_modify_qp() needs to initiate a close on entering terminate
+state.
+
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/qp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/cxgb4/qp.c
++++ b/drivers/infiniband/hw/cxgb4/qp.c
+@@ -1975,10 +1975,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp,
+ qhp->attr.layer_etype = attrs->layer_etype;
+ qhp->attr.ecode = attrs->ecode;
+ ep = qhp->ep;
++ c4iw_get_ep(&ep->com);
++ disconnect = 1;
+ if (!internal) {
+- c4iw_get_ep(&qhp->ep->com);
+ terminate = 1;
+- disconnect = 1;
+ } else {
+ terminate = qhp->attr.send_term;
+ ret = rdma_fini(rhp, qhp, ep);
diff --git a/patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch b/patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch
new file mode 100644
index 0000000000..5d0eeb8fa4
--- /dev/null
+++ b/patches.drivers/RDMA-iwcm-add-tos_set-bool-to-iw_cm-struct.patch
@@ -0,0 +1,47 @@
+From: Steve Wise <swise@opengridcomputing.com>
+Date: Fri, 1 Feb 2019 12:44:32 -0800
+Subject: RDMA/iwcm: add tos_set bool to iw_cm struct
+Patch-mainline: v5.1-rc1
+Git-commit: 926ba19b3574f6a80823a42484877ed65e91da9c
+References: bsc#1136348 jsc#SLE-4684
+
+This allows drivers to know the tos was actively set by the application.
+
+Signed-off-by: Steve Wise <swise@opengridcomputing.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/core/cma.c | 2 ++
+ include/rdma/iw_cm.h | 3 ++-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2407,6 +2407,7 @@ static int cma_iw_listen(struct rdma_id_
+ return PTR_ERR(id);
+
+ id->tos = id_priv->tos;
++ id->tos_set = id_priv->tos_set;
+ id_priv->cm_id.iw = id;
+
+ memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
+@@ -3795,6 +3796,7 @@ static int cma_connect_iw(struct rdma_id
+ return PTR_ERR(cm_id);
+
+ cm_id->tos = id_priv->tos;
++ cm_id->tos_set = id_priv->tos_set;
+ id_priv->cm_id.iw = cm_id;
+
+ memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
+--- a/include/rdma/iw_cm.h
++++ b/include/rdma/iw_cm.h
+@@ -94,7 +94,8 @@ struct iw_cm_id {
+ void (*add_ref)(struct iw_cm_id *);
+ void (*rem_ref)(struct iw_cm_id *);
+ u8 tos;
+- bool mapped;
++ bool tos_set:1;
++ bool mapped:1;
+ };
+
+ struct iw_cm_conn_param {
diff --git a/patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch b/patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch
new file mode 100644
index 0000000000..295ca5ef83
--- /dev/null
+++ b/patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch
@@ -0,0 +1,30 @@
+From: Gal Pressman <galpress@amazon.com>
+Date: Mon, 7 Jan 2019 17:27:56 +0200
+Subject: RDMA/qedr: Fix out of bounds index check in query pkey
+Patch-mainline: v5.1-rc1
+Git-commit: dbe30dae487e1a232158c24b432d45281c2805b7
+References: bsc#1136456 jsc#SLE-4689
+
+The pkey table size is QEDR_ROCE_PKEY_TABLE_LEN, index should be tested
+for >= QEDR_ROCE_PKEY_TABLE_LEN instead of > QEDR_ROCE_PKEY_TABLE_LEN.
+
+Fixes: a7efd7773e31 ("qedr: Add support for PD,PKEY and CQ verbs")
+Signed-off-by: Gal Pressman <galpress@amazon.com>
+Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -67,7 +67,7 @@ static inline int qedr_ib_copy_to_udata(
+
+ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+ {
+- if (index > QEDR_ROCE_PKEY_TABLE_LEN)
++ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
diff --git a/patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch b/patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch
new file mode 100644
index 0000000000..f74a3d145d
--- /dev/null
+++ b/patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch
@@ -0,0 +1,36 @@
+From: Shiraz Saleem <shiraz.saleem@intel.com>
+Date: Thu, 28 Mar 2019 11:49:47 -0500
+Subject: RDMA/rdmavt: Use correct sizing on buffers holding page DMA addresses
+Patch-mainline: v5.2-rc1
+Git-commit: 629e6f9db6bf4c5702212dd77da534b838f14859
+References: jsc#SLE-4925
+
+The buffer that holds the page DMA addresses is sized off umem->nmap.
+This can potentially cause out of bound accesses on the PBL array when
+iterating the umem DMA-mapped SGL. This is because if umem pages are
+combined, umem->nmap can be much lower than the number of system pages
+in umem.
+
+Use ib_umem_num_pages() to size this buffer.
+
+Cc: Dennis Dalessandro <dennis.dalessandro@intel.com>
+Cc: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Cc: Michael J. Ruhl <michael.j.ruhl@intel.com>
+Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/sw/rdmavt/mr.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/sw/rdmavt/mr.c
++++ b/drivers/infiniband/sw/rdmavt/mr.c
+@@ -393,7 +393,7 @@ struct ib_mr *rvt_reg_user_mr(struct ib_
+ if (IS_ERR(umem))
+ return (void *)umem;
+
+- n = umem->nmap;
++ n = ib_umem_num_pages(umem);
+
+ mr = __rvt_alloc_mr(n, pd);
+ if (IS_ERR(mr)) {
diff --git a/patches.drivers/arm64-fix-ACPI-dependencies.patch b/patches.drivers/arm64-fix-ACPI-dependencies.patch
index a80fe9f45f..5e5b11a20c 100644
--- a/patches.drivers/arm64-fix-ACPI-dependencies.patch
+++ b/patches.drivers/arm64-fix-ACPI-dependencies.patch
@@ -3,7 +3,7 @@ Date: Tue, 24 Jul 2018 11:48:45 +0200
Subject: arm64: fix ACPI dependencies
Git-commit: 2c870e61132c082a03769d2ac0a2849ba33c10e3
Patch-mainline: v4.19-rc1
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
Kconfig reports a warning on x86 builds after the ARM64 dependency
was added.
diff --git a/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch b/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch
index cca10d7d4a..2c0e1262ea 100644
--- a/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch
+++ b/patches.drivers/arm64-mm-efi-Account-for-GICv3-LPI-tables-in-static-.patch
@@ -4,7 +4,7 @@ Subject: arm64, mm, efi: Account for GICv3 LPI tables in static memblock
reserve table
Git-commit: 8a5b403d71affa098009cc3dff1b2c45113021ad
Patch-mainline: v5.0-rc7
-References: bsc#1117158
+References: bsc#1117158 bsc#1134671
In the irqchip and EFI code, we have what basically amounts to a quirk
to work around a peculiarity in the GICv3 architecture, which permits
diff --git a/patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch b/patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch
new file mode 100644
index 0000000000..f0355c1bc9
--- /dev/null
+++ b/patches.drivers/bitmap-Add-bitmap_alloc-bitmap_zalloc-and-bitmap_fre.patch
@@ -0,0 +1,78 @@
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Wed, 1 Aug 2018 15:42:56 -0700
+Subject: bitmap: Add bitmap_alloc(), bitmap_zalloc() and bitmap_free()
+Patch-mainline: v4.19-rc1
+Git-commit: c42b65e363ce97a828f81b59033c3558f8fa7f70
+References: jsc#SLE-4797
+
+A lot of code become ugly because of open coding allocations for bitmaps.
+
+Introduce three helpers to allow users be more clear of intention
+and keep their code neat.
+
+Note, due to multiple circular dependencies we may not provide
+the helpers as inliners. For now we keep them exported and, perhaps,
+at some point in the future we will sort out header inclusion and
+inheritance.
+
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ include/linux/bitmap.h | 8 ++++++++
+ lib/bitmap.c | 20 ++++++++++++++++++++
+ 2 files changed, 28 insertions(+)
+
+--- a/include/linux/bitmap.h
++++ b/include/linux/bitmap.h
+@@ -99,6 +99,14 @@
+ */
+
+ /*
++ * Allocation and deallocation of bitmap.
++ * Provided in lib/bitmap.c to avoid circular dependency.
++ */
++extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
++extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
++extern void bitmap_free(const unsigned long *bitmap);
++
++/*
+ * lib/bitmap.c provides these functions:
+ */
+
+--- a/lib/bitmap.c
++++ b/lib/bitmap.c
+@@ -13,6 +13,7 @@
+ #include <linux/bitops.h>
+ #include <linux/bug.h>
+ #include <linux/kernel.h>
++#include <linux/slab.h>
+ #include <linux/string.h>
+ #include <linux/uaccess.h>
+
+@@ -1134,6 +1135,25 @@ void bitmap_copy_le(unsigned long *dst,
+ EXPORT_SYMBOL(bitmap_copy_le);
+ #endif
+
++unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags)
++{
++ return kmalloc_array(BITS_TO_LONGS(nbits), sizeof(unsigned long),
++ flags);
++}
++EXPORT_SYMBOL(bitmap_alloc);
++
++unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags)
++{
++ return bitmap_alloc(nbits, flags | __GFP_ZERO);
++}
++EXPORT_SYMBOL(bitmap_zalloc);
++
++void bitmap_free(const unsigned long *bitmap)
++{
++ kfree(bitmap);
++}
++EXPORT_SYMBOL(bitmap_free);
++
+ #if BITS_PER_LONG == 64
+ /**
+ * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
diff --git a/patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch b/patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch
new file mode 100644
index 0000000000..e02a6fd3ef
--- /dev/null
+++ b/patches.drivers/bnx2x-Add-support-for-detection-of-P2P-event-packets.patch
@@ -0,0 +1,60 @@
+From: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Date: Tue, 16 Apr 2019 01:46:13 -0700
+Subject: bnx2x: Add support for detection of P2P event packets.
+Patch-mainline: v5.2-rc1
+Git-commit: 00165c25fa3e5814f399f9a4fdd998066a06330c
+References: bsc#1136498 jsc#SLE-4699
+
+The patch adds support for detecting the P2P (peer-to-peer) event packets.
+This is required for timestamping the PTP packets in peer delay mode.
+Unmask the below bits (set to 0) for device to detect the p2p packets.
+ NIG_REG_P0/1_LLH_PTP_PARAM_MASK
+ NIG_REG_P0/1_TLLH_PTP_PARAM_MASK
+ bit 1 - IPv4 DA 1 of 224.0.0.107.
+ bit 3 - IPv6 DA 1 of 0xFF02:0:0:0:0:0:0:6B.
+ bit 9 - MAC DA 1 of 0x01-80-C2-00-00-0E.
+ NIG_REG_P0/1_LLH_PTP_RULE_MASK
+ NIG_REG_P0/1_TLLH_PTP_RULE_MASK
+ bit 2 - {IPv4 DA 1; UDP DP 0}
+ bit 6 - MAC Ethertype 0 of 0x88F7.
+ bit 9 - MAC DA 1 of 0x01-80-C2-00-00-0E.
+
+Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -15384,16 +15384,18 @@ static int bnx2x_enable_ptp_packets(stru
+ return 0;
+ }
+
+-#define BNX2X_PTP_TX_ON_PARAM_MASK 0x6AA
+-#define BNX2X_PTP_TX_ON_RULE_MASK 0x3EEE
+-#define BNX2X_PTP_V1_L4_PARAM_MASK 0x7EE
+-#define BNX2X_PTP_V1_L4_RULE_MASK 0x3FFE
+-#define BNX2X_PTP_V2_L4_PARAM_MASK 0x7EA
+-#define BNX2X_PTP_V2_L4_RULE_MASK 0x3FEE
+-#define BNX2X_PTP_V2_L2_PARAM_MASK 0x6BF
+-#define BNX2X_PTP_V2_L2_RULE_MASK 0x3EFF
+-#define BNX2X_PTP_V2_PARAM_MASK 0x6AA
+-#define BNX2X_PTP_V2_RULE_MASK 0x3EEE
++#define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
++#define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
++#define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
++#define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
++#define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
++#define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
++#define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
++#define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
++#define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
++#define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
++#define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
++#define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
+
+ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+ {
diff --git a/patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch b/patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch
new file mode 100644
index 0000000000..ee6b18b4d5
--- /dev/null
+++ b/patches.drivers/bnx2x-Bump-up-driver-version-to-1.713.36.patch
@@ -0,0 +1,30 @@
+From: Sudarsana Reddy Kalluru <sudarsana.kalluru@cavium.com>
+Date: Tue, 22 Jan 2019 03:05:20 -0800
+Subject: bnx2x: Bump up driver version to 1.713.36
+Patch-mainline: v5.1-rc1
+Git-commit: f116465385344294edda66970734a26f5dd59a93
+References: bsc#1136498 jsc#SLE-4699
+
+Recently, there were bunch of fixes to bnx2x driver, the code is now
+aligned to out-of-box driver version 1.713.36. This patch updates
+bnx2x driver version to 1.713.36.
+
+Signed-off-by: Sudarsana Reddy Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -32,7 +32,7 @@
+ * (you will need to reboot afterwards) */
+ /* #define BNX2X_STOP_ON_ERROR */
+
+-#define DRV_MODULE_VERSION "1.712.30-0"
++#define DRV_MODULE_VERSION "1.713.36-0"
+ #define DRV_MODULE_RELDATE "2014/02/10"
+ #define BNX2X_BC_VER 0x040200
+
diff --git a/patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch b/patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch
new file mode 100644
index 0000000000..52c83e7b8e
--- /dev/null
+++ b/patches.drivers/bnx2x-Remove-set-but-not-used-variable-mfw_vn.patch
@@ -0,0 +1,47 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Mon, 18 Feb 2019 12:19:54 +0000
+Subject: bnx2x: Remove set but not used variable 'mfw_vn'
+Patch-mainline: v5.1-rc1
+Git-commit: c9b747dbc2036c917b1067fbb78dc38b105c4454
+References: bsc#1136498 jsc#SLE-4699
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c: In function 'bnx2x_get_hwinfo':
+drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c:11940:10: warning:
+ variable 'mfw_vn' set but not used [-Wunused-but-set-variable]
+
+It's never used since introduction.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Acked-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -11998,7 +11998,7 @@ static void validate_set_si_mode(struct
+ static int bnx2x_get_hwinfo(struct bnx2x *bp)
+ {
+ int /*abs*/func = BP_ABS_FUNC(bp);
+- int vn, mfw_vn;
++ int vn;
+ u32 val = 0, val2 = 0;
+ int rc = 0;
+
+@@ -12083,12 +12083,10 @@ static int bnx2x_get_hwinfo(struct bnx2x
+ /*
+ * Initialize MF configuration
+ */
+-
+ bp->mf_ov = 0;
+ bp->mf_mode = 0;
+ bp->mf_sub_mode = 0;
+ vn = BP_VN(bp);
+- mfw_vn = BP_FW_MB_IDX(bp);
+
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+ BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
diff --git a/patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch b/patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch
new file mode 100644
index 0000000000..a3cb2d5daf
--- /dev/null
+++ b/patches.drivers/bnx2x-Replace-magic-numbers-with-macro-definitions.patch
@@ -0,0 +1,120 @@
+From: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Date: Tue, 16 Apr 2019 01:46:12 -0700
+Subject: bnx2x: Replace magic numbers with macro definitions.
+Patch-mainline: v5.2-rc1
+Git-commit: b320532c9990e6d8360fcc6831c33da46289e27d
+References: bsc#1136498 jsc#SLE-4699
+
+This patch performs code cleanup by defining macros for the ptp-timestamp
+filters.
+
+Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 50 +++++++++++++----------
+ 1 file changed, 30 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -15384,27 +15384,45 @@ static int bnx2x_enable_ptp_packets(stru
+ return 0;
+ }
+
++#define BNX2X_PTP_TX_ON_PARAM_MASK 0x6AA
++#define BNX2X_PTP_TX_ON_RULE_MASK 0x3EEE
++#define BNX2X_PTP_V1_L4_PARAM_MASK 0x7EE
++#define BNX2X_PTP_V1_L4_RULE_MASK 0x3FFE
++#define BNX2X_PTP_V2_L4_PARAM_MASK 0x7EA
++#define BNX2X_PTP_V2_L4_RULE_MASK 0x3FEE
++#define BNX2X_PTP_V2_L2_PARAM_MASK 0x6BF
++#define BNX2X_PTP_V2_L2_RULE_MASK 0x3EFF
++#define BNX2X_PTP_V2_PARAM_MASK 0x6AA
++#define BNX2X_PTP_V2_RULE_MASK 0x3EEE
++
+ int bnx2x_configure_ptp_filters(struct bnx2x *bp)
+ {
+ int port = BP_PORT(bp);
++ u32 param, rule;
+ int rc;
+
+ if (!bp->hwtstamp_ioctl_called)
+ return 0;
+
++ param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
++ NIG_REG_P0_TLLH_PTP_PARAM_MASK;
++ rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
++ NIG_REG_P0_TLLH_PTP_RULE_MASK;
+ switch (bp->tx_type) {
+ case HWTSTAMP_TX_ON:
+ bp->flags |= TX_TIMESTAMPING_EN;
+- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
+- NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x6AA);
+- REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
+- NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3EEE);
++ REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
+ break;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ BNX2X_ERR("One-step timestamping is not supported\n");
+ return -ERANGE;
+ }
+
++ param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
++ NIG_REG_P0_LLH_PTP_PARAM_MASK;
++ rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
++ NIG_REG_P0_LLH_PTP_RULE_MASK;
+ switch (bp->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+@@ -15418,30 +15436,24 @@ int bnx2x_configure_ptp_filters(struct b
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EE);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFE);
++ REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7EA);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FEE);
++ REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ /* Initialize PTP detection L2 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6BF);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EFF);
++ REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
+
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+@@ -15449,10 +15461,8 @@ int bnx2x_configure_ptp_filters(struct b
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
+- NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x6AA);
+- REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
+- NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3EEE);
++ REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
++ REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
+ break;
+ }
+
diff --git a/patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch b/patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch
new file mode 100644
index 0000000000..8ecff6d1c8
--- /dev/null
+++ b/patches.drivers/bnx2x-Use-struct_size-in-kzalloc.patch
@@ -0,0 +1,54 @@
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Thu, 7 Feb 2019 21:29:10 -0600
+Subject: bnx2x: Use struct_size() in kzalloc()
+Patch-mainline: v5.1-rc1
+Git-commit: 370600afdd2e33665c84d06f34e7c223d5379b4a
+References: bsc#1136498 jsc#SLE-4699
+
+One of the more common cases of allocation size calculations is finding
+the size of a structure that has a zero-sized array at the end, along
+with memory for some number of elements for that array. For example:
+
+struct foo {
+ int stuff;
+ struct boo entry[];
+};
+
+size = sizeof(struct foo) + count * sizeof(struct boo);
+instance = kzalloc(size, GFP_KERNEL)
+
+Instead of leaving these open-coded and prone to type mistakes, we can
+now use the new struct_size() helper:
+
+instance = kzalloc(struct_size(instance, entry, count), GFP_KERNEL)
+
+Notice that, in this case, variable fsz is not necessary, hence
+it is removed.
+
+This code was detected with the help of Coccinelle.
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+@@ -1654,13 +1654,9 @@ static int bnx2x_vf_mbx_macvlan_list(str
+ {
+ int i, j;
+ struct bnx2x_vf_mac_vlan_filters *fl = NULL;
+- size_t fsz;
+
+- fsz = tlv->n_mac_vlan_filters *
+- sizeof(struct bnx2x_vf_mac_vlan_filter) +
+- sizeof(struct bnx2x_vf_mac_vlan_filters);
+-
+- fl = kzalloc(fsz, GFP_KERNEL);
++ fl = kzalloc(struct_size(fl, filters, tlv->n_mac_vlan_filters),
++ GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
diff --git a/patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch b/patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch
new file mode 100644
index 0000000000..d356804a68
--- /dev/null
+++ b/patches.drivers/bnx2x-Utilize-FW-7.13.11.0.patch
@@ -0,0 +1,93 @@
+From: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Date: Wed, 27 Mar 2019 04:40:43 -0700
+Subject: bnx2x: Utilize FW 7.13.11.0.
+Patch-mainline: v5.2-rc1
+Git-commit: 32705592f944f0f7a3ec58ffd562d828b24f659a
+References: bsc#1136498 jsc#SLE-4699
+
+Commit 8fcf0ec44c11f "bnx2x: Add FW 7.13.11.0" added said .bin FW to
+linux-firmware; This patch incorporates the FW in the bnx2x driver.
+This introduces few FW fixes and the support for Tx VLAN filtering.
+
+Please consider applying it to 'net-next' tree.
+
+Signed-off-by: Sudarsana Reddy Kalluru <skalluru@marvell.com>
+Signed-off-by: Ariel Elior <aelior@marvell.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+@@ -3024,7 +3024,7 @@ struct afex_stats {
+
+ #define BCM_5710_FW_MAJOR_VERSION 7
+ #define BCM_5710_FW_MINOR_VERSION 13
+-#define BCM_5710_FW_REVISION_VERSION 1
++#define BCM_5710_FW_REVISION_VERSION 11
+ #define BCM_5710_FW_ENGINEERING_VERSION 0
+ #define BCM_5710_FW_COMPILE_FLAGS 1
+
+@@ -3639,8 +3639,10 @@ struct client_init_rx_data {
+ #define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+ #define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
+ #define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
+-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
++#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3)
++#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3
++#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4)
++#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4
+ u8 vmqueue_mode_en_flg;
+ u8 extra_data_over_sgl_en_flg;
+ u8 cache_line_alignment_log_size;
+@@ -3831,7 +3833,7 @@ struct eth_classify_cmd_header {
+ */
+ struct eth_classify_header {
+ u8 rule_cnt;
+- u8 reserved0;
++ u8 warning_on_error;
+ __le16 reserved1;
+ __le32 echo;
+ };
+@@ -4752,6 +4754,8 @@ struct tpa_update_ramrod_data {
+ __le32 sge_page_base_hi;
+ __le16 sge_pause_thr_low;
+ __le16 sge_pause_thr_high;
++ u8 tpa_over_vlan_disable;
++ u8 reserved[7];
+ };
+
+
+@@ -4946,7 +4950,7 @@ struct fairness_vars_per_port {
+ u32 upper_bound;
+ u32 fair_threshold;
+ u32 fairness_timeout;
+- u32 reserved0;
++ u32 size_thr;
+ };
+
+ /*
+@@ -5415,7 +5419,9 @@ struct function_start_data {
+ u8 sd_vlan_force_pri_val;
+ u8 c2s_pri_tt_valid;
+ u8 c2s_pri_default;
+- u8 reserved2[6];
++ u8 tx_vlan_filtering_enable;
++ u8 tx_vlan_filtering_use_pvid;
++ u8 reserved2[4];
+ struct c2s_pri_trans_table_entry c2s_pri_trans_table;
+ };
+
+@@ -5448,7 +5454,8 @@ struct function_update_data {
+ u8 reserved1;
+ __le16 sd_vlan_tag;
+ __le16 sd_vlan_eth_type;
+- __le16 reserved0;
++ u8 tx_vlan_filtering_pvid_change_flg;
++ u8 reserved0;
+ __le32 reserved2;
+ };
+
diff --git a/patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch b/patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch
new file mode 100644
index 0000000000..9f97379e57
--- /dev/null
+++ b/patches.drivers/bnx2x-fix-spelling-mistake-dicline-decline.patch
@@ -0,0 +1,27 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 15 Apr 2019 16:47:03 +0100
+Subject: bnx2x: fix spelling mistake "dicline" -> "decline"
+Patch-mainline: v5.1-rc6
+Git-commit: 614c70f35cd77a9af8e2ca841dcdb121cec3068f
+References: bsc#1136498 jsc#SLE-4699
+
+There is a spelling mistake in a BNX2X_ERR message, fix it.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x
+ bnx2x_sample_bulletin(bp);
+
+ if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
+- BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
++ BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
+ rc = -EINVAL;
+ goto out;
+ }
diff --git a/patches.drivers/bnx2x-fix-various-indentation-issues.patch b/patches.drivers/bnx2x-fix-various-indentation-issues.patch
new file mode 100644
index 0000000000..617fffc1b3
--- /dev/null
+++ b/patches.drivers/bnx2x-fix-various-indentation-issues.patch
@@ -0,0 +1,324 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Mon, 14 Jan 2019 15:15:16 +0000
+Subject: bnx2x: fix various indentation issues
+Patch-mainline: v5.1-rc1
+Git-commit: 9fb0969f75823e59f1af14d587aec279c66bf4a7
+References: bsc#1136498 jsc#SLE-4699
+
+There are lines that have indentation issues, fix these.
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 2
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 178 +++++++++++------------
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 12 -
+ 4 files changed, 95 insertions(+), 99 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+@@ -449,7 +449,7 @@ static inline void bnx2x_init_fw_wrr(con
+ ccd[cos] =
+ (u32)input_data->cos_min_rate[cos] * 100 *
+ (T_FAIR_COEF / (8 * 100 * cosWeightSum));
+- if (ccd[cos] < pdata->fair_vars.fair_threshold
++ if (ccd[cos] < pdata->fair_vars.fair_threshold
+ + MIN_ABOVE_THRESH) {
+ ccd[cos] =
+ pdata->fair_vars.fair_threshold +
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -837,49 +837,45 @@ static int bnx2x_ets_e3b0_set_cos_bw(str
+
+ switch (cos_entry) {
+ case 0:
+- nig_reg_adress_crd_weight =
+- (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+- pbf_reg_adress_crd_weight = (port) ?
+- PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+- break;
++ nig_reg_adress_crd_weight =
++ (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
++ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
++ pbf_reg_adress_crd_weight = (port) ?
++ PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
++ break;
+ case 1:
+- nig_reg_adress_crd_weight = (port) ?
+- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+- pbf_reg_adress_crd_weight = (port) ?
+- PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+- break;
++ nig_reg_adress_crd_weight = (port) ?
++ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
++ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
++ pbf_reg_adress_crd_weight = (port) ?
++ PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
++ break;
+ case 2:
+- nig_reg_adress_crd_weight = (port) ?
+- NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
++ nig_reg_adress_crd_weight = (port) ?
++ NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
++ NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+
+- pbf_reg_adress_crd_weight = (port) ?
+- PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+- break;
++ pbf_reg_adress_crd_weight = (port) ?
++ PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
++ break;
+ case 3:
+- if (port)
++ if (port)
+ return -EINVAL;
+- nig_reg_adress_crd_weight =
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+- pbf_reg_adress_crd_weight =
+- PBF_REG_COS3_WEIGHT_P0;
+- break;
++ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
++ pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
++ break;
+ case 4:
+- if (port)
+- return -EINVAL;
+- nig_reg_adress_crd_weight =
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+- pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+- break;
++ if (port)
++ return -EINVAL;
++ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
++ pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
++ break;
+ case 5:
+- if (port)
+- return -EINVAL;
+- nig_reg_adress_crd_weight =
+- NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+- pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+- break;
++ if (port)
++ return -EINVAL;
++ nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
++ pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
++ break;
+ }
+
+ REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+@@ -966,7 +962,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_
+ if (pri >= max_num_of_cos) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+ "parameter Illegal strict priority\n");
+- return -EINVAL;
++ return -EINVAL;
+ }
+
+ if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
+@@ -1845,28 +1841,28 @@ static int bnx2x_emac_enable(struct link
+ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+ EMAC_TX_MODE_RESET);
+
+- /* pause enable/disable */
+- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+- EMAC_RX_MODE_FLOW_EN);
+-
+- bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+- (EMAC_TX_MODE_EXT_PAUSE_EN |
+- EMAC_TX_MODE_FLOW_EN));
+- if (!(params->feature_config_flags &
+- FEATURE_CONFIG_PFC_ENABLED)) {
+- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+- bnx2x_bits_en(bp, emac_base +
+- EMAC_REG_EMAC_RX_MODE,
+- EMAC_RX_MODE_FLOW_EN);
+-
+- if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+- bnx2x_bits_en(bp, emac_base +
+- EMAC_REG_EMAC_TX_MODE,
+- (EMAC_TX_MODE_EXT_PAUSE_EN |
+- EMAC_TX_MODE_FLOW_EN));
+- } else
+- bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+- EMAC_TX_MODE_FLOW_EN);
++ /* pause enable/disable */
++ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
++ EMAC_RX_MODE_FLOW_EN);
++
++ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
++ (EMAC_TX_MODE_EXT_PAUSE_EN |
++ EMAC_TX_MODE_FLOW_EN));
++ if (!(params->feature_config_flags &
++ FEATURE_CONFIG_PFC_ENABLED)) {
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
++ bnx2x_bits_en(bp, emac_base +
++ EMAC_REG_EMAC_RX_MODE,
++ EMAC_RX_MODE_FLOW_EN);
++
++ if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
++ bnx2x_bits_en(bp, emac_base +
++ EMAC_REG_EMAC_TX_MODE,
++ (EMAC_TX_MODE_EXT_PAUSE_EN |
++ EMAC_TX_MODE_FLOW_EN));
++ } else
++ bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
++ EMAC_TX_MODE_FLOW_EN);
+
+ /* KEEP_VLAN_TAG, promiscuous */
+ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
+@@ -6478,9 +6474,9 @@ int bnx2x_test_link(struct link_params *
+ MDIO_REG_BANK_GP_STATUS,
+ MDIO_GP_STATUS_TOP_AN_STATUS1,
+ &gp_status);
+- /* Link is up only if both local phy and external phy are up */
+- if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+- return -ESRCH;
++ /* Link is up only if both local phy and external phy are up */
++ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
++ return -ESRCH;
+ }
+ /* In XGXS loopback mode, do not check external PHY */
+ if (params->loopback_mode == LOOPBACK_XGXS)
+@@ -7293,8 +7289,8 @@ static int bnx2x_8073_xaui_wa(struct bnx
+ DP(NETIF_MSG_LINK,
+ "XAUI workaround has completed\n");
+ return 0;
+- }
+- usleep_range(3000, 6000);
++ }
++ usleep_range(3000, 6000);
+ }
+ break;
+ }
+@@ -12675,39 +12671,39 @@ static void bnx2x_init_bmac_loopback(str
+ struct link_vars *vars)
+ {
+ struct bnx2x *bp = params->bp;
+- vars->link_up = 1;
+- vars->line_speed = SPEED_10000;
+- vars->duplex = DUPLEX_FULL;
+- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+- vars->mac_type = MAC_TYPE_BMAC;
++ vars->link_up = 1;
++ vars->line_speed = SPEED_10000;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->mac_type = MAC_TYPE_BMAC;
+
+- vars->phy_flags = PHY_XGXS_FLAG;
++ vars->phy_flags = PHY_XGXS_FLAG;
+
+- bnx2x_xgxs_deassert(params);
++ bnx2x_xgxs_deassert(params);
+
+- /* Set bmac loopback */
+- bnx2x_bmac_enable(params, vars, 1, 1);
++ /* Set bmac loopback */
++ bnx2x_bmac_enable(params, vars, 1, 1);
+
+- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+ }
+
+ static void bnx2x_init_emac_loopback(struct link_params *params,
+ struct link_vars *vars)
+ {
+ struct bnx2x *bp = params->bp;
+- vars->link_up = 1;
+- vars->line_speed = SPEED_1000;
+- vars->duplex = DUPLEX_FULL;
+- vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+- vars->mac_type = MAC_TYPE_EMAC;
++ vars->link_up = 1;
++ vars->line_speed = SPEED_1000;
++ vars->duplex = DUPLEX_FULL;
++ vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
++ vars->mac_type = MAC_TYPE_EMAC;
+
+- vars->phy_flags = PHY_XGXS_FLAG;
++ vars->phy_flags = PHY_XGXS_FLAG;
+
+- bnx2x_xgxs_deassert(params);
+- /* Set bmac loopback */
+- bnx2x_emac_enable(params, vars, 1);
+- bnx2x_emac_program(params, vars);
+- REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
++ bnx2x_xgxs_deassert(params);
++ /* Set bmac loopback */
++ bnx2x_emac_enable(params, vars, 1);
++ bnx2x_emac_program(params, vars);
++ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port * 4, 0);
+ }
+
+ static void bnx2x_init_xmac_loopback(struct link_params *params,
+@@ -13073,12 +13069,12 @@ int bnx2x_link_reset(struct link_params
+ REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+ }
+
+- if (!CHIP_IS_E3(bp)) {
+- bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
+- } else {
+- bnx2x_set_xmac_rxtx(params, 0);
+- bnx2x_set_umac_rxtx(params, 0);
+- }
++ if (!CHIP_IS_E3(bp)) {
++ bnx2x_set_bmac_rx(bp, params->chip_id, port, 0);
++ } else {
++ bnx2x_set_xmac_rxtx(params, 0);
++ bnx2x_set_umac_rxtx(params, 0);
++ }
+ /* Disable emac */
+ if (!CHIP_IS_E3(bp))
+ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -11298,7 +11298,7 @@ static void bnx2x_link_settings_supporte
+ dev_info.port_hw_config[port].external_phy_config),
+ SHMEM_RD(bp,
+ dev_info.port_hw_config[port].external_phy_config2));
+- return;
++ return;
+ }
+
+ if (CHIP_IS_E3(bp))
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+@@ -2977,8 +2977,8 @@ static inline void bnx2x_mcast_hdl_pendi
+
+ cmd_pos->data.macs_num--;
+
+- DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+- cmd_pos->data.macs_num, cnt);
++ DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
++ cmd_pos->data.macs_num, cnt);
+
+ /* Break if we reached the maximum
+ * number of rules.
+@@ -3597,8 +3597,8 @@ static int bnx2x_mcast_validate_e1(struc
+ /* RESTORE command will restore the entire multicast configuration */
+ case BNX2X_MCAST_CMD_RESTORE:
+ p->mcast_list_len = reg_sz;
+- DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+- cmd, p->mcast_list_len);
++ DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
++ cmd, p->mcast_list_len);
+ break;
+
+ case BNX2X_MCAST_CMD_ADD:
+@@ -3735,8 +3735,8 @@ static inline int bnx2x_mcast_handle_res
+
+ i++;
+
+- DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
+- cfg_data.mac);
++ DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
++ cfg_data.mac);
+ }
+
+ *rdata_idx = i;
diff --git a/patches.drivers/bnxt_en-Fix-aggregation-buffer-leak-under-OOM-condit.patch b/patches.drivers/bnxt_en-Fix-aggregation-buffer-leak-under-OOM-condit.patch
new file mode 100644
index 0000000000..93cc33b8f4
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-aggregation-buffer-leak-under-OOM-condit.patch
@@ -0,0 +1,35 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 22 May 2019 19:12:54 -0400
+Subject: bnxt_en: Fix aggregation buffer leak under OOM condition.
+Patch-mainline: v5.2-rc3
+Git-commit: 296d5b54163964b7ae536b8b57dfbd21d4e868e1
+References: bsc#1134090 jsc#SLE-5954
+
+For every RX packet, the driver replenishes all buffers used for that
+packet and puts them back into the RX ring and RX aggregation ring.
+In one code path where the RX packet has one RX buffer and one or more
+aggregation buffers, we missed recycling the aggregation buffer(s) if
+we are unable to allocate a new SKB buffer. This leads to the
+aggregation ring slowly running out of buffers over time. Fix it
+by properly recycling the aggregation buffers.
+
+Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.")
+Reported-by: Rakesh Hemnani <rhemnani@fb.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1636,6 +1636,8 @@ static int bnxt_rx_pkt(struct bnxt *bp,
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ bnxt_reuse_rx_data(rxr, cons, data);
+ if (!skb) {
++ if (agg_bufs)
++ bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
+ rc = -ENOMEM;
+ goto next_rx;
+ }
diff --git a/patches.drivers/bnxt_en-Fix-possible-BUG-condition-when-calling-pci_.patch b/patches.drivers/bnxt_en-Fix-possible-BUG-condition-when-calling-pci_.patch
new file mode 100644
index 0000000000..2bf7a04c09
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-possible-BUG-condition-when-calling-pci_.patch
@@ -0,0 +1,101 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 22 May 2019 19:12:55 -0400
+Subject: bnxt_en: Fix possible BUG() condition when calling
+ pci_disable_msix().
+Patch-mainline: v5.2-rc3
+Git-commit: 1b3f0b75c39f534278a895c117282014e9d0ae1f
+References: bsc#1134090 jsc#SLE-5954
+
+When making configuration changes, the driver calls bnxt_close_nic()
+and then bnxt_open_nic() for the changes to take effect. A parameter
+irq_re_init is passed to the call sequence to indicate if IRQ
+should be re-initialized. This irq_re_init parameter needs to
+be included in the bnxt_reserve_rings() call. bnxt_reserve_rings()
+can only call pci_disable_msix() if the irq_re_init parameter is
+true, otherwise it may hit BUG() because some IRQs may not have been
+freed yet.
+
+Fixes: 41e8d7983752 ("bnxt_en: Modify the ring reservation functions for 57500 series chips.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 13 +++++++------
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2 +-
+ 4 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7508,22 +7508,23 @@ static void bnxt_clear_int_mode(struct b
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ }
+
+-int bnxt_reserve_rings(struct bnxt *bp)
++int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
+ {
+ int tcs = netdev_get_num_tc(bp->dev);
+- bool reinit_irq = false;
++ bool irq_cleared = false;
+ int rc;
+
+ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+- if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
++ if (irq_re_init && BNXT_NEW_RM(bp) &&
++ bnxt_get_num_msix(bp) != bp->total_irqs) {
+ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+- reinit_irq = true;
++ irq_cleared = true;
+ }
+ rc = __bnxt_reserve_rings(bp);
+- if (reinit_irq) {
++ if (irq_cleared) {
+ if (!rc)
+ rc = bnxt_init_int_mode(bp);
+ bnxt_ulp_irq_restart(bp, rc);
+@@ -8422,7 +8423,7 @@ static int __bnxt_open_nic(struct bnxt *
+ return rc;
+ }
+ }
+- rc = bnxt_reserve_rings(bp);
++ rc = bnxt_reserve_rings(bp, irq_re_init);
+ if (rc)
+ return rc;
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1776,7 +1776,7 @@ unsigned int bnxt_get_avail_stat_ctxs_fo
+ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+ unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
+ int bnxt_get_avail_msix(struct bnxt *bp, int num);
+-int bnxt_reserve_rings(struct bnxt *bp);
++int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
+ void bnxt_tx_disable(struct bnxt *bp);
+ void bnxt_tx_enable(struct bnxt *bp);
+ int bnxt_hwrm_set_pause(struct bnxt *);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -788,7 +788,7 @@ static int bnxt_set_channels(struct net_
+ */
+ }
+ } else {
+- rc = bnxt_reserve_rings(bp);
++ rc = bnxt_reserve_rings(bp, true);
+ }
+
+ return rc;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -147,7 +147,7 @@ static int bnxt_req_msix_vecs(struct bnx
+ bnxt_close_nic(bp, true, false);
+ rc = bnxt_open_nic(bp, true, false);
+ } else {
+- rc = bnxt_reserve_rings(bp);
++ rc = bnxt_reserve_rings(bp, true);
+ }
+ }
+ if (rc) {
diff --git a/patches.drivers/bnxt_en-Fix-possible-crash-in-bnxt_hwrm_ring_free-un.patch b/patches.drivers/bnxt_en-Fix-possible-crash-in-bnxt_hwrm_ring_free-un.patch
new file mode 100644
index 0000000000..ae9167f4e8
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-possible-crash-in-bnxt_hwrm_ring_free-un.patch
@@ -0,0 +1,65 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 25 Apr 2019 22:31:52 -0400
+Subject: bnxt_en: Fix possible crash in bnxt_hwrm_ring_free() under error
+ conditions.
+Patch-mainline: v5.1
+Git-commit: 1f83391bd6fc48f92f627b0ec0bce686d100c6a5
+References: bsc#1134090 jsc#SLE-5954
+
+If we encounter errors during open and proceed to clean up,
+bnxt_hwrm_ring_free() may crash if the rings we try to free have never
+been allocated. bnxt_cp_ring_for_rx() or bnxt_cp_ring_for_tx()
+may reference pointers that have not been allocated.
+
+Fix it by checking for valid fw_ring_id first before calling
+bnxt_cp_ring_for_rx() or bnxt_cp_ring_for_tx().
+
+Fixes: 2c61d2117ecb ("bnxt_en: Add helper functions to get firmware CP ring ID.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5131,10 +5131,10 @@ static void bnxt_hwrm_ring_free(struct b
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+- u32 cmpl_ring_id;
+
+- cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
++ u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
++
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_TX,
+ close_path ? cmpl_ring_id :
+@@ -5147,10 +5147,10 @@ static void bnxt_hwrm_ring_free(struct b
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+- u32 cmpl_ring_id;
+
+- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
++ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
++
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+@@ -5169,10 +5169,10 @@ static void bnxt_hwrm_ring_free(struct b
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+- u32 cmpl_ring_id;
+
+- cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ if (ring->fw_ring_id != INVALID_HW_RING_ID) {
++ u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
++
+ hwrm_ring_free_send_msg(bp, ring, type,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
diff --git a/patches.drivers/bnxt_en-Fix-statistics-context-reservation-logic.patch b/patches.drivers/bnxt_en-Fix-statistics-context-reservation-logic.patch
new file mode 100644
index 0000000000..def45086a7
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-statistics-context-reservation-logic.patch
@@ -0,0 +1,62 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 25 Apr 2019 22:31:54 -0400
+Subject: bnxt_en: Fix statistics context reservation logic.
+Patch-mainline: v5.1
+Git-commit: 3f93cd3f098e284c851acb89265ebe35b994a5c8
+References: bsc#1134090 jsc#SLE-5954
+
+In an earlier commit that fixes the number of stats contexts to
+reserve for the RDMA driver, we added a function parameter to pass in
+the number of stats contexts to all the relevant functions. The passed
+in parameter should have been used to set the enables field of the
+firmware message.
+
+Fixes: 780baad44f0f ("bnxt_en: Reserve 1 stat_ctx for RDMA driver.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5311,17 +5311,16 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt
+ req->num_tx_rings = cpu_to_le16(tx_rings);
+ if (BNXT_NEW_RM(bp)) {
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
++ enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
+ enables |= tx_rings + ring_grps ?
+- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= rx_rings ?
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+ } else {
+ enables |= cp_rings ?
+- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= ring_grps ?
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+ FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
+@@ -5361,14 +5360,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt
+ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+ FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
++ enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+ if (bp->flags & BNXT_FLAG_CHIP_P5) {
+ enables |= tx_rings + ring_grps ?
+- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ } else {
+ enables |= cp_rings ?
+- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
+ enables |= ring_grps ?
+ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ }
diff --git a/patches.drivers/bnxt_en-Fix-uninitialized-variable-usage-in-bnxt_rx_.patch b/patches.drivers/bnxt_en-Fix-uninitialized-variable-usage-in-bnxt_rx_.patch
new file mode 100644
index 0000000000..846194a49e
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-uninitialized-variable-usage-in-bnxt_rx_.patch
@@ -0,0 +1,53 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 25 Apr 2019 22:31:55 -0400
+Subject: bnxt_en: Fix uninitialized variable usage in bnxt_rx_pkt().
+Patch-mainline: v5.1
+Git-commit: 0b397b17a4120cb80f7bf89eb30587b3dd9b0d1d
+References: bsc#1134090 jsc#SLE-5954
+
+In bnxt_rx_pkt(), if the driver encounters BD errors, it will recycle
+the buffers and jump to the end where the uninitailized variable "len"
+is referenced. Fix it by adding a new jump label that will skip
+the length update. This is the most correct fix since the length
+may not be valid when we get this type of error.
+
+Fixes: 6a8788f25625 ("bnxt_en: add support for software dynamic interrupt moderation")
+Reported-by: Nathan Chancellor <natechancellor@gmail.com>
+Cc: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
+Tested-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1621,7 +1621,7 @@ static int bnxt_rx_pkt(struct bnxt *bp,
+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
+- goto next_rx;
++ goto next_rx_no_len;
+ }
+
+ len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
+@@ -1702,12 +1702,13 @@ static int bnxt_rx_pkt(struct bnxt *bp,
+ rc = 1;
+
+ next_rx:
+- rxr->rx_prod = NEXT_RX(prod);
+- rxr->rx_next_cons = NEXT_RX(cons);
+-
+ cpr->rx_packets += 1;
+ cpr->rx_bytes += len;
+
++next_rx_no_len:
++ rxr->rx_prod = NEXT_RX(prod);
++ rxr->rx_next_cons = NEXT_RX(cons);
++
+ next_rx_no_prod_no_len:
+ *raw_cons = tmp_raw_cons;
+
diff --git a/patches.drivers/bnxt_en-Improve-NQ-reservations.patch b/patches.drivers/bnxt_en-Improve-NQ-reservations.patch
new file mode 100644
index 0000000000..6880594624
--- /dev/null
+++ b/patches.drivers/bnxt_en-Improve-NQ-reservations.patch
@@ -0,0 +1,40 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sun, 5 May 2019 07:17:05 -0400
+Subject: bnxt_en: Improve NQ reservations.
+Patch-mainline: v5.2-rc1
+Git-commit: 01989c6b69d91a0df0af8d5c6b5f33d82a239ae0
+References: bsc#1134090 jsc#SLE-5954
+
+bnxt_need_reserve_rings() determines if any resources have changed and
+requires new reservation with firmware. The NQ checking is currently
+just an approximation. Improve the NQ checking logic to make it
+accurate. NQ reservation is only needed on 57500 PFs. This fix will
+eliminate unnecessary reservations and will reduce NQ reservations
+when some NQs have been released on 57500 PFs.
+
+Fixes: c0b8cda05e1d ("bnxt_en: Fix NQ/CP rings accounting on the new 57500 chips.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5499,11 +5499,13 @@ static bool bnxt_need_reserve_rings(stru
+ stat = bnxt_get_func_stat_ctxs(bp);
+ if (BNXT_NEW_RM(bp) &&
+ (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+- hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic ||
+- hw_resc->resv_stat_ctxs != stat ||
++ hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
+ (hw_resc->resv_hw_ring_grps != grp &&
+ !(bp->flags & BNXT_FLAG_CHIP_P5))))
+ return true;
++ if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
++ hw_resc->resv_irqs != nq)
++ return true;
+ return false;
+ }
+
diff --git a/patches.drivers/bnxt_en-Improve-multicast-address-setup-logic.patch b/patches.drivers/bnxt_en-Improve-multicast-address-setup-logic.patch
new file mode 100644
index 0000000000..f1d3a362b8
--- /dev/null
+++ b/patches.drivers/bnxt_en-Improve-multicast-address-setup-logic.patch
@@ -0,0 +1,41 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 25 Apr 2019 22:31:50 -0400
+Subject: bnxt_en: Improve multicast address setup logic.
+Patch-mainline: v5.1
+Git-commit: b4e30e8e7ea1d1e35ffd64ca46f7d9a7f227b4bf
+References: bsc#1134090 jsc#SLE-5954
+
+The driver builds a list of multicast addresses and sends it to the
+firmware when the driver's ndo_set_rx_mode() is called. In rare
+cases, the firmware can fail this call if internal resources to
+add multicast addresses are exhausted. In that case, we should
+try the call again by setting the ALL_MCAST flag which is more
+guaranteed to succeed.
+
+Fixes: c0c050c58d84 ("bnxt_en: New Broadcom ethernet driver.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8889,8 +8889,15 @@ static int bnxt_cfg_rx_mode(struct bnxt
+
+ skip_uc:
+ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
++ if (rc && vnic->mc_list_count) {
++ netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
++ rc);
++ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
++ vnic->mc_list_count = 0;
++ rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
++ }
+ if (rc)
+- netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
++ netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
+ rc);
+
+ return rc;
diff --git a/patches.drivers/bnxt_en-Pass-correct-extended-TX-port-statistics-siz.patch b/patches.drivers/bnxt_en-Pass-correct-extended-TX-port-statistics-siz.patch
new file mode 100644
index 0000000000..7a57cee44f
--- /dev/null
+++ b/patches.drivers/bnxt_en-Pass-correct-extended-TX-port-statistics-siz.patch
@@ -0,0 +1,51 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 25 Apr 2019 22:31:53 -0400
+Subject: bnxt_en: Pass correct extended TX port statistics size to firmware.
+Patch-mainline: v5.1
+Git-commit: ad361adf0d08f1135f3845c6b3a36be7cc0bfda5
+References: bsc#1134090 jsc#SLE-5954
+
+If driver determines that extended TX port statistics are not supported
+or allocation of the data structure fails, make sure to pass 0 TX stats
+size to firmware to disable it. The firmware returned TX stats size should
+also be set to 0 for consistency. This will prevent
+bnxt_get_ethtool_stats() from accessing the NULL TX stats pointer in
+case there is mismatch between firmware and driver.
+
+Fixes: 36e53349b60b ("bnxt_en: Add additional extended port statistics.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6745,6 +6745,7 @@ static int bnxt_hwrm_port_qstats_ext(str
+ struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
+ struct hwrm_port_qstats_ext_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
++ u32 tx_stat_size;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+@@ -6754,13 +6755,16 @@ static int bnxt_hwrm_port_qstats_ext(str
+ req.port_id = cpu_to_le16(pf->port_id);
+ req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+ req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
+- req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
++ tx_stat_size = bp->hw_tx_port_stats_ext ?
++ sizeof(*bp->hw_tx_port_stats_ext) : 0;
++ req.tx_stat_size = cpu_to_le16(tx_stat_size);
+ req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
+- bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
++ bp->fw_tx_stats_ext_size = tx_stat_size ?
++ le16_to_cpu(resp->tx_stat_size) / 8 : 0;
+ } else {
+ bp->fw_rx_stats_ext_size = 0;
+ bp->fw_tx_stats_ext_size = 0;
diff --git a/patches.drivers/bnxt_en-Reduce-memory-usage-when-running-in-kdump-ke.patch b/patches.drivers/bnxt_en-Reduce-memory-usage-when-running-in-kdump-ke.patch
new file mode 100644
index 0000000000..0cf53f174b
--- /dev/null
+++ b/patches.drivers/bnxt_en-Reduce-memory-usage-when-running-in-kdump-ke.patch
@@ -0,0 +1,61 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 22 May 2019 19:12:56 -0400
+Subject: bnxt_en: Reduce memory usage when running in kdump kernel.
+Patch-mainline: v5.2-rc3
+Git-commit: d629522e1d66561f38e5c8d4f52bb6d254ec0707
+References: bsc#1134090 jsc#SLE-5954
+
+Skip RDMA context memory allocations, reduce to 1 ring, and disable
+TPA when running in the kdump kernel. Without this patch, the driver
+fails to initialize with memory allocation errors when running in a
+typical kdump kernel.
+
+Fixes: cf6daed098d1 ("bnxt_en: Increase context memory allocations on 57500 chips for RDMA.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++--
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 +++-
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6340,7 +6340,7 @@ static int bnxt_alloc_ctx_mem(struct bnx
+ if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
+ return 0;
+
+- if (bp->flags & BNXT_FLAG_ROCE_CAP) {
++ if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
+ pg_lvl = 2;
+ extra_qps = 65536;
+ extra_srqs = 8192;
+@@ -10281,7 +10281,7 @@ static int bnxt_set_dflt_rings(struct bn
+
+ if (sh)
+ bp->flags |= BNXT_FLAG_SHARED_RINGS;
+- dflt_rings = netif_get_num_default_rss_queues();
++ dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
+ /* Reduce default rings on multi-port cards so that total default
+ * rings do not exceed CPU count.
+ */
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -20,6 +20,7 @@
+
+ #include <linux/interrupt.h>
+ #include <linux/rhashtable.h>
++#include <linux/crash_dump.h>
+ #include <net/devlink.h>
+ #include <net/dst_metadata.h>
+ #include <net/switchdev.h>
+@@ -1367,7 +1368,8 @@ struct bnxt {
+ #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
+ #define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
+ #define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
+- !(bp->flags & BNXT_FLAG_CHIP_P5))
++ !(bp->flags & BNXT_FLAG_CHIP_P5) && \
++ !is_kdump_kernel())
+
+ /* Chip class phase 5 */
+ #define BNXT_CHIP_P5(bp) \
diff --git a/patches.drivers/broadcom-Mark-expected-switch-fall-throughs.patch b/patches.drivers/broadcom-Mark-expected-switch-fall-throughs.patch
new file mode 100644
index 0000000000..aa1dd2a75f
--- /dev/null
+++ b/patches.drivers/broadcom-Mark-expected-switch-fall-throughs.patch
@@ -0,0 +1,75 @@
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Wed, 23 Jan 2019 01:41:19 -0600
+Subject: broadcom: Mark expected switch fall-throughs
+Patch-mainline: v5.1-rc1
+Git-commit: 015496c43f399675589132d3dd48a55d8c5fc5ba
+References: bsc#1136498 jsc#SLE-4699
+
+In preparation to enabling -Wimplicit-fallthrough, mark switch cases
+where we are expecting to fall through.
+
+This patch fixes the following warnings:
+
+drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c:6336:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
+drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c:2231:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
+drivers/net/ethernet/broadcom/tg3.c:722:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
+drivers/net/ethernet/broadcom/tg3.c:783:6: warning: this statement may fall through [-Wimplicit-fallthrough=]
+
+Warning level 3 was used: -Wimplicit-fallthrough=3
+
+This patch is part of the ongoing efforts to enabling
+-Wimplicit-fallthrough.
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Acked-by: Sudarsana Kalluru <Sudarsana.Kalluru@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 2 +-
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 2 +-
+ drivers/net/ethernet/broadcom/tg3.c | 4 ++--
+ 3 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+@@ -6335,7 +6335,7 @@ int bnx2x_set_led(struct link_params *pa
+ */
+ if (!vars->link_up)
+ break;
+- /* else: fall through */
++ /* fall through */
+ case LED_MODE_ON:
+ if (((params->phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+@@ -2229,7 +2229,7 @@ int bnx2x_vf_free(struct bnx2x *bp, stru
+ rc = bnx2x_vf_close(bp, vf);
+ if (rc)
+ goto op_err;
+- /* Fallthrough to release resources */
++ /* Fall through - to release resources */
+ case VF_ACQUIRED:
+ DP(BNX2X_MSG_IOV, "about to free resources\n");
+ bnx2x_vf_free_resc(bp, vf);
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -719,7 +719,7 @@ static int tg3_ape_lock(struct tg3 *tp,
+ case TG3_APE_LOCK_GPIO:
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
+ return 0;
+- /* else: fall through */
++ /* fall through */
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ if (!tp->pci_fn)
+@@ -780,7 +780,7 @@ static void tg3_ape_unlock(struct tg3 *t
+ case TG3_APE_LOCK_GPIO:
+ if (tg3_asic_rev(tp) == ASIC_REV_5761)
+ return;
+- /* else: fall through */
++ /* fall through */
+ case TG3_APE_LOCK_GRC:
+ case TG3_APE_LOCK_MEM:
+ if (!tp->pci_fn)
diff --git a/patches.drivers/chelsio-use-BUG-instead-of-BUG_ON-1.patch b/patches.drivers/chelsio-use-BUG-instead-of-BUG_ON-1.patch
new file mode 100644
index 0000000000..7c38cac597
--- /dev/null
+++ b/patches.drivers/chelsio-use-BUG-instead-of-BUG_ON-1.patch
@@ -0,0 +1,68 @@
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Mon, 25 Mar 2019 13:49:16 +0100
+Subject: chelsio: use BUG() instead of BUG_ON(1)
+Patch-mainline: v5.1-rc3
+Git-commit: 047a013f8d0af8299ce2d02af152de6a30165ccc
+References: bsc#1136345 jsc#SLE-4681
+
+clang warns about possible bugs in a dead code branch after
+BUG_ON(1) when CONFIG_PROFILE_ALL_BRANCHES is enabled:
+
+ drivers/net/ethernet/chelsio/cxgb4/sge.c:479:3: error: variable 'buf_size' is used uninitialized whenever 'if'
+ condition is false [-Werror,-Wsometimes-uninitialized]
+ BUG_ON(1);
+ ^~~~~~~~~
+ include/asm-generic/bug.h:61:36: note: expanded from macro 'BUG_ON'
+ #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
+ ^~~~~~~~~~~~~~~~~~~
+ include/linux/compiler.h:48:23: note: expanded from macro 'unlikely'
+ # define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x)))
+ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ drivers/net/ethernet/chelsio/cxgb4/sge.c:482:9: note: uninitialized use occurs here
+ return buf_size;
+ ^~~~~~~~
+ drivers/net/ethernet/chelsio/cxgb4/sge.c:479:3: note: remove the 'if' if its condition is always true
+ BUG_ON(1);
+ ^
+ include/asm-generic/bug.h:61:32: note: expanded from macro 'BUG_ON'
+ #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
+ ^
+ drivers/net/ethernet/chelsio/cxgb4/sge.c:459:14: note: initialize the variable 'buf_size' to silence this warning
+ int buf_size;
+ ^
+ = 0
+
+Use BUG() here to create simpler code that clang understands
+correctly.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2 +-
+ drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2
+ }
+
+ /* should never happen! */
+- BUG_ON(1);
++ BUG();
+ return NULL;
+ }
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -476,7 +476,7 @@ static inline int get_buf_size(struct ad
+ break;
+
+ default:
+- BUG_ON(1);
++ BUG();
+ }
+
+ return buf_size;
diff --git a/patches.drivers/crypto-chcr-ESN-for-Inline-IPSec-Tx.patch b/patches.drivers/crypto-chcr-ESN-for-Inline-IPSec-Tx.patch
new file mode 100644
index 0000000000..6eef3ecb15
--- /dev/null
+++ b/patches.drivers/crypto-chcr-ESN-for-Inline-IPSec-Tx.patch
@@ -0,0 +1,353 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Fri, 30 Nov 2018 14:32:09 +0530
+Subject: crypto: chcr - ESN for Inline IPSec Tx
+Patch-mainline: v5.0-rc1
+Git-commit: 8362ea16f69fe59c4d012f0748e586ad09391f41
+References: bsc#1136353 jsc#SLE-4688
+
+Send SPI, 64b seq nos and 64b IV with aadiv drop for inline crypto.
+This information is added in outgoing packet after the CPL TX PKT XT
+and removed by hardware.
+The aad, auth and cipher offsets are then adjusted for ESN enabled tunnel.
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_core.h | 9 +
+ drivers/crypto/chelsio/chcr_ipsec.c | 175 ++++++++++++++++++++++++++++--------
+ 2 files changed, 148 insertions(+), 36 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_core.h
++++ b/drivers/crypto/chelsio/chcr_core.h
+@@ -159,8 +159,17 @@ struct chcr_ipsec_wr {
+ struct chcr_ipsec_req req;
+ };
+
++#define ESN_IV_INSERT_OFFSET 12
++struct chcr_ipsec_aadiv {
++ __be32 spi;
++ u8 seq_no[8];
++ u8 iv[8];
++};
++
+ struct ipsec_sa_entry {
+ int hmac_ctrl;
++ u16 esn;
++ u16 imm;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -76,12 +76,14 @@ static int chcr_xfrm_add_state(struct xf
+ static void chcr_xfrm_del_state(struct xfrm_state *x);
+ static void chcr_xfrm_free_state(struct xfrm_state *x);
+ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
++static void chcr_advance_esn_state(struct xfrm_state *x);
+
+ static const struct xfrmdev_ops chcr_xfrmdev_ops = {
+ .xdo_dev_state_add = chcr_xfrm_add_state,
+ .xdo_dev_state_delete = chcr_xfrm_del_state,
+ .xdo_dev_state_free = chcr_xfrm_free_state,
+ .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
++ .xdo_dev_state_advance_esn = chcr_advance_esn_state,
+ };
+
+ /* Add offload xfrms to Chelsio Interface */
+@@ -210,10 +212,6 @@ static int chcr_xfrm_add_state(struct xf
+ pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ return -EINVAL;
+ }
+- if (x->props.flags & XFRM_STATE_ESN) {
+- pr_debug("CHCR: Cannot offload ESN xfrm states\n");
+- return -EINVAL;
+- }
+ if (x->props.family != AF_INET &&
+ x->props.family != AF_INET6) {
+ pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+@@ -266,6 +264,8 @@ static int chcr_xfrm_add_state(struct xf
+ }
+
+ sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
++ if (x->props.flags & XFRM_STATE_ESN)
++ sa_entry->esn = 1;
+ chcr_ipsec_setkey(x, sa_entry);
+ x->xso.offload_handle = (unsigned long)sa_entry;
+ try_module_get(THIS_MODULE);
+@@ -294,31 +294,57 @@ static void chcr_xfrm_free_state(struct
+
+ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+ {
+- /* Offload with IP options is not supported yet */
+- if (ip_hdr(skb)->ihl > 5)
+- return false;
+-
++ if (x->props.family == AF_INET) {
++ /* Offload with IP options is not supported yet */
++ if (ip_hdr(skb)->ihl > 5)
++ return false;
++ } else {
++ /* Offload with IPv6 extension headers is not support yet */
++ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
++ return false;
++ }
+ return true;
+ }
+
+-static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
++static void chcr_advance_esn_state(struct xfrm_state *x)
++{
++ /* do nothing */
++ if (!x->xso.offload_handle)
++ return;
++}
++
++static inline int is_eth_imm(const struct sk_buff *skb,
++ struct ipsec_sa_entry *sa_entry)
+ {
++ unsigned int kctx_len;
+ int hdrlen;
+
++ kctx_len = sa_entry->kctx_len;
+ hdrlen = sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
++ if (sa_entry->esn)
++ hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
++ << 4);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
+ }
+
+ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+- unsigned int kctx_len)
++ struct ipsec_sa_entry *sa_entry)
+ {
++ unsigned int kctx_len;
+ unsigned int flits;
+- int hdrlen = is_eth_imm(skb, kctx_len);
++ int aadivlen;
++ int hdrlen;
++
++ kctx_len = sa_entry->kctx_len;
++ hdrlen = is_eth_imm(skb, sa_entry);
++ aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
++ 16) : 0;
++ aadivlen <<= 4;
+
+ /* If the skb is small enough, we can pump it out as a work request
+ * with only immediate data. In that case we just have to have the
+@@ -341,13 +367,69 @@ static inline unsigned int calc_tx_sec_f
+ flits += (sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) +
+ kctx_len +
+- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
++ sizeof(struct cpl_tx_pkt_core) +
++ aadivlen) / sizeof(__be64);
+ return flits;
+ }
+
++inline void *copy_esn_pktxt(struct sk_buff *skb,
++ struct net_device *dev,
++ void *pos,
++ struct ipsec_sa_entry *sa_entry)
++{
++ struct chcr_ipsec_aadiv *aadiv;
++ struct ulptx_idata *sc_imm;
++ struct ip_esp_hdr *esphdr;
++ struct xfrm_offload *xo;
++ struct sge_eth_txq *q;
++ struct adapter *adap;
++ struct port_info *pi;
++ __be64 seqno;
++ u32 qidx;
++ u32 seqlo;
++ u8 *iv;
++ int eoq;
++ int len;
++
++ pi = netdev_priv(dev);
++ adap = pi->adapter;
++ qidx = skb->queue_mapping;
++ q = &adap->sge.ethtxq[qidx + pi->first_qset];
++
++ /* end of queue, reset pos to start of queue */
++ eoq = (void *)q->q.stat - pos;
++ if (!eoq)
++ pos = q->q.desc;
++
++ len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
++ memset(pos, 0, len);
++ aadiv = (struct chcr_ipsec_aadiv *)pos;
++ esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
++ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
++ xo = xfrm_offload(skb);
++
++ aadiv->spi = (esphdr->spi);
++ seqlo = htonl(esphdr->seq_no);
++ seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
++ memcpy(aadiv->seq_no, &seqno, 8);
++ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
++ memcpy(aadiv->iv, iv, 8);
++
++ if (sa_entry->imm) {
++ sc_imm = (struct ulptx_idata *)(pos +
++ (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
++ sizeof(__be64)) << 3));
++ sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
++ sc_imm->len = cpu_to_be32(sa_entry->imm);
++ }
++ pos += len;
++ return pos;
++}
++
+ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+- struct net_device *dev,
+- void *pos)
++ struct net_device *dev,
++ void *pos,
++ struct ipsec_sa_entry *sa_entry)
+ {
+ struct cpl_tx_pkt_core *cpl;
+ struct sge_eth_txq *q;
+@@ -382,6 +464,9 @@ inline void *copy_cpltx_pktxt(struct sk_
+ cpl->ctrl1 = cpu_to_be64(cntrl);
+
+ pos += sizeof(struct cpl_tx_pkt_core);
++ /* Copy ESN info for HW */
++ if (sa_entry->esn)
++ pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
+ return pos;
+ }
+
+@@ -428,7 +513,7 @@ inline void *copy_key_cpltx_pktxt(struct
+ pos = (u8 *)q->q.desc + (key_len - left);
+ }
+ /* Copy CPL TX PKT XT */
+- pos = copy_cpltx_pktxt(skb, dev, pos);
++ pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);
+
+ return pos;
+ }
+@@ -441,10 +526,16 @@ inline void *chcr_crypto_wreq(struct sk_
+ {
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+- unsigned int immdatalen = 0;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
++ u16 immdatalen = 0;
+ unsigned int flits;
++ u32 ivinoffset;
++ u32 aadstart;
++ u32 aadstop;
++ u32 ciphstart;
++ u32 ivdrop = 0;
++ u32 esnlen = 0;
+ u32 wr_mid;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+@@ -453,10 +544,17 @@ inline void *chcr_crypto_wreq(struct sk_
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+- flits = calc_tx_sec_flits(skb, kctx_len);
++ flits = calc_tx_sec_flits(skb, sa_entry);
++ if (sa_entry->esn)
++ ivdrop = 1;
+
+- if (is_eth_imm(skb, kctx_len))
++ if (is_eth_imm(skb, sa_entry)) {
+ immdatalen = skb->len;
++ sa_entry->imm = immdatalen;
++ }
++
++ if (sa_entry->esn)
++ esnlen = sizeof(struct chcr_ipsec_aadiv);
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+@@ -481,33 +579,38 @@ inline void *chcr_crypto_wreq(struct sk_
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+ sizeof(struct cpl_tx_pkt_core) +
+- immdatalen);
++ esnlen +
++ (esnlen ? 0 : immdatalen));
+
+ /* CPL_SEC_PDU */
++ ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
++ (skb_transport_offset(skb) +
++ sizeof(struct ip_esp_hdr) + 1);
+ wr->req.sec_cpl.op_ivinsrtofst = htonl(
+ CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
+ CPL_TX_SEC_PDU_CPLLEN_V(2) |
+ CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
+ CPL_TX_SEC_PDU_IVINSRTOFST_V(
+- (skb_transport_offset(skb) +
+- sizeof(struct ip_esp_hdr) + 1)));
++ ivinoffset));
+
+- wr->req.sec_cpl.pldlen = htonl(skb->len);
++ wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
++ aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
++ aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
++ (skb_transport_offset(skb) +
++ sizeof(struct ip_esp_hdr));
++ ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
++ GCM_ESP_IV_SIZE + 1;
++ ciphstart += sa_entry->esn ? esnlen : 0;
+
+ wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+- (skb_transport_offset(skb) + 1),
+- (skb_transport_offset(skb) +
+- sizeof(struct ip_esp_hdr)),
+- (skb_transport_offset(skb) +
+- sizeof(struct ip_esp_hdr) +
+- GCM_ESP_IV_SIZE + 1), 0);
++ aadstart,
++ aadstop,
++ ciphstart, 0);
+
+ wr->req.sec_cpl.cipherstop_lo_authinsert =
+- FILL_SEC_CPL_AUTHINSERT(0, skb_transport_offset(skb) +
+- sizeof(struct ip_esp_hdr) +
+- GCM_ESP_IV_SIZE + 1,
+- sa_entry->authsize,
+- sa_entry->authsize);
++ FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
++ sa_entry->authsize,
++ sa_entry->authsize);
+ wr->req.sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
+ CHCR_SCMD_CIPHER_MODE_AES_GCM,
+@@ -515,7 +618,7 @@ inline void *chcr_crypto_wreq(struct sk_
+ sa_entry->hmac_ctrl,
+ ivsize >> 1);
+ wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
+- 0, 0, 0);
++ 0, ivdrop, 0);
+
+ pos += sizeof(struct fw_ulptx_wr) +
+ sizeof(struct ulp_txpkt) +
+@@ -593,7 +696,7 @@ out_free: dev_kfree_skb_any(skb);
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+- flits = calc_tx_sec_flits(skb, sa_entry->kctx_len);
++ flits = calc_tx_sec_flits(skb, sa_entry);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+@@ -606,7 +709,7 @@ out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_BUSY;
+ }
+
+- if (is_eth_imm(skb, kctx_len))
++ if (is_eth_imm(skb, sa_entry))
+ immediate = true;
+
+ if (!immediate &&
diff --git a/patches.drivers/crypto-chcr-small-packet-Tx-stalls-the-queue.patch b/patches.drivers/crypto-chcr-small-packet-Tx-stalls-the-queue.patch
new file mode 100644
index 0000000000..59fd70de4d
--- /dev/null
+++ b/patches.drivers/crypto-chcr-small-packet-Tx-stalls-the-queue.patch
@@ -0,0 +1,34 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Fri, 30 Nov 2018 14:31:48 +0530
+Subject: crypto: chcr - small packet Tx stalls the queue
+Patch-mainline: v5.0-rc1
+Git-commit: c35828ea906a7c76632a0211e59c392903cd4615
+References: bsc#1136353 jsc#SLE-4688
+
+Immediate packets sent to hardware should include the work
+request length in calculating the flits. WR occupy one flit and
+if not accounted result in invalid request which stalls the HW
+queue.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_ipsec.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct
+
+ static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
+ {
+- int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
++ int hdrlen;
++
++ hdrlen = sizeof(struct fw_ulptx_wr) +
++ sizeof(struct chcr_ipsec_req) + kctx_len;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
diff --git a/patches.drivers/crypto-chelsio-Fix-NULL-pointer-dereference.patch b/patches.drivers/crypto-chelsio-Fix-NULL-pointer-dereference.patch
new file mode 100644
index 0000000000..e94ca941dd
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Fix-NULL-pointer-dereference.patch
@@ -0,0 +1,33 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Thu, 2 May 2019 03:46:55 -0700
+Subject: crypto: chelsio - Fix NULL pointer dereference
+Patch-mainline: v5.2-rc1
+Git-commit: b4f91664307d5bf4f0d93da12a6146f9cf3559fb
+References: bsc#1136353 jsc#SLE-4688
+
+Do not request FW to generate cidx update if there is less
+space in tx queue to post new request.
+SGE DBP 1 pidx increment too large
+BUG: unable to handle kernel NULL pointer dereference at
+0000000000000124
+SGE error for queue 101
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_ipsec.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -575,7 +575,8 @@ inline void *chcr_crypto_wreq(struct sk_
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+ q->q.stops++;
+- wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
++ if (!q->dbqt)
++ wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
+ }
+ wr_mid |= FW_ULPTX_WR_DATA_F;
+ wr->wreq.flowid_len16 = htonl(wr_mid);
diff --git a/patches.drivers/crypto-chelsio-Fix-passing-zero-to-PTR_ERR-warning-i.patch b/patches.drivers/crypto-chelsio-Fix-passing-zero-to-PTR_ERR-warning-i.patch
new file mode 100644
index 0000000000..9e6caa8b9d
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Fix-passing-zero-to-PTR_ERR-warning-i.patch
@@ -0,0 +1,34 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Wed, 23 Jan 2019 14:55:17 +0800
+Subject: crypto: chelsio - Fix passing zero to 'PTR_ERR' warning in
+ chcr_aead_op
+Patch-mainline: v5.1-rc1
+Git-commit: b04a27ca175d66f3662ad334f538254d560838f9
+References: bsc#1136353 jsc#SLE-4688
+
+Fix a static code checker warning:
+drivers/crypto/chelsio/chcr_algo.c:3681
+ chcr_aead_op() warn: passing zero to 'PTR_ERR'
+
+Fixes: 2debd3325e55 ("crypto: chcr - Add AEAD algos.")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -3676,9 +3676,9 @@ static int chcr_aead_op(struct aead_requ
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
+
+- if (IS_ERR(skb) || !skb) {
++ if (IS_ERR_OR_NULL(skb)) {
+ chcr_dec_wrcount(cdev);
+- return PTR_ERR(skb);
++ return PTR_ERR_OR_ZERO(skb);
+ }
+
+ skb->dev = u_ctx->lldi.ports[0];
diff --git a/patches.drivers/crypto-chelsio-Fix-softlockup-with-heavy-I-O.patch b/patches.drivers/crypto-chelsio-Fix-softlockup-with-heavy-I-O.patch
new file mode 100644
index 0000000000..b66c4877d1
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Fix-softlockup-with-heavy-I-O.patch
@@ -0,0 +1,59 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Thu, 2 May 2019 03:47:27 -0700
+Subject: crypto: chelsio - Fix softlockup with heavy I/O
+Patch-mainline: v5.2-rc1
+Git-commit: 33ddc108c5a1cfba72a08a92f35a0ba5bbd1cfd8
+References: bsc#1136353 jsc#SLE-4688
+
+removed un-necessary lock_chcr_dev to protect device state
+DETACH. lock is not required to protect I/O count
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 13 +++----------
+ drivers/crypto/chelsio/chcr_core.c | 4 ----
+ 2 files changed, 3 insertions(+), 14 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -200,17 +200,10 @@ void chcr_verify_tag(struct aead_request
+
+ static int chcr_inc_wrcount(struct chcr_dev *dev)
+ {
+- int err = 0;
+-
+- spin_lock_bh(&dev->lock_chcr_dev);
+ if (dev->state == CHCR_DETACH)
+- err = 1;
+- else
+- atomic_inc(&dev->inflight);
+-
+- spin_unlock_bh(&dev->lock_chcr_dev);
+-
+- return err;
++ return 1;
++ atomic_inc(&dev->inflight);
++ return 0;
+ }
+
+ static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+--- a/drivers/crypto/chelsio/chcr_core.c
++++ b/drivers/crypto/chelsio/chcr_core.c
+@@ -243,15 +243,11 @@ static void chcr_detach_device(struct ul
+ {
+ struct chcr_dev *dev = &u_ctx->dev;
+
+- spin_lock_bh(&dev->lock_chcr_dev);
+ if (dev->state == CHCR_DETACH) {
+- spin_unlock_bh(&dev->lock_chcr_dev);
+ pr_debug("Detached Event received for already detach device\n");
+ return;
+ }
+ dev->state = CHCR_DETACH;
+- spin_unlock_bh(&dev->lock_chcr_dev);
+-
+ if (atomic_read(&dev->inflight) != 0) {
+ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
+ wait_for_completion(&dev->detach_comp);
diff --git a/patches.drivers/crypto-chelsio-Fix-wrong-error-counter-increments.patch b/patches.drivers/crypto-chelsio-Fix-wrong-error-counter-increments.patch
new file mode 100644
index 0000000000..538ba77ef1
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Fix-wrong-error-counter-increments.patch
@@ -0,0 +1,82 @@
+From: Harsh Jain <harsh@chelsio.com>
+Date: Tue, 11 Dec 2018 16:21:42 +0530
+Subject: crypto: chelsio - Fix wrong error counter increments
+Patch-mainline: v5.0-rc1
+Git-commit: f31ba0f95f1998118098978dbfb25ecbec6b0891
+References: bsc#1136353 jsc#SLE-4688
+
+Fix error counter increment in AEAD decrypt operation when
+validation of tag is done in Driver instead of H/W.
+
+Signed-off-by: Harsh Jain <harsh@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 9 +++++----
+ drivers/crypto/chelsio/chcr_core.c | 11 +++++------
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -218,7 +218,7 @@ static inline void chcr_dec_wrcount(stru
+ atomic_dec(&dev->inflight);
+ }
+
+-static inline void chcr_handle_aead_resp(struct aead_request *req,
++static inline int chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+ {
+@@ -233,6 +233,8 @@ static inline void chcr_handle_aead_resp
+ }
+ chcr_dec_wrcount(dev);
+ req->base.complete(&req->base, err);
++
++ return err;
+ }
+
+ static void get_aes_decrypt_key(unsigned char *dec_key,
+@@ -2072,14 +2074,13 @@ int chcr_handle_resp(struct crypto_async
+
+ switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+- chcr_handle_aead_resp(aead_request_cast(req), input, err);
++ err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
+ break;
+
+ case CRYPTO_ALG_TYPE_ABLKCIPHER:
+- err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
++ chcr_handle_cipher_resp(ablkcipher_request_cast(req),
+ input, err);
+ break;
+-
+ case CRYPTO_ALG_TYPE_AHASH:
+ chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
+ }
+--- a/drivers/crypto/chelsio/chcr_core.c
++++ b/drivers/crypto/chelsio/chcr_core.c
+@@ -169,12 +169,8 @@ static int cpl_fw6_pld_handler(struct ch
+
+ ack_err_status =
+ ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
+- if (ack_err_status) {
+- if (CHK_MAC_ERR_BIT(ack_err_status) ||
+- CHK_PAD_ERR_BIT(ack_err_status))
+- error_status = -EBADMSG;
+- atomic_inc(&adap->chcr_stats.error);
+- }
++ if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
++ error_status = -EBADMSG;
+ /* call completion callback with failure status */
+ if (req) {
+ error_status = chcr_handle_resp(req, input, error_status);
+@@ -182,6 +178,9 @@ static int cpl_fw6_pld_handler(struct ch
+ pr_err("Incorrect request address from the firmware\n");
+ return -EFAULT;
+ }
++ if (error_status)
++ atomic_inc(&adap->chcr_stats.error);
++
+ return 0;
+ }
+
diff --git a/patches.drivers/crypto-chelsio-Fixed-Traffic-Stall.patch b/patches.drivers/crypto-chelsio-Fixed-Traffic-Stall.patch
new file mode 100644
index 0000000000..253b6fea43
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Fixed-Traffic-Stall.patch
@@ -0,0 +1,132 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Mon, 18 Feb 2019 02:04:37 -0800
+Subject: crypto: chelsio - Fixed Traffic Stall
+Patch-mainline: v5.1-rc1
+Git-commit: 8cd9d183731a8b54e7ca40de1c72e3c6bec40113
+References: bsc#1136353 jsc#SLE-4688
+
+Fixed Traffic Stall caused by
+- Subcommands except last should have more bit set
+- For esn case subcommand is required for linear skb only
+- Also Optimized is_eth_imm usage
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_ipsec.c | 33 ++++++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 13 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -336,7 +336,8 @@ static inline int is_eth_imm(const struc
+ }
+
+ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+- struct ipsec_sa_entry *sa_entry)
++ struct ipsec_sa_entry *sa_entry,
++ bool *immediate)
+ {
+ unsigned int kctx_len;
+ unsigned int flits;
+@@ -354,8 +355,10 @@ static inline unsigned int calc_tx_sec_f
+ * TX Packet header plus the skb data in the Work Request.
+ */
+
+- if (hdrlen)
++ if (hdrlen) {
++ *immediate = true;
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
++ }
+
+ flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
+
+@@ -418,7 +421,7 @@ inline void *copy_esn_pktxt(struct sk_bu
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ memcpy(aadiv->iv, iv, 8);
+
+- if (is_eth_imm(skb, sa_entry)) {
++ if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
+ sc_imm = (struct ulptx_idata *)(pos +
+ (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ sizeof(__be64)) << 3));
+@@ -531,15 +534,18 @@ inline void *chcr_crypto_wreq(struct sk_
+ struct adapter *adap = pi->adapter;
+ unsigned int ivsize = GCM_ESP_IV_SIZE;
+ struct chcr_ipsec_wr *wr;
++ bool immediate = false;
+ u16 immdatalen = 0;
+ unsigned int flits;
+ u32 ivinoffset;
+ u32 aadstart;
+ u32 aadstop;
+ u32 ciphstart;
++ u16 sc_more = 0;
+ u32 ivdrop = 0;
+ u32 esnlen = 0;
+ u32 wr_mid;
++ u16 ndesc;
+ int qidx = skb_get_queue_mapping(skb);
+ struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
+ unsigned int kctx_len = sa_entry->kctx_len;
+@@ -547,20 +553,24 @@ inline void *chcr_crypto_wreq(struct sk_
+
+ atomic_inc(&adap->chcr_stats.ipsec_cnt);
+
+- flits = calc_tx_sec_flits(skb, sa_entry);
++ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
++ ndesc = DIV_ROUND_UP(flits, 2);
+ if (sa_entry->esn)
+ ivdrop = 1;
+
+- if (is_eth_imm(skb, sa_entry))
++ if (immediate)
+ immdatalen = skb->len;
+
+- if (sa_entry->esn)
++ if (sa_entry->esn) {
+ esnlen = sizeof(struct chcr_ipsec_aadiv);
++ if (!skb_is_nonlinear(skb))
++ sc_more = 1;
++ }
+
+ /* WR Header */
+ wr = (struct chcr_ipsec_wr *)pos;
+ wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
+- wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
++ wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);
+
+ if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+ netif_tx_stop_queue(q->txq);
+@@ -572,10 +582,10 @@ inline void *chcr_crypto_wreq(struct sk_
+
+ /* ULPTX */
+ wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
+- wr->req.ulptx.len = htonl(DIV_ROUND_UP(flits, 2) - 1);
++ wr->req.ulptx.len = htonl(ndesc - 1);
+
+ /* Sub-command */
+- wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen);
++ wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
+ wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
+ sizeof(wr->req.key_ctx) +
+ kctx_len +
+@@ -696,7 +706,7 @@ out_free: dev_kfree_skb_any(skb);
+
+ cxgb4_reclaim_completed_tx(adap, &q->q, true);
+
+- flits = calc_tx_sec_flits(skb, sa_entry);
++ flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
+ ndesc = flits_to_desc(flits);
+ credits = txq_avail(&q->q) - ndesc;
+
+@@ -709,9 +719,6 @@ out_free: dev_kfree_skb_any(skb);
+ return NETDEV_TX_BUSY;
+ }
+
+- if (is_eth_imm(skb, sa_entry))
+- immediate = true;
+-
+ if (!immediate &&
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ q->mapping_err++;
diff --git a/patches.drivers/crypto-chelsio-Handle-PCI-shutdown-event.patch b/patches.drivers/crypto-chelsio-Handle-PCI-shutdown-event.patch
new file mode 100644
index 0000000000..16950f965c
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Handle-PCI-shutdown-event.patch
@@ -0,0 +1,761 @@
+From: Harsh Jain <harsh@chelsio.com>
+Date: Tue, 11 Dec 2018 16:21:40 +0530
+Subject: crypto: chelsio - Handle PCI shutdown event
+Patch-mainline: v5.0-rc1
+Git-commit: fef4912b66d6246d958d97382d20d0dd23bcf0bc
+References: bsc#1136353 jsc#SLE-4688
+
+chcr receives "CXGB4_STATE_DETACH" event on PCI Shutdown.
+Wait for processing of inflight request and Mark the device unavailable.
+
+Signed-off-by: Harsh Jain <harsh@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 157 ++++++++++++++++++++++++++-----
+ drivers/crypto/chelsio/chcr_core.c | 184 ++++++++++++++++++++++++-------------
+ drivers/crypto/chelsio/chcr_core.h | 34 +++++-
+ 3 files changed, 280 insertions(+), 95 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *A
+
+ static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
+ {
+- return ctx->dev->u_ctx;
++ return container_of(ctx->dev, struct uld_ctx, dev);
+ }
+
+ static inline int is_ofld_imm(const struct sk_buff *skb)
+@@ -198,17 +198,40 @@ void chcr_verify_tag(struct aead_request
+ *err = 0;
+ }
+
++static int chcr_inc_wrcount(struct chcr_dev *dev)
++{
++ int err = 0;
++
++ spin_lock_bh(&dev->lock_chcr_dev);
++ if (dev->state == CHCR_DETACH)
++ err = 1;
++ else
++ atomic_inc(&dev->inflight);
++
++ spin_unlock_bh(&dev->lock_chcr_dev);
++
++ return err;
++}
++
++static inline void chcr_dec_wrcount(struct chcr_dev *dev)
++{
++ atomic_dec(&dev->inflight);
++}
++
+ static inline void chcr_handle_aead_resp(struct aead_request *req,
+ unsigned char *input,
+ int err)
+ {
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
++ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
++ struct chcr_dev *dev = a_ctx(tfm)->dev;
+
+ chcr_aead_common_exit(req);
+ if (reqctx->verify == VERIFY_SW) {
+ chcr_verify_tag(req, input, &err);
+ reqctx->verify = VERIFY_HW;
+ }
++ chcr_dec_wrcount(dev);
+ req->base.complete(&req->base, err);
+ }
+
+@@ -1100,6 +1123,7 @@ static int chcr_handle_cipher_resp(struc
+ struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
+ struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
+ struct cipher_wr_param wrparam;
++ struct chcr_dev *dev = c_ctx(tfm)->dev;
+ int bytes;
+
+ if (err)
+@@ -1161,6 +1185,7 @@ static int chcr_handle_cipher_resp(struc
+ unmap:
+ chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+ complete:
++ chcr_dec_wrcount(dev);
+ req->base.complete(&req->base, err);
+ return err;
+ }
+@@ -1187,7 +1212,10 @@ static int process_cipher(struct ablkcip
+ ablkctx->enckey_len, req->nbytes, ivsize);
+ goto error;
+ }
+- chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
++
++ err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
++ if (err)
++ goto error;
+ if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
+ AES_MIN_KEY_SIZE +
+ sizeof(struct cpl_rx_phys_dsgl) +
+@@ -1276,15 +1304,21 @@ error:
+ static int chcr_aes_encrypt(struct ablkcipher_request *req)
+ {
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
++ struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct sk_buff *skb = NULL;
+ int err, isfull = 0;
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+
++ err = chcr_inc_wrcount(dev);
++ if (err)
++ return -ENXIO;
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ c_ctx(tfm)->tx_qidx))) {
+ isfull = 1;
+- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+- return -ENOSPC;
++ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ err = -ENOSPC;
++ goto error;
++ }
+ }
+
+ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+@@ -1295,15 +1329,23 @@ static int chcr_aes_encrypt(struct ablkc
+ set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return isfull ? -EBUSY : -EINPROGRESS;
++error:
++ chcr_dec_wrcount(dev);
++ return err;
+ }
+
+ static int chcr_aes_decrypt(struct ablkcipher_request *req)
+ {
+ struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
++ struct chcr_dev *dev = c_ctx(tfm)->dev;
+ struct sk_buff *skb = NULL;
+ int err, isfull = 0;
+
++ err = chcr_inc_wrcount(dev);
++ if (err)
++ return -ENXIO;
++
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ c_ctx(tfm)->tx_qidx))) {
+ isfull = 1;
+@@ -1333,10 +1375,11 @@ static int chcr_device_init(struct chcr_
+ if (!ctx->dev) {
+ u_ctx = assign_chcr_device();
+ if (!u_ctx) {
++ err = -ENXIO;
+ pr_err("chcr device assignment fails\n");
+ goto out;
+ }
+- ctx->dev = u_ctx->dev;
++ ctx->dev = &u_ctx->dev;
+ adap = padap(ctx->dev);
+ ntxq = u_ctx->lldi.ntxq;
+ rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
+@@ -1561,6 +1604,7 @@ static int chcr_ahash_update(struct ahas
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = NULL;
++ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct sk_buff *skb;
+ u8 remainder = 0, bs;
+ unsigned int nbytes = req->nbytes;
+@@ -1569,12 +1613,6 @@ static int chcr_ahash_update(struct ahas
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ u_ctx = ULD_CTX(h_ctx(rtfm));
+- if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+- h_ctx(rtfm)->tx_qidx))) {
+- isfull = 1;
+- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+- return -ENOSPC;
+- }
+
+ if (nbytes + req_ctx->reqlen >= bs) {
+ remainder = (nbytes + req_ctx->reqlen) % bs;
+@@ -1585,10 +1623,27 @@ static int chcr_ahash_update(struct ahas
+ req_ctx->reqlen += nbytes;
+ return 0;
+ }
++ error = chcr_inc_wrcount(dev);
++ if (error)
++ return -ENXIO;
++ /* Detach state for CHCR means lldi or padap is freed. Increasing
++ * inflight count for dev guarantees that lldi and padap is valid
++ */
++ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
++ h_ctx(rtfm)->tx_qidx))) {
++ isfull = 1;
++ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ error = -ENOSPC;
++ goto err;
++ }
++ }
++
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+- if (error)
+- return -ENOMEM;
++ if (error) {
++ error = -ENOMEM;
++ goto err;
++ }
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+ params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
+@@ -1628,6 +1683,8 @@ static int chcr_ahash_update(struct ahas
+ return isfull ? -EBUSY : -EINPROGRESS;
+ unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
++err:
++ chcr_dec_wrcount(dev);
+ return error;
+ }
+
+@@ -1645,10 +1702,16 @@ static int chcr_ahash_final(struct ahash
+ {
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
++ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct hash_wr_param params;
+ struct sk_buff *skb;
+ struct uld_ctx *u_ctx = NULL;
+ u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
++ int error = -EINVAL;
++
++ error = chcr_inc_wrcount(dev);
++ if (error)
++ return -ENXIO;
+
+ chcr_init_hctx_per_wr(req_ctx);
+ u_ctx = ULD_CTX(h_ctx(rtfm));
+@@ -1685,19 +1748,25 @@ static int chcr_ahash_final(struct ahash
+ }
+ params.hash_size = crypto_ahash_digestsize(rtfm);
+ skb = create_hash_wr(req, &params);
+- if (IS_ERR(skb))
+- return PTR_ERR(skb);
++ if (IS_ERR(skb)) {
++ error = PTR_ERR(skb);
++ goto err;
++ }
+ req_ctx->reqlen = 0;
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
+ chcr_send_wr(skb);
+ return -EINPROGRESS;
++err:
++ chcr_dec_wrcount(dev);
++ return error;
+ }
+
+ static int chcr_ahash_finup(struct ahash_request *req)
+ {
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
++ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct uld_ctx *u_ctx = NULL;
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+@@ -1706,17 +1775,24 @@ static int chcr_ahash_finup(struct ahash
+
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+ u_ctx = ULD_CTX(h_ctx(rtfm));
++ error = chcr_inc_wrcount(dev);
++ if (error)
++ return -ENXIO;
+
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
+- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+- return -ENOSPC;
++ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ error = -ENOSPC;
++ goto err;
++ }
+ }
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+- if (error)
+- return -ENOMEM;
++ if (error) {
++ error = -ENOMEM;
++ goto err;
++ }
+
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+@@ -1773,6 +1849,8 @@ static int chcr_ahash_finup(struct ahash
+ return isfull ? -EBUSY : -EINPROGRESS;
+ unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
++err:
++ chcr_dec_wrcount(dev);
+ return error;
+ }
+
+@@ -1780,6 +1858,7 @@ static int chcr_ahash_digest(struct ahas
+ {
+ struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
+ struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
++ struct chcr_dev *dev = h_ctx(rtfm)->dev;
+ struct uld_ctx *u_ctx = NULL;
+ struct sk_buff *skb;
+ struct hash_wr_param params;
+@@ -1788,19 +1867,26 @@ static int chcr_ahash_digest(struct ahas
+
+ rtfm->init(req);
+ bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
++ error = chcr_inc_wrcount(dev);
++ if (error)
++ return -ENXIO;
+
+ u_ctx = ULD_CTX(h_ctx(rtfm));
+ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ h_ctx(rtfm)->tx_qidx))) {
+ isfull = 1;
+- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+- return -ENOSPC;
++ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ error = -ENOSPC;
++ goto err;
++ }
+ }
+
+ chcr_init_hctx_per_wr(req_ctx);
+ error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
+- if (error)
+- return -ENOMEM;
++ if (error) {
++ error = -ENOMEM;
++ goto err;
++ }
+
+ get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
+ params.kctx_len = roundup(params.alg_prm.result_size, 16);
+@@ -1853,6 +1939,8 @@ static int chcr_ahash_digest(struct ahas
+ return isfull ? -EBUSY : -EINPROGRESS;
+ unmap:
+ chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
++err:
++ chcr_dec_wrcount(dev);
+ return error;
+ }
+
+@@ -1924,6 +2012,7 @@ static inline void chcr_handle_ahash_res
+ int digestsize, updated_digestsize;
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
++ struct chcr_dev *dev = h_ctx(tfm)->dev;
+
+ if (input == NULL)
+ goto out;
+@@ -1966,6 +2055,7 @@ unmap:
+
+
+ out:
++ chcr_dec_wrcount(dev);
+ req->base.complete(&req->base, err);
+ }
+
+@@ -3553,27 +3643,42 @@ static int chcr_aead_op(struct aead_requ
+ create_wr_t create_wr_fn)
+ {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
++ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+ struct uld_ctx *u_ctx;
+ struct sk_buff *skb;
+ int isfull = 0;
++ struct chcr_dev *cdev;
+
+- if (!a_ctx(tfm)->dev) {
++ cdev = a_ctx(tfm)->dev;
++ if (!cdev) {
+ pr_err("chcr : %s : No crypto device.\n", __func__);
+ return -ENXIO;
+ }
++
++ if (chcr_inc_wrcount(cdev)) {
++ /* Detach state for CHCR means lldi or padap is freed.
++ * We cannot increment fallback here.
++ */
++ return chcr_aead_fallback(req, reqctx->op);
++ }
++
+ u_ctx = ULD_CTX(a_ctx(tfm));
+ if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+ a_ctx(tfm)->tx_qidx)) {
+ isfull = 1;
+- if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
++ if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
++ chcr_dec_wrcount(cdev);
+ return -ENOSPC;
++ }
+ }
+
+ /* Form a WR from req */
+ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
+
+- if (IS_ERR(skb) || !skb)
++ if (IS_ERR(skb) || !skb) {
++ chcr_dec_wrcount(cdev);
+ return PTR_ERR(skb);
++ }
+
+ skb->dev = u_ctx->lldi.ports[0];
+ set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
+--- a/drivers/crypto/chelsio/chcr_core.c
++++ b/drivers/crypto/chelsio/chcr_core.c
+@@ -26,10 +26,7 @@
+ #include "chcr_core.h"
+ #include "cxgb4_uld.h"
+
+-static LIST_HEAD(uld_ctx_list);
+-static DEFINE_MUTEX(dev_mutex);
+-static atomic_t dev_count;
+-static struct uld_ctx *ctx_rr;
++static struct chcr_driver_data drv_data;
+
+ typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input);
+ static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input);
+@@ -53,6 +50,29 @@ static struct cxgb4_uld_info chcr_uld_in
+ #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+ };
+
++static void detach_work_fn(struct work_struct *work)
++{
++ struct chcr_dev *dev;
++
++ dev = container_of(work, struct chcr_dev, detach_work.work);
++
++ if (atomic_read(&dev->inflight)) {
++ dev->wqretry--;
++ if (dev->wqretry) {
++ pr_debug("Request Inflight Count %d\n",
++ atomic_read(&dev->inflight));
++
++ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
++ } else {
++ WARN(1, "CHCR:%d request Still Pending\n",
++ atomic_read(&dev->inflight));
++ complete(&dev->detach_comp);
++ }
++ } else {
++ complete(&dev->detach_comp);
++ }
++}
++
+ struct uld_ctx *assign_chcr_device(void)
+ {
+ struct uld_ctx *u_ctx = NULL;
+@@ -63,56 +83,70 @@ struct uld_ctx *assign_chcr_device(void)
+ * Although One session must use the same device to
+ * maintain request-response ordering.
+ */
+- mutex_lock(&dev_mutex);
+- if (!list_empty(&uld_ctx_list)) {
+- u_ctx = ctx_rr;
+- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+- ctx_rr = list_first_entry(&uld_ctx_list,
+- struct uld_ctx,
+- entry);
++ mutex_lock(&drv_data.drv_mutex);
++ if (!list_empty(&drv_data.act_dev)) {
++ u_ctx = drv_data.last_dev;
++ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
++ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
++ struct uld_ctx, entry);
+ else
+- ctx_rr = list_next_entry(ctx_rr, entry);
++ drv_data.last_dev =
++ list_next_entry(drv_data.last_dev, entry);
+ }
+- mutex_unlock(&dev_mutex);
++ mutex_unlock(&drv_data.drv_mutex);
+ return u_ctx;
+ }
+
+-static int chcr_dev_add(struct uld_ctx *u_ctx)
++static void chcr_dev_add(struct uld_ctx *u_ctx)
+ {
+ struct chcr_dev *dev;
+
+- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+- if (!dev)
+- return -ENXIO;
+-
+- spin_lock_init(&dev->lock_chcr_dev);
+- u_ctx->dev = dev;
+- dev->u_ctx = u_ctx;
+- atomic_inc(&dev_count);
+- mutex_lock(&dev_mutex);
+- list_add_tail(&u_ctx->entry, &uld_ctx_list);
+- if (!ctx_rr)
+- ctx_rr = u_ctx;
+- mutex_unlock(&dev_mutex);
+- return 0;
++ dev = &u_ctx->dev;
++ dev->state = CHCR_ATTACH;
++ atomic_set(&dev->inflight, 0);
++ mutex_lock(&drv_data.drv_mutex);
++ list_move(&u_ctx->entry, &drv_data.act_dev);
++ if (!drv_data.last_dev)
++ drv_data.last_dev = u_ctx;
++ mutex_unlock(&drv_data.drv_mutex);
+ }
+
+-static int chcr_dev_remove(struct uld_ctx *u_ctx)
++static void chcr_dev_init(struct uld_ctx *u_ctx)
+ {
+- if (ctx_rr == u_ctx) {
+- if (list_is_last(&ctx_rr->entry, &uld_ctx_list))
+- ctx_rr = list_first_entry(&uld_ctx_list,
+- struct uld_ctx,
+- entry);
++ struct chcr_dev *dev;
++
++ dev = &u_ctx->dev;
++ spin_lock_init(&dev->lock_chcr_dev);
++ INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
++ init_completion(&dev->detach_comp);
++ dev->state = CHCR_INIT;
++ dev->wqretry = WQ_RETRY;
++ atomic_inc(&drv_data.dev_count);
++ atomic_set(&dev->inflight, 0);
++ mutex_lock(&drv_data.drv_mutex);
++ list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
++ if (!drv_data.last_dev)
++ drv_data.last_dev = u_ctx;
++ mutex_unlock(&drv_data.drv_mutex);
++}
++
++static int chcr_dev_move(struct uld_ctx *u_ctx)
++{
++ mutex_lock(&drv_data.drv_mutex);
++ if (drv_data.last_dev == u_ctx) {
++ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
++ drv_data.last_dev = list_first_entry(&drv_data.act_dev,
++ struct uld_ctx, entry);
+ else
+- ctx_rr = list_next_entry(ctx_rr, entry);
++ drv_data.last_dev =
++ list_next_entry(drv_data.last_dev, entry);
+ }
+- list_del(&u_ctx->entry);
+- if (list_empty(&uld_ctx_list))
+- ctx_rr = NULL;
+- kfree(u_ctx->dev);
+- u_ctx->dev = NULL;
+- atomic_dec(&dev_count);
++ list_move(&u_ctx->entry, &drv_data.inact_dev);
++ if (list_empty(&drv_data.act_dev))
++ drv_data.last_dev = NULL;
++ atomic_dec(&drv_data.dev_count);
++ mutex_unlock(&drv_data.drv_mutex);
++
+ return 0;
+ }
+
+@@ -167,6 +201,7 @@ static void *chcr_uld_add(const struct c
+ goto out;
+ }
+ u_ctx->lldi = *lld;
++ chcr_dev_init(u_ctx);
+ #ifdef CONFIG_CHELSIO_IPSEC_INLINE
+ if (lld->crypto & ULP_CRYPTO_IPSEC_INLINE)
+ chcr_add_xfrmops(lld);
+@@ -179,7 +214,7 @@ int chcr_uld_rx_handler(void *handle, co
+ const struct pkt_gl *pgl)
+ {
+ struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
+- struct chcr_dev *dev = u_ctx->dev;
++ struct chcr_dev *dev = &u_ctx->dev;
+ const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;
+
+ if (rpl->opcode != CPL_FW6_PLD) {
+@@ -201,6 +236,28 @@ int chcr_uld_tx_handler(struct sk_buff *
+ }
+ #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
++static void chcr_detach_device(struct uld_ctx *u_ctx)
++{
++ struct chcr_dev *dev = &u_ctx->dev;
++
++ spin_lock_bh(&dev->lock_chcr_dev);
++ if (dev->state == CHCR_DETACH) {
++ spin_unlock_bh(&dev->lock_chcr_dev);
++ pr_debug("Detached Event received for already detach device\n");
++ return;
++ }
++ dev->state = CHCR_DETACH;
++ spin_unlock_bh(&dev->lock_chcr_dev);
++
++ if (atomic_read(&dev->inflight) != 0) {
++ schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
++ wait_for_completion(&dev->detach_comp);
++ }
++
++ // Move u_ctx to inactive_dev list
++ chcr_dev_move(u_ctx);
++}
++
+ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
+ {
+ struct uld_ctx *u_ctx = handle;
+@@ -208,23 +265,16 @@ static int chcr_uld_state_change(void *h
+
+ switch (state) {
+ case CXGB4_STATE_UP:
+- if (!u_ctx->dev) {
+- ret = chcr_dev_add(u_ctx);
+- if (ret != 0)
+- return ret;
++ if (u_ctx->dev.state != CHCR_INIT) {
++ // ALready Initialised.
++ return 0;
+ }
+- if (atomic_read(&dev_count) == 1)
+- ret = start_crypto();
++ chcr_dev_add(u_ctx);
++ ret = start_crypto();
+ break;
+
+ case CXGB4_STATE_DETACH:
+- if (u_ctx->dev) {
+- mutex_lock(&dev_mutex);
+- chcr_dev_remove(u_ctx);
+- mutex_unlock(&dev_mutex);
+- }
+- if (!atomic_read(&dev_count))
+- stop_crypto();
++ chcr_detach_device(u_ctx);
+ break;
+
+ case CXGB4_STATE_START_RECOVERY:
+@@ -237,7 +287,13 @@ static int chcr_uld_state_change(void *h
+
+ static int __init chcr_crypto_init(void)
+ {
++ INIT_LIST_HEAD(&drv_data.act_dev);
++ INIT_LIST_HEAD(&drv_data.inact_dev);
++ atomic_set(&drv_data.dev_count, 0);
++ mutex_init(&drv_data.drv_mutex);
++ drv_data.last_dev = NULL;
+ cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
++
+ return 0;
+ }
+
+@@ -245,18 +301,20 @@ static void __exit chcr_crypto_exit(void
+ {
+ struct uld_ctx *u_ctx, *tmp;
+
+- if (atomic_read(&dev_count))
+- stop_crypto();
++ stop_crypto();
+
++ cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
+ /* Remove all devices from list */
+- mutex_lock(&dev_mutex);
+- list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+- if (u_ctx->dev)
+- chcr_dev_remove(u_ctx);
++ mutex_lock(&drv_data.drv_mutex);
++ list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
++ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+- mutex_unlock(&dev_mutex);
+- cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
++ list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
++ list_del(&u_ctx->entry);
++ kfree(u_ctx);
++ }
++ mutex_unlock(&drv_data.drv_mutex);
+ }
+
+ module_init(chcr_crypto_init);
+--- a/drivers/crypto/chelsio/chcr_core.h
++++ b/drivers/crypto/chelsio/chcr_core.h
+@@ -47,7 +47,7 @@
+
+ #define MAX_PENDING_REQ_TO_HW 20
+ #define CHCR_TEST_RESPONSE_TIMEOUT 1000
+-
++#define WQ_DETACH_TM (msecs_to_jiffies(50))
+ #define PAD_ERROR_BIT 1
+ #define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
+
+@@ -61,9 +61,6 @@
+ #define HASH_WR_MIN_LEN (sizeof(struct chcr_wr) + \
+ DUMMY_BYTES + \
+ sizeof(struct ulptx_sgl))
+-
+-#define padap(dev) pci_get_drvdata(dev->u_ctx->lldi.pdev)
+-
+ struct uld_ctx;
+
+ struct _key_ctx {
+@@ -121,6 +118,20 @@ struct _key_ctx {
+ #define KEYCTX_TX_WR_AUTHIN_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+
++#define WQ_RETRY 5
++struct chcr_driver_data {
++ struct list_head act_dev;
++ struct list_head inact_dev;
++ atomic_t dev_count;
++ struct mutex drv_mutex;
++ struct uld_ctx *last_dev;
++};
++
++enum chcr_state {
++ CHCR_INIT = 0,
++ CHCR_ATTACH,
++ CHCR_DETACH,
++};
+ struct chcr_wr {
+ struct fw_crypto_lookaside_wr wreq;
+ struct ulp_txpkt ulptx;
+@@ -131,14 +142,18 @@ struct chcr_wr {
+
+ struct chcr_dev {
+ spinlock_t lock_chcr_dev;
+- struct uld_ctx *u_ctx;
++ enum chcr_state state;
++ atomic_t inflight;
++ int wqretry;
++ struct delayed_work detach_work;
++ struct completion detach_comp;
+ unsigned char tx_channel_id;
+ };
+
+ struct uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+- struct chcr_dev *dev;
++ struct chcr_dev dev;
+ };
+
+ struct sge_opaque_hdr {
+@@ -189,6 +204,13 @@ static inline unsigned int sgl_len(unsig
+ return (3 * n) / 2 + (n & 1) + 2;
+ }
+
++static inline void *padap(struct chcr_dev *dev)
++{
++ struct uld_ctx *u_ctx = container_of(dev, struct uld_ctx, dev);
++
++ return pci_get_drvdata(u_ctx->lldi.pdev);
++}
++
+ struct uld_ctx *assign_chcr_device(void);
+ int chcr_send_wr(struct sk_buff *skb);
+ int start_crypto(void);
diff --git a/patches.drivers/crypto-chelsio-Inline-single-pdu-only.patch b/patches.drivers/crypto-chelsio-Inline-single-pdu-only.patch
new file mode 100644
index 0000000000..430a635b03
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Inline-single-pdu-only.patch
@@ -0,0 +1,28 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Thu, 17 Jan 2019 09:19:19 -0800
+Subject: crypto: chelsio - Inline single pdu only
+Patch-mainline: v5.1-rc1
+Git-commit: 27c6feb0fb33a665a746346e76714826a5be5d10
+References: bsc#1136353 jsc#SLE-4688
+
+Inline single pdu else take co-pro path
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_ipsec.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -303,6 +303,9 @@ static bool chcr_ipsec_offload_ok(struct
+ if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
+ return false;
+ }
++ /* Inline single pdu */
++ if (skb_shinfo(skb)->gso_size)
++ return false;
+ return true;
+ }
+
diff --git a/patches.drivers/crypto-chelsio-Reset-counters-on-cxgb4-Detach.patch b/patches.drivers/crypto-chelsio-Reset-counters-on-cxgb4-Detach.patch
new file mode 100644
index 0000000000..3909bb9dc6
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Reset-counters-on-cxgb4-Detach.patch
@@ -0,0 +1,36 @@
+From: Harsh Jain <harsh@chelsio.com>
+Date: Tue, 11 Dec 2018 16:21:41 +0530
+Subject: crypto: chelsio - Reset counters on cxgb4 Detach
+Patch-mainline: v5.0-rc1
+Git-commit: 6501ab5ed4d925cce4c2a1c49b63583c42e65bd8
+References: bsc#1136353 jsc#SLE-4688
+
+Reset the counters on receiving detach from Cxgb4.
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_core.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/crypto/chelsio/chcr_core.c
++++ b/drivers/crypto/chelsio/chcr_core.c
+@@ -132,6 +132,8 @@ static void chcr_dev_init(struct uld_ctx
+
+ static int chcr_dev_move(struct uld_ctx *u_ctx)
+ {
++ struct adapter *adap;
++
+ mutex_lock(&drv_data.drv_mutex);
+ if (drv_data.last_dev == u_ctx) {
+ if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
+@@ -144,6 +146,8 @@ static int chcr_dev_move(struct uld_ctx
+ list_move(&u_ctx->entry, &drv_data.inact_dev);
+ if (list_empty(&drv_data.act_dev))
+ drv_data.last_dev = NULL;
++ adap = padap(&u_ctx->dev);
++ memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
+ atomic_dec(&drv_data.dev_count);
+ mutex_unlock(&drv_data.drv_mutex);
+
diff --git a/patches.drivers/crypto-chelsio-Swap-location-of-AAD-and-IV-sent-in-W.patch b/patches.drivers/crypto-chelsio-Swap-location-of-AAD-and-IV-sent-in-W.patch
new file mode 100644
index 0000000000..854c6d0fe4
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Swap-location-of-AAD-and-IV-sent-in-W.patch
@@ -0,0 +1,537 @@
+From: Harsh Jain <harsh@chelsio.com>
+Date: Tue, 11 Dec 2018 16:21:37 +0530
+Subject: crypto: chelsio - Swap location of AAD and IV sent in WR
+Patch-mainline: v5.0-rc1
+Git-commit: 1f479e4cfd08f20e48dfde07b27e3180e0901252
+References: bsc#1136353 jsc#SLE-4688
+
+Send input as IV | AAD | Data. It will allow sending IV as Immediate
+Data and Creates space in Work request to add more dma mapped entries.
+
+Signed-off-by: Harsh Jain <harsh@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 212 ++++++++++++++++-------------------
+ drivers/crypto/chelsio/chcr_algo.h | 2
+ drivers/crypto/chelsio/chcr_crypto.h | 10 -
+ 3 files changed, 104 insertions(+), 120 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -2215,10 +2215,7 @@ static int chcr_aead_common_init(struct
+ error = -ENOMEM;
+ goto err;
+ }
+- reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
+- CHCR_SRC_SG_SIZE, 0);
+- reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
+- CHCR_SRC_SG_SIZE, req->assoclen);
++
+ return 0;
+ err:
+ return error;
+@@ -2268,10 +2265,10 @@ static struct sk_buff *create_authenc_wr
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+ unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
+- unsigned int kctx_len = 0, dnents;
+- unsigned int assoclen = req->assoclen;
++ unsigned int kctx_len = 0, dnents, snents;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
++ u8 *ivptr;
+ int null = 0;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+@@ -2288,24 +2285,20 @@ static struct sk_buff *create_authenc_wr
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+ null = 1;
+- assoclen = 0;
+- reqctx->aad_nents = 0;
+ }
+- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+- dnents += sg_nents_xlen(req->dst, req->cryptlen +
+- (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE,
+- req->assoclen);
++ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
++ (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
+ dnents += MIN_AUTH_SG; // For IV
+-
++ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
++ CHCR_SRC_SG_SIZE, 0);
+ dst_size = get_space_for_phys_dsgl(dnents);
+ kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
+ - sizeof(chcr_req->key_ctx);
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
++ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
+ SGE_MAX_WR_LEN;
+- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16)
+- : (sgl_len(reqctx->src_nents + reqctx->aad_nents
+- + MIN_GCM_SG) * 8);
++ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
++ : (sgl_len(snents) * 8);
+ transhdr_len += temp;
+ transhdr_len = roundup(transhdr_len, 16);
+
+@@ -2315,7 +2308,7 @@ static struct sk_buff *create_authenc_wr
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+ }
+- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
++ skb = alloc_skb(transhdr_len, flags);
+ if (!skb) {
+ error = -ENOMEM;
+ goto err;
+@@ -2331,16 +2324,16 @@ static struct sk_buff *create_authenc_wr
+ * to the hardware spec
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
+- assoclen + 1);
+- chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
++ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, 1);
++ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+- assoclen ? 1 : 0, assoclen,
+- assoclen + IV + 1,
++ null ? 0 : 1 + IV,
++ null ? 0 : IV + req->assoclen,
++ req->assoclen + IV + 1,
+ (temp & 0x1F0) >> 4);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
+ temp & 0xF,
+- null ? 0 : assoclen + IV + 1,
++ null ? 0 : req->assoclen + IV + 1,
+ temp, temp);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
+@@ -2367,23 +2360,24 @@ static struct sk_buff *create_authenc_wr
+
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
++ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
++ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
++ ulptx = (struct ulptx_sgl *)(ivptr + IV);
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
+ subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
+- memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
+- memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
++ memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
++ memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
+ CTR_RFC3686_IV_SIZE);
+- *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
++ *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
+ CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
+ } else {
+- memcpy(reqctx->iv, req->iv, IV);
++ memcpy(ivptr, req->iv, IV);
+ }
+- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+- chcr_add_aead_src_ent(req, ulptx, assoclen);
++ chcr_add_aead_dst_ent(req, phys_cpl, qid);
++ chcr_add_aead_src_ent(req, ulptx);
+ atomic_inc(&adap->chcr_stats.cipher_rqst);
+- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
++ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
++ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, 0);
+ reqctx->skb = skb;
+@@ -2470,8 +2464,7 @@ void chcr_aead_dma_unmap(struct device *
+ }
+
+ void chcr_add_aead_src_ent(struct aead_request *req,
+- struct ulptx_sgl *ulptx,
+- unsigned int assoclen)
++ struct ulptx_sgl *ulptx)
+ {
+ struct ulptx_walk ulp_walk;
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+@@ -2484,28 +2477,20 @@ void chcr_add_aead_src_ent(struct aead_r
+ buf += reqctx->b0_len;
+ }
+ sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+- buf, assoclen, 0);
+- buf += assoclen;
+- memcpy(buf, reqctx->iv, IV);
+- buf += IV;
+- sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+- buf, req->cryptlen, req->assoclen);
++ buf, req->cryptlen + req->assoclen, 0);
+ } else {
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (reqctx->b0_len)
+ ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+ &reqctx->b0_dma);
+- ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
+- ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
+- ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
+- req->assoclen);
++ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
++ req->assoclen, 0);
+ ulptx_walk_end(&ulp_walk);
+ }
+ }
+
+ void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+- unsigned int assoclen,
+ unsigned short qid)
+ {
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+@@ -2516,12 +2501,10 @@ void chcr_add_aead_dst_ent(struct aead_r
+ u32 temp;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+- if (reqctx->b0_len)
+- dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
+- dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
+- dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
+- temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
+- dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
++ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, &reqctx->iv_dma);
++ temp = req->assoclen + req->cryptlen +
++ (reqctx->op ? -authsize : authsize);
++ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
+ dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
+ }
+
+@@ -2689,8 +2672,7 @@ static int set_msg_len(u8 *block, unsign
+ return 0;
+ }
+
+-static void generate_b0(struct aead_request *req,
+- struct chcr_aead_ctx *aeadctx,
++static void generate_b0(struct aead_request *req, u8 *ivptr,
+ unsigned short op_type)
+ {
+ unsigned int l, lp, m;
+@@ -2701,7 +2683,7 @@ static void generate_b0(struct aead_requ
+
+ m = crypto_aead_authsize(aead);
+
+- memcpy(b0, reqctx->iv, 16);
++ memcpy(b0, ivptr, 16);
+
+ lp = b0[0];
+ l = lp + 1;
+@@ -2727,29 +2709,31 @@ static inline int crypto_ccm_check_iv(co
+ }
+
+ static int ccm_format_packet(struct aead_request *req,
+- struct chcr_aead_ctx *aeadctx,
++ u8 *ivptr,
+ unsigned int sub_type,
+ unsigned short op_type,
+ unsigned int assoclen)
+ {
+ struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
++ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
++ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ int rc = 0;
+
+ if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
+- reqctx->iv[0] = 3;
+- memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
+- memcpy(reqctx->iv + 4, req->iv, 8);
+- memset(reqctx->iv + 12, 0, 4);
++ ivptr[0] = 3;
++ memcpy(ivptr + 1, &aeadctx->salt[0], 3);
++ memcpy(ivptr + 4, req->iv, 8);
++ memset(ivptr + 12, 0, 4);
+ } else {
+- memcpy(reqctx->iv, req->iv, 16);
++ memcpy(ivptr, req->iv, 16);
+ }
+ if (assoclen)
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(assoclen);
+
+- generate_b0(req, aeadctx, op_type);
++ generate_b0(req, ivptr, op_type);
+ /* zero the ctr value */
+- memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
++ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
+ return rc;
+ }
+
+@@ -2775,7 +2759,7 @@ static void fill_sec_cpl_for_aead(struct
+ ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
+
+ auth_offset = req->cryptlen ?
+- (assoclen + IV + 1 + ccm_xtra) : 0;
++ (req->assoclen + IV + 1 + ccm_xtra) : 0;
+ if (op_type == CHCR_DECRYPT_OP) {
+ if (crypto_aead_authsize(tfm) != req->cryptlen)
+ tag_offset = crypto_aead_authsize(tfm);
+@@ -2785,13 +2769,13 @@ static void fill_sec_cpl_for_aead(struct
+
+
+ sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
+- 2, assoclen + 1 + ccm_xtra);
++ 2, 1);
+ sec_cpl->pldlen =
+- htonl(assoclen + IV + req->cryptlen + ccm_xtra);
++ htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
+ /* For CCM there wil be b0 always. So AAD start will be 1 always */
+ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+- 1, assoclen + ccm_xtra, assoclen
+- + IV + 1 + ccm_xtra, 0);
++ 1 + IV, IV + assoclen + ccm_xtra,
++ req->assoclen + IV + 1 + ccm_xtra, 0);
+
+ sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
+ auth_offset, tag_offset,
+@@ -2838,10 +2822,11 @@ static struct sk_buff *create_aead_ccm_w
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
+ unsigned int transhdr_len;
+- unsigned int dst_size = 0, kctx_len, dnents, temp;
++ unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
+ unsigned int sub_type, assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
++ u8 *ivptr;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
+@@ -2857,37 +2842,38 @@ static struct sk_buff *create_aead_ccm_w
+ error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
+ if (error)
+ goto err;
+- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+- dnents += sg_nents_xlen(req->dst, req->cryptlen
++ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
+ + (reqctx->op ? -authsize : authsize),
+- CHCR_DST_SG_SIZE, req->assoclen);
++ CHCR_DST_SG_SIZE, 0);
+ dnents += MIN_CCM_SG; // For IV and B0
+ dst_size = get_space_for_phys_dsgl(dnents);
++ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
++ CHCR_SRC_SG_SIZE, 0);
++ snents += MIN_CCM_SG; //For B0
+ kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
++ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
+ reqctx->b0_len) <= SGE_MAX_WR_LEN;
+- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
++ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
+ reqctx->b0_len, 16) :
+- (sgl_len(reqctx->src_nents + reqctx->aad_nents +
+- MIN_CCM_SG) * 8);
++ (sgl_len(snents) * 8);
+ transhdr_len += temp;
+ transhdr_len = roundup(transhdr_len, 16);
+
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
+- reqctx->b0_len, transhdr_len, reqctx->op)) {
++ reqctx->b0_len, transhdr_len, reqctx->op)) {
+ atomic_inc(&adap->chcr_stats.fallback);
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+ }
+- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
++ skb = alloc_skb(transhdr_len, flags);
+
+ if (!skb) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+- chcr_req = (struct chcr_wr *) __skb_put_zero(skb, transhdr_len);
++ chcr_req = __skb_put_zero(skb, transhdr_len);
+
+ fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
+
+@@ -2897,16 +2883,17 @@ static struct sk_buff *create_aead_ccm_w
+ aeadctx->key, aeadctx->enckey_len);
+
+ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
+- error = ccm_format_packet(req, aeadctx, sub_type, reqctx->op, assoclen);
++ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
++ ulptx = (struct ulptx_sgl *)(ivptr + IV);
++ error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
+ if (error)
+ goto dstmap_fail;
+- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+- chcr_add_aead_src_ent(req, ulptx, assoclen);
++ chcr_add_aead_dst_ent(req, phys_cpl, qid);
++ chcr_add_aead_src_ent(req, ulptx);
+
+ atomic_inc(&adap->chcr_stats.aead_rqst);
+- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
++ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
++ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
+ reqctx->b0_len) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
+ transhdr_len, temp, 0);
+@@ -2931,10 +2918,11 @@ static struct sk_buff *create_gcm_wr(str
+ struct chcr_wr *chcr_req;
+ struct cpl_rx_phys_dsgl *phys_cpl;
+ struct ulptx_sgl *ulptx;
+- unsigned int transhdr_len, dnents = 0;
++ unsigned int transhdr_len, dnents = 0, snents;
+ unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
+ unsigned int authsize = crypto_aead_authsize(tfm);
+ int error = -EINVAL;
++ u8 *ivptr;
+ gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
+ GFP_ATOMIC;
+ struct adapter *adap = padap(a_ctx(tfm)->dev);
+@@ -2946,19 +2934,19 @@ static struct sk_buff *create_gcm_wr(str
+ error = chcr_aead_common_init(req);
+ if (error)
+ return ERR_PTR(error);
+- dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
+- dnents += sg_nents_xlen(req->dst, req->cryptlen +
++ dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize),
+- CHCR_DST_SG_SIZE, req->assoclen);
++ CHCR_DST_SG_SIZE, 0);
++ snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
++ CHCR_SRC_SG_SIZE, 0);
+ dnents += MIN_GCM_SG; // For IV
+ dst_size = get_space_for_phys_dsgl(dnents);
+ kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
+ transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
+- reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
++ reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
+ SGE_MAX_WR_LEN;
+- temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
+- (sgl_len(reqctx->src_nents +
+- reqctx->aad_nents + MIN_GCM_SG) * 8);
++ temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
++ (sgl_len(snents) * 8);
+ transhdr_len += temp;
+ transhdr_len = roundup(transhdr_len, 16);
+ if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
+@@ -2968,7 +2956,7 @@ static struct sk_buff *create_gcm_wr(str
+ chcr_aead_common_exit(req);
+ return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
+ }
+- skb = alloc_skb(SGE_MAX_WR_LEN, flags);
++ skb = alloc_skb(transhdr_len, flags);
+ if (!skb) {
+ error = -ENOMEM;
+ goto err;
+@@ -2979,15 +2967,15 @@ static struct sk_buff *create_gcm_wr(str
+ //Offset of tag from end
+ temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+- a_ctx(tfm)->dev->rx_channel_id, 2,
+- (assoclen + 1));
++ a_ctx(tfm)->dev->rx_channel_id, 2, 1);
+ chcr_req->sec_cpl.pldlen =
+- htonl(assoclen + IV + req->cryptlen);
++ htonl(req->assoclen + IV + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+- assoclen ? 1 : 0, assoclen,
+- assoclen + IV + 1, 0);
++ assoclen ? 1 + IV : 0,
++ assoclen ? IV + assoclen : 0,
++ req->assoclen + IV + 1, 0);
+ chcr_req->sec_cpl.cipherstop_lo_authinsert =
+- FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
++ FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
+ temp, temp);
+ chcr_req->sec_cpl.seqno_numivs =
+ FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
+@@ -3002,25 +2990,26 @@ static struct sk_buff *create_gcm_wr(str
+ memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
+ GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
+
++ phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
++ ivptr = (u8 *)(phys_cpl + 1) + dst_size;
+ /* prepare a 16 byte iv */
+ /* S A L T | IV | 0x00000001 */
+ if (get_aead_subtype(tfm) ==
+ CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
+- memcpy(reqctx->iv, aeadctx->salt, 4);
+- memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
++ memcpy(ivptr, aeadctx->salt, 4);
++ memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
+ } else {
+- memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
++ memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
+ }
+- *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
++ *((unsigned int *)(ivptr + 12)) = htonl(0x01);
+
+- phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
+- ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
++ ulptx = (struct ulptx_sgl *)(ivptr + 16);
+
+- chcr_add_aead_dst_ent(req, phys_cpl, assoclen, qid);
+- chcr_add_aead_src_ent(req, ulptx, assoclen);
++ chcr_add_aead_dst_ent(req, phys_cpl, qid);
++ chcr_add_aead_src_ent(req, ulptx);
+ atomic_inc(&adap->chcr_stats.aead_rqst);
+- temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
+- kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
++ temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
++ kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
+ create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
+ transhdr_len, temp, reqctx->verify);
+ reqctx->skb = skb;
+@@ -4178,7 +4167,6 @@ static struct chcr_alg_template driver_a
+ .setauthsize = chcr_authenc_null_setauthsize,
+ }
+ },
+-
+ };
+
+ /*
+--- a/drivers/crypto/chelsio/chcr_algo.h
++++ b/drivers/crypto/chelsio/chcr_algo.h
+@@ -262,7 +262,7 @@
+ #define MIN_AUTH_SG 1 /* IV */
+ #define MIN_GCM_SG 1 /* IV */
+ #define MIN_DIGEST_SG 1 /*Partial Buffer*/
+-#define MIN_CCM_SG 2 /*IV+B0*/
++#define MIN_CCM_SG 1 /*IV+B0*/
+ #define CIP_SPACE_LEFT(len) \
+ ((SGE_MAX_WR_LEN - CIP_WR_MIN_LEN - (len)))
+ #define HASH_SPACE_LEFT(len) \
+--- a/drivers/crypto/chelsio/chcr_crypto.h
++++ b/drivers/crypto/chelsio/chcr_crypto.h
+@@ -41,7 +41,8 @@
+
+ #define CCM_B0_SIZE 16
+ #define CCM_AAD_FIELD_SIZE 2
+-#define T6_MAX_AAD_SIZE 511
++// 511 - 16(For IV)
++#define T6_MAX_AAD_SIZE 495
+
+
+ /* Define following if h/w is not dropping the AAD and IV data before
+@@ -185,9 +186,6 @@ struct chcr_aead_reqctx {
+ dma_addr_t b0_dma;
+ unsigned int b0_len;
+ unsigned int op;
+- short int aad_nents;
+- short int src_nents;
+- short int dst_nents;
+ u16 imm;
+ u16 verify;
+ u8 iv[CHCR_MAX_CRYPTO_IV_LEN + MAX_SCRATCH_PAD_SIZE];
+@@ -322,10 +320,8 @@ void chcr_aead_dma_unmap(struct device *
+ unsigned short op_type);
+ void chcr_add_aead_dst_ent(struct aead_request *req,
+ struct cpl_rx_phys_dsgl *phys_cpl,
+- unsigned int assoclen,
+ unsigned short qid);
+-void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx,
+- unsigned int assoclen);
++void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx);
+ void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
+ void *ulptx,
+ struct cipher_wr_param *wrparam);
diff --git a/patches.drivers/crypto-chelsio-Use-same-value-for-both-channel-in-si.patch b/patches.drivers/crypto-chelsio-Use-same-value-for-both-channel-in-si.patch
new file mode 100644
index 0000000000..01e2cf69be
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-Use-same-value-for-both-channel-in-si.patch
@@ -0,0 +1,91 @@
+From: Harsh Jain <harsh@chelsio.com>
+Date: Tue, 11 Dec 2018 16:21:38 +0530
+Subject: crypto: chelsio - Use same value for both channel in single WR
+Patch-mainline: v5.0-rc1
+Git-commit: d5a4dfbdaf54cbd845755a5415cff57688bb983c
+References: bsc#1136353 jsc#SLE-4688
+
+Use tx_channel_id instead of rx_channel_id.
+
+Signed-off-by: Harsh Jain <harsh@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 13 ++++++-------
+ drivers/crypto/chelsio/chcr_core.h | 1 -
+ 2 files changed, 6 insertions(+), 8 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -717,7 +717,7 @@ static inline void create_wreq(struct ch
+ htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
+ chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
+ chcr_req->wreq.rx_chid_to_rx_q_id =
+- FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
++ FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
+ !!lcb, ctx->tx_qidx);
+
+ chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
+@@ -773,7 +773,7 @@ static struct sk_buff *create_cipher_wr(
+ }
+ chcr_req = __skb_put_zero(skb, transhdr_len);
+ chcr_req->sec_cpl.op_ivinsrtofst =
+- FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);
++ FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);
+
+ chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+@@ -1344,7 +1344,6 @@ static int chcr_device_init(struct chcr_
+ spin_lock(&ctx->dev->lock_chcr_dev);
+ ctx->tx_chan_id = ctx->dev->tx_channel_id;
+ ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
+- ctx->dev->rx_channel_id = 0;
+ spin_unlock(&ctx->dev->lock_chcr_dev);
+ rxq_idx = ctx->tx_chan_id * rxq_perchan;
+ rxq_idx += id % rxq_perchan;
+@@ -1498,7 +1497,7 @@ static struct sk_buff *create_hash_wr(st
+ chcr_req = __skb_put_zero(skb, transhdr_len);
+
+ chcr_req->sec_cpl.op_ivinsrtofst =
+- FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
++ FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
+ chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
+
+ chcr_req->sec_cpl.aadstart_cipherstop_hi =
+@@ -2324,7 +2323,7 @@ static struct sk_buff *create_authenc_wr
+ * to the hardware spec
+ */
+ chcr_req->sec_cpl.op_ivinsrtofst =
+- FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, 1);
++ FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
+ chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+ null ? 0 : 1 + IV,
+@@ -2746,7 +2745,7 @@ static void fill_sec_cpl_for_aead(struct
+ struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
+ unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
+ unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
+- unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
++ unsigned int c_id = a_ctx(tfm)->tx_chan_id;
+ unsigned int ccm_xtra;
+ unsigned char tag_offset = 0, auth_offset = 0;
+ unsigned int assoclen;
+@@ -2967,7 +2966,7 @@ static struct sk_buff *create_gcm_wr(str
+ //Offset of tag from end
+ temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
+ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
+- a_ctx(tfm)->dev->rx_channel_id, 2, 1);
++ a_ctx(tfm)->tx_chan_id, 2, 1);
+ chcr_req->sec_cpl.pldlen =
+ htonl(req->assoclen + IV + req->cryptlen);
+ chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
+--- a/drivers/crypto/chelsio/chcr_core.h
++++ b/drivers/crypto/chelsio/chcr_core.h
+@@ -133,7 +133,6 @@ struct chcr_dev {
+ spinlock_t lock_chcr_dev;
+ struct uld_ctx *u_ctx;
+ unsigned char tx_channel_id;
+- unsigned char rx_channel_id;
+ };
+
+ struct uld_ctx {
diff --git a/patches.drivers/crypto-chelsio-avoid-using-sa_entry-imm.patch b/patches.drivers/crypto-chelsio-avoid-using-sa_entry-imm.patch
new file mode 100644
index 0000000000..b339aa9342
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-avoid-using-sa_entry-imm.patch
@@ -0,0 +1,59 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Thu, 17 Jan 2019 09:18:35 -0800
+Subject: crypto: chelsio - avoid using sa_entry imm
+Patch-mainline: v5.1-rc1
+Git-commit: 4da66b758b25938a5e0b6df830d08e8d5c316936
+References: bsc#1136353 jsc#SLE-4688
+
+use is_eth_imm to determine immediate data than use sa_entry
+field which is common for tunnel and not per skb.
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_core.h | 2 +-
+ drivers/crypto/chelsio/chcr_ipsec.c | 10 ++++------
+ 2 files changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_core.h
++++ b/drivers/crypto/chelsio/chcr_core.h
+@@ -183,7 +183,7 @@ struct chcr_ipsec_aadiv {
+ struct ipsec_sa_entry {
+ int hmac_ctrl;
+ u16 esn;
+- u16 imm;
++ u16 resv;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -415,12 +415,12 @@ inline void *copy_esn_pktxt(struct sk_bu
+ iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
+ memcpy(aadiv->iv, iv, 8);
+
+- if (sa_entry->imm) {
++ if (is_eth_imm(skb, sa_entry)) {
+ sc_imm = (struct ulptx_idata *)(pos +
+ (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
+ sizeof(__be64)) << 3));
+- sc_imm->cmd_more = FILL_CMD_MORE(!sa_entry->imm);
+- sc_imm->len = cpu_to_be32(sa_entry->imm);
++ sc_imm->cmd_more = FILL_CMD_MORE(0);
++ sc_imm->len = cpu_to_be32(skb->len);
+ }
+ pos += len;
+ return pos;
+@@ -548,10 +548,8 @@ inline void *chcr_crypto_wreq(struct sk_
+ if (sa_entry->esn)
+ ivdrop = 1;
+
+- if (is_eth_imm(skb, sa_entry)) {
++ if (is_eth_imm(skb, sa_entry))
+ immdatalen = skb->len;
+- sa_entry->imm = immdatalen;
+- }
+
+ if (sa_entry->esn)
+ esnlen = sizeof(struct chcr_ipsec_aadiv);
diff --git a/patches.drivers/crypto-chelsio-check-set_msg_len-overflow-in-generat.patch b/patches.drivers/crypto-chelsio-check-set_msg_len-overflow-in-generat.patch
new file mode 100644
index 0000000000..38e43865fd
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-check-set_msg_len-overflow-in-generat.patch
@@ -0,0 +1,47 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 28 Dec 2018 14:41:00 +0800
+Subject: crypto: chelsio - check set_msg_len overflow in generate_b0
+Patch-mainline: v5.1-rc1
+Git-commit: 66af86d93ce32ff5b262ace9a6696873cc1bdb3e
+References: bsc#1136353 jsc#SLE-4688
+
+set_msg_len may fails with -EOVERFLOW, It should be propagate
+to upstream.
+
+Fixes: 2debd3325e55 ("crypto: chcr - Add AEAD algos.")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -2762,7 +2762,7 @@ static int set_msg_len(u8 *block, unsign
+ return 0;
+ }
+
+-static void generate_b0(struct aead_request *req, u8 *ivptr,
++static int generate_b0(struct aead_request *req, u8 *ivptr,
+ unsigned short op_type)
+ {
+ unsigned int l, lp, m;
+@@ -2787,6 +2787,8 @@ static void generate_b0(struct aead_requ
+ rc = set_msg_len(b0 + 16 - l,
+ (op_type == CHCR_DECRYPT_OP) ?
+ req->cryptlen - m : req->cryptlen, l);
++
++ return rc;
+ }
+
+ static inline int crypto_ccm_check_iv(const u8 *iv)
+@@ -2821,7 +2823,7 @@ static int ccm_format_packet(struct aead
+ *((unsigned short *)(reqctx->scratch_pad + 16)) =
+ htons(assoclen);
+
+- generate_b0(req, ivptr, op_type);
++ rc = generate_b0(req, ivptr, op_type);
+ /* zero the ctr value */
+ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
+ return rc;
diff --git a/patches.drivers/crypto-chelsio-clean-up-various-indentation-issues.patch b/patches.drivers/crypto-chelsio-clean-up-various-indentation-issues.patch
new file mode 100644
index 0000000000..148113da7c
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-clean-up-various-indentation-issues.patch
@@ -0,0 +1,64 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 30 Oct 2018 12:01:58 +0000
+Subject: crypto: chelsio - clean up various indentation issues
+Patch-mainline: v5.0-rc1
+Git-commit: fc6176a240ae93850be445f355c1dba769fe8467
+References: bsc#1136353 jsc#SLE-4688
+
+Trivial fix to clean up varous indentation issue
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -1311,8 +1311,8 @@ static int chcr_aes_decrypt(struct ablkc
+ return -ENOSPC;
+ }
+
+- err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
+- &skb, CHCR_DECRYPT_OP);
++ err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
++ &skb, CHCR_DECRYPT_OP);
+ if (err || !skb)
+ return err;
+ skb->dev = u_ctx->lldi.ports[0];
+@@ -2008,7 +2008,7 @@ static int chcr_ahash_export(struct ahas
+ memcpy(state->partial_hash, req_ctx->partial_hash,
+ CHCR_HASH_MAX_DIGEST_SIZE);
+ chcr_init_hctx_per_wr(state);
+- return 0;
++ return 0;
+ }
+
+ static int chcr_ahash_import(struct ahash_request *areq, const void *in)
+@@ -2249,7 +2249,7 @@ static int chcr_aead_fallback(struct aea
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+- aead_request_set_ad(subreq, req->assoclen);
++ aead_request_set_ad(subreq, req->assoclen);
+ return op_type ? crypto_aead_decrypt(subreq) :
+ crypto_aead_encrypt(subreq);
+ }
+@@ -3118,12 +3118,12 @@ static int chcr_gcm_setauthsize(struct c
+ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_12:
+- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
+- aeadctx->mayverify = VERIFY_HW;
++ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
++ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_14:
+- aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
+- aeadctx->mayverify = VERIFY_HW;
++ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
++ aeadctx->mayverify = VERIFY_HW;
+ break;
+ case ICV_16:
+ aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
diff --git a/patches.drivers/crypto-chelsio-cleanup-send-addr-as-value-in-functio.patch b/patches.drivers/crypto-chelsio-cleanup-send-addr-as-value-in-functio.patch
new file mode 100644
index 0000000000..6bcd960d64
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-cleanup-send-addr-as-value-in-functio.patch
@@ -0,0 +1,83 @@
+From: Harsh Jain <harsh@chelsio.com>
+Date: Tue, 11 Dec 2018 16:21:39 +0530
+Subject: crypto: chelsio - cleanup:send addr as value in function argument
+Patch-mainline: v5.0-rc1
+Git-commit: c4f6d44d774eff382b6fc79a9fe1ff376b5ac6d7
+References: bsc#1136353 jsc#SLE-4688
+
+Send dma address as value to function arguments instead of pointer.
+
+Signed-off-by: Harsh Jain <harsh@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -391,7 +391,7 @@ static inline void dsgl_walk_end(struct
+
+ static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
+ size_t size,
+- dma_addr_t *addr)
++ dma_addr_t addr)
+ {
+ int j;
+
+@@ -399,7 +399,7 @@ static inline void dsgl_walk_add_page(st
+ return;
+ j = walk->nents;
+ walk->to->len[j % 8] = htons(size);
+- walk->to->addr[j % 8] = cpu_to_be64(*addr);
++ walk->to->addr[j % 8] = cpu_to_be64(addr);
+ j++;
+ if ((j % 8) == 0)
+ walk->to++;
+@@ -473,16 +473,16 @@ static inline void ulptx_walk_end(struct
+
+ static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
+ size_t size,
+- dma_addr_t *addr)
++ dma_addr_t addr)
+ {
+ if (!size)
+ return;
+
+ if (walk->nents == 0) {
+ walk->sgl->len0 = cpu_to_be32(size);
+- walk->sgl->addr0 = cpu_to_be64(*addr);
++ walk->sgl->addr0 = cpu_to_be64(addr);
+ } else {
+- walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
++ walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
+ walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
+ walk->pair_idx = !walk->pair_idx;
+ if (!walk->pair_idx)
+@@ -2481,7 +2481,7 @@ void chcr_add_aead_src_ent(struct aead_r
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (reqctx->b0_len)
+ ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
+- &reqctx->b0_dma);
++ reqctx->b0_dma);
+ ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
+ req->assoclen, 0);
+ ulptx_walk_end(&ulp_walk);
+@@ -2500,7 +2500,7 @@ void chcr_add_aead_dst_ent(struct aead_r
+ u32 temp;
+
+ dsgl_walk_init(&dsgl_walk, phys_cpl);
+- dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, &reqctx->iv_dma);
++ dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
+ temp = req->assoclen + req->cryptlen +
+ (reqctx->op ? -authsize : authsize);
+ dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
+@@ -2571,7 +2571,7 @@ void chcr_add_hash_src_ent(struct ahash_
+ ulptx_walk_init(&ulp_walk, ulptx);
+ if (param->bfr_len)
+ ulptx_walk_add_page(&ulp_walk, param->bfr_len,
+- &reqctx->hctx_wr.dma_addr);
++ reqctx->hctx_wr.dma_addr);
+ ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
+ param->sg_len, reqctx->hctx_wr.src_ofst);
+ reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
diff --git a/patches.drivers/crypto-chelsio-count-incomplete-block-in-IV.patch b/patches.drivers/crypto-chelsio-count-incomplete-block-in-IV.patch
new file mode 100644
index 0000000000..76db2aff05
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-count-incomplete-block-in-IV.patch
@@ -0,0 +1,31 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Thu, 2 May 2019 03:47:57 -0700
+Subject: crypto: chelsio - count incomplete block in IV
+Patch-mainline: v5.2-rc1
+Git-commit: 0a4491d3febe0b8c328870d1909fc91756970237
+References: bsc#1136353 jsc#SLE-4688
+
+The partial block should count as one and appropriately appended
+to IV. eg 499B for AES CTR should count 32 block than 31 and
+correct count value is updated in iv out.
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -1094,8 +1094,8 @@ static int chcr_final_cipher_iv(struct a
+ int ret = 0;
+
+ if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
+- ctr_add_iv(iv, req->info, (reqctx->processed /
+- AES_BLOCK_SIZE));
++ ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
++ AES_BLOCK_SIZE));
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
+ ret = chcr_update_tweak(req, iv, 1);
+ else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
diff --git a/patches.drivers/crypto-chelsio-remove-set-but-not-used-variable-kctx.patch b/patches.drivers/crypto-chelsio-remove-set-but-not-used-variable-kctx.patch
new file mode 100644
index 0000000000..f4e16d9446
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-remove-set-but-not-used-variable-kctx.patch
@@ -0,0 +1,42 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Tue, 11 Dec 2018 08:11:59 +0000
+Subject: crypto: chelsio - remove set but not used variable 'kctx_len'
+Patch-mainline: v5.0-rc1
+Git-commit: 3cc04c160208ec55940db652343d236515d88af5
+References: bsc#1136353 jsc#SLE-4688
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/crypto/chelsio/chcr_ipsec.c: In function 'chcr_ipsec_xmit':
+drivers/crypto/chelsio/chcr_ipsec.c:674:33: warning:
+ variable 'kctx_len' set but not used [-Wunused-but-set-variable]
+ unsigned int flits = 0, ndesc, kctx_len;
+
+It not used since commit 8362ea16f69f ("crypto: chcr - ESN for Inline IPSec Tx")
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_ipsec.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_ipsec.c
++++ b/drivers/crypto/chelsio/chcr_ipsec.c
+@@ -671,7 +671,7 @@ int chcr_ipsec_xmit(struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry;
+ u64 *pos, *end, *before, *sgl;
+ int qidx, left, credits;
+- unsigned int flits = 0, ndesc, kctx_len;
++ unsigned int flits = 0, ndesc;
+ struct adapter *adap;
+ struct sge_eth_txq *q;
+ struct port_info *pi;
+@@ -682,7 +682,6 @@ int chcr_ipsec_xmit(struct sk_buff *skb,
+ return NETDEV_TX_BUSY;
+
+ sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
+- kctx_len = sa_entry->kctx_len;
+
+ if (skb->sp->len != 1) {
+ out_free: dev_kfree_skb_any(skb);
diff --git a/patches.drivers/crypto-chelsio-remove-set-but-not-used-variables-ada.patch b/patches.drivers/crypto-chelsio-remove-set-but-not-used-variables-ada.patch
new file mode 100644
index 0000000000..bf542993e0
--- /dev/null
+++ b/patches.drivers/crypto-chelsio-remove-set-but-not-used-variables-ada.patch
@@ -0,0 +1,41 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 28 Dec 2018 06:53:53 +0000
+Subject: crypto: chelsio - remove set but not used variables 'adap'
+Patch-mainline: v5.1-rc1
+Git-commit: e12468241b19653d87534f3ff0778f1ad4668f5e
+References: bsc#1136353 jsc#SLE-4688
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/crypto/chelsio/chcr_algo.c: In function 'chcr_device_init':
+drivers/crypto/chelsio/chcr_algo.c:1371:18: warning:
+ variable 'adap' set but not used [-Wunused-but-set-variable]
+
+It not used since commit a1c6fd4308d3 ("crypto: chelsio - Update ntx queue
+received from cxgb4")
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -1368,7 +1368,6 @@ static int chcr_aes_decrypt(struct ablkc
+ static int chcr_device_init(struct chcr_context *ctx)
+ {
+ struct uld_ctx *u_ctx = NULL;
+- struct adapter *adap;
+ unsigned int id;
+ int txq_perchan, txq_idx, ntxq;
+ int err = 0, rxq_perchan, rxq_idx;
+@@ -1382,7 +1381,6 @@ static int chcr_device_init(struct chcr_
+ goto out;
+ }
+ ctx->dev = &u_ctx->dev;
+- adap = padap(ctx->dev);
+ ntxq = u_ctx->lldi.ntxq;
+ rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
+ txq_perchan = ntxq / u_ctx->lldi.nchan;
diff --git a/patches.drivers/crypto-chtls-remove-cdev_list_lock.patch b/patches.drivers/crypto-chtls-remove-cdev_list_lock.patch
new file mode 100644
index 0000000000..1176e63fe3
--- /dev/null
+++ b/patches.drivers/crypto-chtls-remove-cdev_list_lock.patch
@@ -0,0 +1,30 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 11 Feb 2019 12:02:50 +0100
+Subject: crypto: chtls - remove cdev_list_lock
+Patch-mainline: v5.1-rc1
+Git-commit: 7d220dabc2192f37a8ad446d75e903ba9c0781fe
+References: bsc#1136353 jsc#SLE-4688
+
+Last user of cdev_list_lock was removed in commit
+
+ 6422ccc5fbefb ("crypto/chelsio/chtls: listen fails with multiadapt")
+
+Cc: Atul Gupta <atul.gupta@chelsio.com>
+Cc: Harsh Jain <harsh@chelsio.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chtls/chtls_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_main.c
++++ b/drivers/crypto/chelsio/chtls/chtls_main.c
+@@ -30,7 +30,6 @@
+ */
+ static LIST_HEAD(cdev_list);
+ static DEFINE_MUTEX(cdev_mutex);
+-static DEFINE_MUTEX(cdev_list_lock);
+
+ static DEFINE_MUTEX(notify_mutex);
+ static RAW_NOTIFIER_HEAD(listen_notify_list);
diff --git a/patches.drivers/crypto-chtls-remove-set-but-not-used-variables-err-a.patch b/patches.drivers/crypto-chtls-remove-set-but-not-used-variables-err-a.patch
new file mode 100644
index 0000000000..ff07287b2f
--- /dev/null
+++ b/patches.drivers/crypto-chtls-remove-set-but-not-used-variables-err-a.patch
@@ -0,0 +1,105 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Tue, 18 Dec 2018 02:26:14 +0000
+Subject: crypto: chtls - remove set but not used variables 'err, adap,
+ request, hws'
+Patch-mainline: v5.1-rc1
+Git-commit: 8d555c528565ad7261f0ce27c0a808ec9377ad33
+References: bsc#1136353 jsc#SLE-4688
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/crypto/chelsio/chtls/chtls_io.c: In function 'csk_wait_memory':
+drivers/crypto/chelsio/chtls/chtls_io.c:925:6: warning:
+ variable 'sndbuf' set but not used [-Wunused-but-set-variable]
+
+drivers/crypto/chelsio/chtls/chtls_io.c: In function 'chtls_pt_recvmsg':
+drivers/crypto/chelsio/chtls/chtls_io.c:1411:6: warning:
+ variable 'request' set but not used [-Wunused-but-set-variable]
+
+drivers/crypto/chelsio/chtls/chtls_io.c:1407:18: warning:
+ variable 'adap' set but not used [-Wunused-but-set-variable]
+
+drivers/crypto/chelsio/chtls/chtls_io.c: In function 'chtls_recvmsg':
+drivers/crypto/chelsio/chtls/chtls_io.c:1701:6: warning:
+ variable 'request' set but not used [-Wunused-but-set-variable]
+
+drivers/crypto/chelsio/chtls/chtls_io.c:1697:20: warning:
+ variable 'hws' set but not used [-Wunused-but-set-variable]
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chtls/chtls_io.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -922,14 +922,13 @@ static int csk_wait_memory(struct chtls_
+ struct sock *sk, long *timeo_p)
+ {
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+- int sndbuf, err = 0;
++ int err = 0;
+ long current_timeo;
+ long vm_wait = 0;
+ bool noblock;
+
+ current_timeo = *timeo_p;
+ noblock = (*timeo_p ? false : true);
+- sndbuf = cdev->max_host_sndbuf;
+ if (csk_mem_free(cdev, sk)) {
+ current_timeo = (prandom_u32() % (HZ / 5)) + 2;
+ vm_wait = (prandom_u32() % (HZ / 5)) + 2;
+@@ -1401,23 +1400,18 @@ static int chtls_pt_recvmsg(struct sock
+ int nonblock, int flags, int *addr_len)
+ {
+ struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
+- struct net_device *dev = csk->egress_dev;
+ struct chtls_hws *hws = &csk->tlshws;
+ struct tcp_sock *tp = tcp_sk(sk);
+- struct adapter *adap;
+ unsigned long avail;
+ int buffers_freed;
+ int copied = 0;
+- int request;
+ int target;
+ long timeo;
+
+- adap = netdev2adap(dev);
+ buffers_freed = 0;
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+- request = len;
+
+ if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+ chtls_cleanup_rbuf(sk, copied);
+@@ -1694,11 +1688,9 @@ int chtls_recvmsg(struct sock *sk, struc
+ {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct chtls_sock *csk;
+- struct chtls_hws *hws;
+ unsigned long avail; /* amount of available data in current skb */
+ int buffers_freed;
+ int copied = 0;
+- int request;
+ long timeo;
+ int target; /* Read at least this many bytes */
+
+@@ -1718,7 +1710,6 @@ int chtls_recvmsg(struct sock *sk, struc
+
+ lock_sock(sk);
+ csk = rcu_dereference_sk_user_data(sk);
+- hws = &csk->tlshws;
+
+ if (is_tls_rx(csk))
+ return chtls_pt_recvmsg(sk, msg, len, nonblock,
+@@ -1726,7 +1717,6 @@ int chtls_recvmsg(struct sock *sk, struc
+
+ timeo = sock_rcvtimeo(sk, nonblock);
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+- request = len;
+
+ if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
+ chtls_cleanup_rbuf(sk, copied);
diff --git a/patches.drivers/cxgb4-Add-VF-Link-state-support.patch b/patches.drivers/cxgb4-Add-VF-Link-state-support.patch
new file mode 100644
index 0000000000..80bbca8e9d
--- /dev/null
+++ b/patches.drivers/cxgb4-Add-VF-Link-state-support.patch
@@ -0,0 +1,127 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Date: Fri, 1 Mar 2019 15:44:47 +0530
+Subject: cxgb4: Add VF Link state support
+Patch-mainline: v5.1-rc1
+Git-commit: 8b965f3f649c50d68a7fbc3a7771fcc368d2b7b8
+References: bsc#1136345 jsc#SLE-4681
+
+Use ndo_set_vf_link_state to control the link states associated
+with the virtual interfaces.
+
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 1
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 57 +++++++++++++++++++++---
+ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 8 +++
+ 3 files changed, 60 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -885,6 +885,7 @@ struct vf_info {
+ unsigned int tx_rate;
+ bool pf_set_mac;
+ u16 vlan;
++ int link_state;
+ };
+
+ enum {
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2747,6 +2747,7 @@ static int cxgb4_mgmt_get_vf_config(stru
+ ivi->min_tx_rate = 0;
+ ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
+ ivi->vlan = vfinfo->vlan;
++ ivi->linkstate = vfinfo->link_state;
+ return 0;
+ }
+
+@@ -2886,6 +2887,49 @@ static int cxgb4_mgmt_set_vf_vlan(struct
+ ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
+ return ret;
+ }
++
++static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
++ int link)
++{
++ struct port_info *pi = netdev_priv(dev);
++ struct adapter *adap = pi->adapter;
++ u32 param, val;
++ int ret = 0;
++
++ if (vf >= adap->num_vfs)
++ return -EINVAL;
++
++ switch (link) {
++ case IFLA_VF_LINK_STATE_AUTO:
++ val = FW_VF_LINK_STATE_AUTO;
++ break;
++
++ case IFLA_VF_LINK_STATE_ENABLE:
++ val = FW_VF_LINK_STATE_ENABLE;
++ break;
++
++ case IFLA_VF_LINK_STATE_DISABLE:
++ val = FW_VF_LINK_STATE_DISABLE;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
++ param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
++ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
++ ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
++ &param, &val);
++ if (ret) {
++ dev_err(adap->pdev_dev,
++ "Error %d in setting PF %d VF %d link state\n",
++ ret, adap->pf, vf);
++ return -EINVAL;
++ }
++
++ adap->vfinfo[vf].link_state = link;
++ return ret;
++}
+ #endif /* CONFIG_PCI_IOV */
+
+ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+@@ -3301,12 +3345,13 @@ static const struct net_device_ops cxgb4
+
+ #ifdef CONFIG_PCI_IOV
+ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
+- .ndo_open = cxgb4_mgmt_open,
+- .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+- .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+- .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+- .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+- .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
++ .ndo_open = cxgb4_mgmt_open,
++ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
++ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
++ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
++ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
++ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
++ .ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
+ };
+ #endif
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+@@ -1312,6 +1312,14 @@ enum fw_params_param_pfvf {
+ FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
+ FW_PARAMS_PARAM_PFVF_NCRYPTO_LOOKASIDE = 0x39,
+ FW_PARAMS_PARAM_PFVF_PORT_CAPS32 = 0x3A,
++ FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
++};
++
++/* Virtual link state as seen by the specified VF */
++enum vf_link_states {
++ FW_VF_LINK_STATE_AUTO = 0x00,
++ FW_VF_LINK_STATE_ENABLE = 0x01,
++ FW_VF_LINK_STATE_DISABLE = 0x02,
+ };
+
+ /*
diff --git a/patches.drivers/cxgb4-Add-new-T6-PCI-device-ids-0x608b.patch b/patches.drivers/cxgb4-Add-new-T6-PCI-device-ids-0x608b.patch
new file mode 100644
index 0000000000..8cc78209e7
--- /dev/null
+++ b/patches.drivers/cxgb4-Add-new-T6-PCI-device-ids-0x608b.patch
@@ -0,0 +1,24 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Wed, 6 Feb 2019 18:27:13 +0530
+Subject: cxgb4: Add new T6 PCI device ids 0x608b
+Patch-mainline: v5.1-rc1
+Git-commit: 249f62b6edad70d5eba43caf09526ee035336e0a
+References: bsc#1136345 jsc#SLE-4681
+
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+@@ -218,6 +218,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
+ CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
++ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
+ CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
+
+ #endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/patches.drivers/cxgb4-Delete-all-hash-and-TCAM-filters-before-resour.patch b/patches.drivers/cxgb4-Delete-all-hash-and-TCAM-filters-before-resour.patch
new file mode 100644
index 0000000000..7c61d7f88b
--- /dev/null
+++ b/patches.drivers/cxgb4-Delete-all-hash-and-TCAM-filters-before-resour.patch
@@ -0,0 +1,111 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Fri, 26 Apr 2019 13:58:48 +0530
+Subject: cxgb4: Delete all hash and TCAM filters before resource cleanup
+Patch-mainline: v5.2-rc1
+Git-commit: b1a79360ee862f8ada4798ad2346fa45bb41b527
+References: bsc#1136345 jsc#SLE-4681
+
+During driver unload, hash/TCAM filter deletion doesn't wait for
+completion.This patch deletes all the filters with completion before
+clearing the resources.
+
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 34 ++++++++++++++++++----
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 10 +++---
+ 2 files changed, 33 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -524,8 +524,7 @@ static int del_filter_wr(struct adapter
+ return -ENOMEM;
+
+ fwr = __skb_put(skb, len);
+- t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1
+- : adapter->sge.fw_evtq.abs_id);
++ t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+ * When we get the Work Request Reply we'll clear the pending status.
+@@ -744,16 +743,40 @@ void clear_filter(struct adapter *adap,
+
+ void clear_all_filters(struct adapter *adapter)
+ {
++ struct net_device *dev = adapter->port[0];
+ unsigned int i;
+
+ if (adapter->tids.ftid_tab) {
+ struct filter_entry *f = &adapter->tids.ftid_tab[0];
+ unsigned int max_ftid = adapter->tids.nftids +
+ adapter->tids.nsftids;
+-
++ /* Clear all TCAM filters */
+ for (i = 0; i < max_ftid; i++, f++)
+ if (f->valid || f->pending)
+- clear_filter(adapter, f);
++ cxgb4_del_filter(dev, i, &f->fs);
++ }
++
++ /* Clear all hash filters */
++ if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
++ struct filter_entry *f;
++ unsigned int sb;
++
++ for (i = adapter->tids.hash_base;
++ i <= adapter->tids.ntids; i++) {
++ f = (struct filter_entry *)
++ adapter->tids.tid_tab[i];
++
++ if (f && (f->valid || f->pending))
++ cxgb4_del_filter(dev, i, &f->fs);
++ }
++
++ sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
++ for (i = 0; i < sb; i++) {
++ f = (struct filter_entry *)adapter->tids.tid_tab[i];
++
++ if (f && (f->valid || f->pending))
++ cxgb4_del_filter(dev, i, &f->fs);
++ }
+ }
+ }
+
+@@ -1568,9 +1591,8 @@ int cxgb4_del_filter(struct net_device *
+ struct filter_ctx ctx;
+ int ret;
+
+- /* If we are shutting down the adapter do not wait for completion */
+ if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
+- return __cxgb4_del_filter(dev, filter_id, fs, NULL);
++ return 0;
+
+ init_completion(&ctx.completion);
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -6033,6 +6033,11 @@ static void remove_one(struct pci_dev *p
+ return;
+ }
+
++ /* If we allocated filters, free up state associated with any
++ * valid filters ...
++ */
++ clear_all_filters(adapter);
++
+ adapter->flags |= CXGB4_SHUTTING_DOWN;
+
+ if (adapter->pf == 4) {
+@@ -6063,11 +6068,6 @@ static void remove_one(struct pci_dev *p
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ cxgb4_thermal_remove(adapter);
+
+- /* If we allocated filters, free up state associated with any
+- * valid filters ...
+- */
+- clear_all_filters(adapter);
+-
+ if (adapter->flags & CXGB4_FULL_INIT_DONE)
+ cxgb_down(adapter);
+
diff --git a/patches.drivers/cxgb4-Don-t-return-EAGAIN-when-TCAM-is-full.patch b/patches.drivers/cxgb4-Don-t-return-EAGAIN-when-TCAM-is-full.patch
new file mode 100644
index 0000000000..a58a9b1adb
--- /dev/null
+++ b/patches.drivers/cxgb4-Don-t-return-EAGAIN-when-TCAM-is-full.patch
@@ -0,0 +1,52 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Mon, 8 Apr 2019 18:03:49 +0530
+Subject: cxgb4: Don't return EAGAIN when TCAM is full.
+Patch-mainline: v5.2-rc1
+Git-commit: ed514fc5615d7688b7c227a76863e98a92fb0d54
+References: bsc#1136345 jsc#SLE-4681
+
+During hash filter programming, driver needs to return ENOSPC error
+intead of EAGAIN when TCAM is full.
+
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 7 ++++---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 5 +----
+ 2 files changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -1722,12 +1722,13 @@ void hash_filter_rpl(struct adapter *ada
+ break;
+
+ default:
+- dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
+- __func__, status);
++ if (status != CPL_ERR_TCAM_FULL)
++ dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
++ __func__, status);
+
+ if (ctx) {
+ if (status == CPL_ERR_TCAM_FULL)
+- ctx->result = -EAGAIN;
++ ctx->result = -ENOSPC;
+ else
+ ctx->result = -EINVAL;
+ }
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -719,11 +719,8 @@ int cxgb4_tc_flower_replace(struct net_d
+
+ ret = ctx.result;
+ /* Check if hw returned error for filter creation */
+- if (ret) {
+- netdev_err(dev, "%s: filter creation err %d\n",
+- __func__, ret);
++ if (ret)
+ goto free_entry;
+- }
+
+ ch_flower->tc_flower_cookie = cls->cookie;
+ ch_flower->filter_id = ctx.tid;
diff --git a/patches.drivers/cxgb4-Enable-outer-UDP-checksum-offload-for-T6.patch b/patches.drivers/cxgb4-Enable-outer-UDP-checksum-offload-for-T6.patch
new file mode 100644
index 0000000000..a961883412
--- /dev/null
+++ b/patches.drivers/cxgb4-Enable-outer-UDP-checksum-offload-for-T6.patch
@@ -0,0 +1,33 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Date: Thu, 28 Feb 2019 15:09:28 +0530
+Subject: cxgb4: Enable outer UDP checksum offload for T6
+Patch-mainline: v5.1-rc1
+Git-commit: 64f40cdd07e19b509302d5cb53878ed9d9fa3b5b
+References: bsc#1136345 jsc#SLE-4681
+
+T6 adapters support outer UDP checksum offload for
+encapsulated packets, hence enabling netdev feature flag
+NETIF_F_GSO_UDP_TUNNEL_CSUM.
+
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -5727,9 +5727,11 @@ static int init_one(struct pci_dev *pdev
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_HW_TLS_RECORD;
+ }
+
diff --git a/patches.drivers/cxgb4-Fix-error-path-in-cxgb4_init_module.patch b/patches.drivers/cxgb4-Fix-error-path-in-cxgb4_init_module.patch
new file mode 100644
index 0000000000..10e4d9f59b
--- /dev/null
+++ b/patches.drivers/cxgb4-Fix-error-path-in-cxgb4_init_module.patch
@@ -0,0 +1,80 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Mon, 6 May 2019 23:57:54 +0800
+Subject: cxgb4: Fix error path in cxgb4_init_module
+Patch-mainline: v5.2-rc1
+Git-commit: a3147770bea76c8dbad73eca3a24c2118da5e719
+References: bsc#1136345 jsc#SLE-4681
+
+BUG: unable to handle kernel paging request at ffffffffa016a270
+PGD 3270067 P4D 3270067 PUD 3271063 PMD 230bbd067 PTE 0
+Oops: 0000 [#1
+CPU: 0 PID: 6134 Comm: modprobe Not tainted 5.1.0+ #33
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.9.3-0-ge2fc41e-prebuilt.qemu-project.org 04/01/2014
+RIP: 0010:atomic_notifier_chain_register+0x24/0x60
+Code: 1f 80 00 00 00 00 55 48 89 e5 41 54 49 89 f4 53 48 89 fb e8 ae b4 38 01 48 8b 53 38 48 8d 4b 38 48 85 d2 74 20 45 8b 44 24 10 <44> 3b 42 10 7e 08 eb 13 44 39 42 10 7c 0d 48 8d 4a 08 48 8b 52 08
+RSP: 0018:ffffc90000e2bc60 EFLAGS: 00010086
+RAX: 0000000000000292 RBX: ffffffff83467240 RCX: ffffffff83467278
+RDX: ffffffffa016a260 RSI: ffffffff83752140 RDI: ffffffff83467240
+RBP: ffffc90000e2bc70 R08: 0000000000000000 R09: 0000000000000001
+R10: 0000000000000000 R11: 00000000014fa61f R12: ffffffffa01c8260
+R13: ffff888231091e00 R14: 0000000000000000 R15: ffffc90000e2be78
+FS: 00007fbd8d7cd540(0000) GS:ffff888237a00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: ffffffffa016a270 CR3: 000000022c7e3000 CR4: 00000000000006f0
+Call Trace:
+ register_inet6addr_notifier+0x13/0x20
+ cxgb4_init_module+0x6c/0x1000 [cxgb4
+ ? 0xffffffffa01d7000
+ do_one_initcall+0x6c/0x3cc
+ ? do_init_module+0x22/0x1f1
+ ? rcu_read_lock_sched_held+0x97/0xb0
+ ? kmem_cache_alloc_trace+0x325/0x3b0
+ do_init_module+0x5b/0x1f1
+ load_module+0x1db1/0x2690
+ ? m_show+0x1d0/0x1d0
+ __do_sys_finit_module+0xc5/0xd0
+ __x64_sys_finit_module+0x15/0x20
+ do_syscall_64+0x6b/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+If pci_register_driver fails, register inet6addr_notifier is
+pointless. This patch fix the error path in cxgb4_init_module.
+
+Fixes: b5a02f503caa ("cxgb4 : Update ipv6 address handling api")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -6169,15 +6169,24 @@ static int __init cxgb4_init_module(void
+
+ ret = pci_register_driver(&cxgb4_driver);
+ if (ret < 0)
+- debugfs_remove(cxgb4_debugfs_root);
++ goto err_pci;
+
+ #if IS_ENABLED(CONFIG_IPV6)
+ if (!inet6addr_registered) {
+- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+- inet6addr_registered = true;
++ ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
++ if (ret)
++ pci_unregister_driver(&cxgb4_driver);
++ else
++ inet6addr_registered = true;
+ }
+ #endif
+
++ if (ret == 0)
++ return ret;
++
++err_pci:
++ debugfs_remove(cxgb4_debugfs_root);
++
+ return ret;
+ }
+
diff --git a/patches.drivers/cxgb4-Revert-cxgb4-Remove-SGE_HOST_PAGE_SIZE-depende.patch b/patches.drivers/cxgb4-Revert-cxgb4-Remove-SGE_HOST_PAGE_SIZE-depende.patch
new file mode 100644
index 0000000000..173ea2fb91
--- /dev/null
+++ b/patches.drivers/cxgb4-Revert-cxgb4-Remove-SGE_HOST_PAGE_SIZE-depende.patch
@@ -0,0 +1,44 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Thu, 23 May 2019 08:07:21 +0530
+Subject: cxgb4: Revert "cxgb4: Remove SGE_HOST_PAGE_SIZE dependency on page
+ size"
+Patch-mainline: v5.2-rc3
+Git-commit: ab0610efabb4c4f419a531455708caf1dd29357e
+References: bsc#1136345 jsc#SLE-4681
+
+This reverts commit 2391b0030e241386d710df10e53e2cfc3c5d4fc1 which has
+introduced regression. Now SGE's BAR2 Doorbell/GTS Page Size is
+interpreted correctly in the firmware itself by using actual host
+page size. Hence previous commit needs to be reverted.
+
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -7253,10 +7253,21 @@ int t4_fixup_host_params(struct adapter
+ unsigned int cache_line_size)
+ {
+ unsigned int page_shift = fls(page_size) - 1;
++ unsigned int sge_hps = page_shift - 10;
+ unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
+ unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
+ unsigned int fl_align_log = fls(fl_align) - 1;
+
++ t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
++ HOSTPAGESIZEPF0_V(sge_hps) |
++ HOSTPAGESIZEPF1_V(sge_hps) |
++ HOSTPAGESIZEPF2_V(sge_hps) |
++ HOSTPAGESIZEPF3_V(sge_hps) |
++ HOSTPAGESIZEPF4_V(sge_hps) |
++ HOSTPAGESIZEPF5_V(sge_hps) |
++ HOSTPAGESIZEPF6_V(sge_hps) |
++ HOSTPAGESIZEPF7_V(sge_hps));
++
+ if (is_t4(adap->params.chip)) {
+ t4_set_reg_field(adap, SGE_CONTROL_A,
+ INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
diff --git a/patches.drivers/cxgb4-TLS-record-offload-enable.patch b/patches.drivers/cxgb4-TLS-record-offload-enable.patch
new file mode 100644
index 0000000000..a337f92727
--- /dev/null
+++ b/patches.drivers/cxgb4-TLS-record-offload-enable.patch
@@ -0,0 +1,28 @@
+From: Atul Gupta <atul.gupta@chelsio.com>
+Date: Thu, 17 Jan 2019 20:56:39 -0800
+Subject: cxgb4: TLS record offload enable
+Patch-mainline: v5.1-rc1
+Git-commit: 1435d997037899924187c704ac672257d9576f3e
+References: bsc#1136345 jsc#SLE-4681
+
+Enable Inline TLS record by default
+
+Signed-off-by: Atul Gupta <atul.gupta@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -5661,7 +5661,8 @@ static int init_one(struct pci_dev *pdev
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO | NETIF_F_TSO6;
+
+- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
++ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_HW_TLS_RECORD;
+ }
+
+ if (highdma)
diff --git a/patches.drivers/cxgb4-Update-1.23.3.0-as-the-latest-firmware-support.patch b/patches.drivers/cxgb4-Update-1.23.3.0-as-the-latest-firmware-support.patch
new file mode 100644
index 0000000000..bdd661a535
--- /dev/null
+++ b/patches.drivers/cxgb4-Update-1.23.3.0-as-the-latest-firmware-support.patch
@@ -0,0 +1,52 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Fri, 29 Mar 2019 16:56:09 +0530
+Subject: cxgb4: Update 1.23.3.0 as the latest firmware supported.
+Patch-mainline: v5.2-rc1
+Git-commit: 5d10de34d43bf3d1d5a7164b6c64a8a4c73b4a6c
+References: bsc#1136345 jsc#SLE-4681
+
+Change t4fw_version.h to update latest firmware version
+number to 1.23.3.0.
+
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+@@ -36,8 +36,8 @@
+ #define __T4FW_VERSION_H__
+
+ #define T4FW_VERSION_MAJOR 0x01
+-#define T4FW_VERSION_MINOR 0x16
+-#define T4FW_VERSION_MICRO 0x09
++#define T4FW_VERSION_MINOR 0x17
++#define T4FW_VERSION_MICRO 0x03
+ #define T4FW_VERSION_BUILD 0x00
+
+ #define T4FW_MIN_VERSION_MAJOR 0x01
+@@ -45,8 +45,8 @@
+ #define T4FW_MIN_VERSION_MICRO 0x00
+
+ #define T5FW_VERSION_MAJOR 0x01
+-#define T5FW_VERSION_MINOR 0x16
+-#define T5FW_VERSION_MICRO 0x09
++#define T5FW_VERSION_MINOR 0x17
++#define T5FW_VERSION_MICRO 0x03
+ #define T5FW_VERSION_BUILD 0x00
+
+ #define T5FW_MIN_VERSION_MAJOR 0x00
+@@ -54,8 +54,8 @@
+ #define T5FW_MIN_VERSION_MICRO 0x00
+
+ #define T6FW_VERSION_MAJOR 0x01
+-#define T6FW_VERSION_MINOR 0x16
+-#define T6FW_VERSION_MICRO 0x09
++#define T6FW_VERSION_MINOR 0x17
++#define T6FW_VERSION_MICRO 0x03
+ #define T6FW_VERSION_BUILD 0x00
+
+ #define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/patches.drivers/cxgb4-add-tcb-flags-and-tcb-rpl-struct.patch b/patches.drivers/cxgb4-add-tcb-flags-and-tcb-rpl-struct.patch
new file mode 100644
index 0000000000..7cb7f605c8
--- /dev/null
+++ b/patches.drivers/cxgb4-add-tcb-flags-and-tcb-rpl-struct.patch
@@ -0,0 +1,68 @@
+From: Raju Rangoju <rajur@chelsio.com>
+Date: Wed, 6 Feb 2019 22:54:42 +0530
+Subject: cxgb4: add tcb flags and tcb rpl struct
+Patch-mainline: v5.1-rc1
+Git-commit: e381a1cb650d97e213b5943c81bbcadf8f480962
+References: bsc#1136345 jsc#SLE-4681
+
+This patch adds the tcb flags and structures needed for querying tcb
+information.
+
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 8 ++++++++
+ drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h | 12 ++++++++++++
+ 2 files changed, 20 insertions(+)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+@@ -56,6 +56,7 @@ enum {
+ CPL_TX_DATA_ISO = 0x1F,
+
+ CPL_CLOSE_LISTSRV_RPL = 0x20,
++ CPL_GET_TCB_RPL = 0x22,
+ CPL_L2T_WRITE_RPL = 0x23,
+ CPL_PASS_OPEN_RPL = 0x24,
+ CPL_ACT_OPEN_RPL = 0x25,
+@@ -688,6 +689,13 @@ struct cpl_get_tcb {
+ #define NO_REPLY_V(x) ((x) << NO_REPLY_S)
+ #define NO_REPLY_F NO_REPLY_V(1U)
+
++struct cpl_get_tcb_rpl {
++ union opcode_tid ot;
++ __u8 cookie;
++ __u8 status;
++ __be16 len;
++};
++
+ struct cpl_set_tcb_field {
+ WR_HDR;
+ union opcode_tid ot;
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h
+@@ -41,6 +41,14 @@
+ #define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S)
+
+ #define TCB_T_FLAGS_W 1
++#define TCB_T_FLAGS_S 0
++#define TCB_T_FLAGS_M 0xffffffffffffffffULL
++#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
++
++#define TCB_RQ_START_W 30
++#define TCB_RQ_START_S 0
++#define TCB_RQ_START_M 0x3ffffffULL
++#define TCB_RQ_START_V(x) ((x) << TCB_RQ_START_S)
+
+ #define TF_CCTRL_ECE_S 60
+ #define TF_CCTRL_CWR_S 61
+@@ -66,4 +74,8 @@
+ #define TCB_RX_FRAG3_LEN_RAW_W 29
+ #define TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W 30
+ #define TCB_PDU_HDR_LEN_W 31
++
++#define TF_RX_PDU_OUT_S 49
++#define TF_RX_PDU_OUT_V(x) ((__u64)(x) << TF_RX_PDU_OUT_S)
++
+ #endif /* __T4_TCB_H */
diff --git a/patches.drivers/cxgb4-chtls-Prefix-adapter-flags-with-CXGB4.patch b/patches.drivers/cxgb4-chtls-Prefix-adapter-flags-with-CXGB4.patch
new file mode 100644
index 0000000000..bc9b8cc619
--- /dev/null
+++ b/patches.drivers/cxgb4-chtls-Prefix-adapter-flags-with-CXGB4.patch
@@ -0,0 +1,828 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Date: Mon, 4 Mar 2019 17:43:02 +0530
+Subject: cxgb4/chtls: Prefix adapter flags with CXGB4
+Patch-mainline: v5.1-rc1
+Git-commit: 80f61f19e542aed04cdc6cf9b748ca92bf394333
+References: bsc#1136345 jsc#SLE-4681
+
+Some of these macros were conflicting with global namespace,
+hence prefixing them with CXGB4.
+
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/crypto/chelsio/chtls/chtls_cm.c | 2
+ drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 2
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 24 ++--
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 2
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 16 +--
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c | 2
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 4
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 101 ++++++++++-----------
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 30 +++---
+ drivers/net/ethernet/chelsio/cxgb4/sge.c | 12 +-
+ drivers/net/ethernet/chelsio/cxgb4/srq.c | 2
+ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 14 +-
+ 12 files changed, 106 insertions(+), 105 deletions(-)
+
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
+@@ -616,7 +616,7 @@ int chtls_listen_start(struct chtls_dev
+
+ pi = netdev_priv(ndev);
+ adap = pi->adapter;
+- if (!(adap->flags & FULL_INIT_DONE))
++ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
+ return -EBADF;
+
+ if (listen_hash_find(cdev, sk) >= 0) /* already have it */
+--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+@@ -81,7 +81,7 @@ static int is_fw_attached(struct cudbg_i
+ {
+ struct adapter *padap = pdbg_init->adap;
+
+- if (!(padap->flags & FW_OK) || padap->use_bd)
++ if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
+ return 0;
+
+ return 1;
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -606,18 +606,18 @@ struct dentry;
+ struct work_struct;
+
+ enum { /* adapter flags */
+- FULL_INIT_DONE = (1 << 0),
+- DEV_ENABLED = (1 << 1),
+- USING_MSI = (1 << 2),
+- USING_MSIX = (1 << 3),
+- FW_OK = (1 << 4),
+- RSS_TNLALLLOOKUP = (1 << 5),
+- USING_SOFT_PARAMS = (1 << 6),
+- MASTER_PF = (1 << 7),
+- FW_OFLD_CONN = (1 << 9),
+- ROOT_NO_RELAXED_ORDERING = (1 << 10),
+- SHUTTING_DOWN = (1 << 11),
+- SGE_DBQ_TIMER = (1 << 12),
++ CXGB4_FULL_INIT_DONE = (1 << 0),
++ CXGB4_DEV_ENABLED = (1 << 1),
++ CXGB4_USING_MSI = (1 << 2),
++ CXGB4_USING_MSIX = (1 << 3),
++ CXGB4_FW_OK = (1 << 4),
++ CXGB4_RSS_TNLALLLOOKUP = (1 << 5),
++ CXGB4_USING_SOFT_PARAMS = (1 << 6),
++ CXGB4_MASTER_PF = (1 << 7),
++ CXGB4_FW_OFLD_CONN = (1 << 9),
++ CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10),
++ CXGB4_SHUTTING_DOWN = (1 << 11),
++ CXGB4_SGE_DBQ_TIMER = (1 << 12),
+ };
+
+ enum {
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+@@ -3143,7 +3143,7 @@ static int tid_info_show(struct seq_file
+ seq_printf(seq, ", in use: %u/%u\n",
+ atomic_read(&t->tids_in_use),
+ atomic_read(&t->hash_tids_in_use));
+- } else if (adap->flags & FW_OFLD_CONN) {
++ } else if (adap->flags & CXGB4_FW_OFLD_CONN) {
+ seq_printf(seq, "TID range: %u..%u/%u..%u",
+ t->aftid_base,
+ t->aftid_end,
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -875,7 +875,7 @@ static int set_sge_param(struct net_devi
+ e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+ return -EINVAL;
+
+- if (adapter->flags & FULL_INIT_DONE)
++ if (adapter->flags & CXGB4_FULL_INIT_DONE)
+ return -EBUSY;
+
+ for (i = 0; i < pi->nqsets; ++i) {
+@@ -940,7 +940,7 @@ static int get_dbqtimer_tick(struct net_
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+- if (!(adap->flags & SGE_DBQ_TIMER))
++ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ return adap->sge.dbqtimer_tick;
+@@ -957,7 +957,7 @@ static int get_dbqtimer(struct net_devic
+
+ txq = &adap->sge.ethtxq[pi->first_qset];
+
+- if (!(adap->flags & SGE_DBQ_TIMER))
++ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* all of the TX Queues use the same Timer Index */
+@@ -979,7 +979,7 @@ static int set_dbqtimer_tick(struct net_
+ u32 param, val;
+ int ret;
+
+- if (!(adap->flags & SGE_DBQ_TIMER))
++ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* return early if it's the same Timer Tick we're already using */
+@@ -1015,7 +1015,7 @@ static int set_dbqtimer(struct net_devic
+ u32 param, val;
+ int ret;
+
+- if (!(adap->flags & SGE_DBQ_TIMER))
++ if (!(adap->flags & CXGB4_SGE_DBQ_TIMER))
+ return 0;
+
+ /* Find the SGE Doorbell Timer Value that's closest to the requested
+@@ -1042,7 +1042,7 @@ static int set_dbqtimer(struct net_devic
+ return 0;
+
+ for (qix = 0; qix < pi->nqsets; qix++, txq++) {
+- if (adap->flags & FULL_INIT_DONE) {
++ if (adap->flags & CXGB4_FULL_INIT_DONE) {
+ param =
+ (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_TIMERIX) |
+@@ -1263,7 +1263,7 @@ static int set_flash(struct net_device *
+ * firmware image otherwise we'll try to do the entire job from the
+ * host ... and we always "force" the operation in this path.
+ */
+- if (adap->flags & FULL_INIT_DONE)
++ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ mbox = adap->mbox;
+
+ ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
+@@ -1342,7 +1342,7 @@ static int set_rss_table(struct net_devi
+ return 0;
+
+ /* Interface must be brought up atleast once */
+- if (pi->adapter->flags & FULL_INIT_DONE) {
++ if (pi->adapter->flags & CXGB4_FULL_INIT_DONE) {
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_fcoe.c
+@@ -74,7 +74,7 @@ int cxgb_fcoe_enable(struct net_device *
+ if (is_t4(adap->params.chip))
+ return -EINVAL;
+
+- if (!(adap->flags & FULL_INIT_DONE))
++ if (!(adap->flags & CXGB4_FULL_INIT_DONE))
+ return -EINVAL;
+
+ dev_info(adap->pdev_dev, "Enabling FCoE offload features\n");
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -524,7 +524,7 @@ static int del_filter_wr(struct adapter
+ return -ENOMEM;
+
+ fwr = __skb_put(skb, len);
+- t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & SHUTTING_DOWN) ? -1
++ t4_mk_filtdelwr(f->tid, fwr, (adapter->flags & CXGB4_SHUTTING_DOWN) ? -1
+ : adapter->sge.fw_evtq.abs_id);
+
+ /* Mark the filter as "pending" and ship off the Filter Work Request.
+@@ -1569,7 +1569,7 @@ int cxgb4_del_filter(struct net_device *
+ int ret;
+
+ /* If we are shutting down the adapter do not wait for completion */
+- if (netdev2adap(dev)->flags & SHUTTING_DOWN)
++ if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
+ return __cxgb4_del_filter(dev, filter_id, fs, NULL);
+
+ init_completion(&ctx.completion);
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -651,12 +651,12 @@ out:
+
+ static void disable_msi(struct adapter *adapter)
+ {
+- if (adapter->flags & USING_MSIX) {
++ if (adapter->flags & CXGB4_USING_MSIX) {
+ pci_disable_msix(adapter->pdev);
+- adapter->flags &= ~USING_MSIX;
+- } else if (adapter->flags & USING_MSI) {
++ adapter->flags &= ~CXGB4_USING_MSIX;
++ } else if (adapter->flags & CXGB4_USING_MSI) {
+ pci_disable_msi(adapter->pdev);
+- adapter->flags &= ~USING_MSI;
++ adapter->flags &= ~CXGB4_USING_MSI;
+ }
+ }
+
+@@ -672,7 +672,7 @@ static irqreturn_t t4_nondata_intr(int i
+ adap->swintr = 1;
+ t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
+ }
+- if (adap->flags & MASTER_PF)
++ if (adap->flags & CXGB4_MASTER_PF)
+ t4_slow_intr_handler(adap);
+ return IRQ_HANDLED;
+ }
+@@ -837,9 +837,9 @@ static void quiesce_rx(struct adapter *a
+ /* Disable interrupt and napi handler */
+ static void disable_interrupts(struct adapter *adap)
+ {
+- if (adap->flags & FULL_INIT_DONE) {
++ if (adap->flags & CXGB4_FULL_INIT_DONE) {
+ t4_intr_disable(adap);
+- if (adap->flags & USING_MSIX) {
++ if (adap->flags & CXGB4_USING_MSIX) {
+ free_msix_queue_irqs(adap);
+ free_irq(adap->msix_info[0].vec, adap);
+ } else {
+@@ -880,7 +880,7 @@ static int setup_fw_sge_queues(struct ad
+ bitmap_zero(s->starving_fl, s->egr_sz);
+ bitmap_zero(s->txq_maperr, s->egr_sz);
+
+- if (adap->flags & USING_MSIX)
++ if (adap->flags & CXGB4_USING_MSIX)
+ adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */
+ else {
+ err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+@@ -939,7 +939,7 @@ static int setup_sge_queues(struct adapt
+ err = t4_sge_alloc_eth_txq(adap, t, dev,
+ netdev_get_tx_queue(dev, j),
+ q->rspq.cntxt_id,
+- !!(adap->flags & SGE_DBQ_TIMER));
++ !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
+ if (err)
+ goto freeout;
+ }
+@@ -2280,7 +2280,7 @@ static int cxgb_up(struct adapter *adap)
+ if (err)
+ goto freeq;
+
+- if (adap->flags & USING_MSIX) {
++ if (adap->flags & CXGB4_USING_MSIX) {
+ name_msix_vecs(adap);
+ err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
+ adap->msix_info[0].desc, adap);
+@@ -2293,7 +2293,8 @@ static int cxgb_up(struct adapter *adap)
+ }
+ } else {
+ err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
+- (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
++ (adap->flags & CXGB4_USING_MSI) ? 0
++ : IRQF_SHARED,
+ adap->port[0]->name, adap);
+ if (err)
+ goto irq_err;
+@@ -2302,7 +2303,7 @@ static int cxgb_up(struct adapter *adap)
+ enable_rx(adap);
+ t4_sge_start(adap);
+ t4_intr_enable(adap);
+- adap->flags |= FULL_INIT_DONE;
++ adap->flags |= CXGB4_FULL_INIT_DONE;
+ mutex_unlock(&uld_mutex);
+
+ notify_ulds(adap, CXGB4_STATE_UP);
+@@ -2338,7 +2339,7 @@ static void cxgb_down(struct adapter *ad
+ kfree(entry);
+ }
+
+- adapter->flags &= ~FULL_INIT_DONE;
++ adapter->flags &= ~CXGB4_FULL_INIT_DONE;
+ }
+
+ /*
+@@ -2352,7 +2353,7 @@ static int cxgb_open(struct net_device *
+
+ netif_carrier_off(dev);
+
+- if (!(adapter->flags & FULL_INIT_DONE)) {
++ if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
+ err = cxgb_up(adapter);
+ if (err < 0)
+ return err;
+@@ -2957,7 +2958,7 @@ static void cxgb_netpoll(struct net_devi
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+
+- if (adap->flags & USING_MSIX) {
++ if (adap->flags & CXGB4_USING_MSIX) {
+ int i;
+ struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
+
+@@ -2984,7 +2985,7 @@ static int cxgb_set_tx_maxrate(struct ne
+ if (index < 0 || index > pi->nqsets - 1)
+ return -EINVAL;
+
+- if (!(adap->flags & FULL_INIT_DONE)) {
++ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to rate limit on queue %d. Link Down?\n",
+ index);
+@@ -3085,7 +3086,7 @@ static int cxgb_setup_tc_block_cb(enum t
+ struct port_info *pi = netdev2pinfo(dev);
+ struct adapter *adap = netdev2adap(dev);
+
+- if (!(adap->flags & FULL_INIT_DONE)) {
++ if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
+ dev_err(adap->pdev_dev,
+ "Failed to setup tc on port %d. Link Down?\n",
+ pi->port_id);
+@@ -4217,7 +4218,7 @@ static int adap_init0(struct adapter *ad
+ return ret;
+ }
+ if (ret == adap->mbox)
+- adap->flags |= MASTER_PF;
++ adap->flags |= CXGB4_MASTER_PF;
+
+ /*
+ * If we're the Master PF Driver and the device is uninitialized,
+@@ -4232,7 +4233,7 @@ static int adap_init0(struct adapter *ad
+ /* If firmware is too old (not supported by driver) force an update. */
+ if (ret)
+ state = DEV_STATE_UNINIT;
+- if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
++ if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
+ struct fw_info *fw_info;
+ struct fw_hdr *card_fw;
+ const struct firmware *fw;
+@@ -4294,7 +4295,7 @@ static int adap_init0(struct adapter *ad
+ ret);
+ dev_info(adap->pdev_dev, "Coming up as %s: "\
+ "Adapter already initialized\n",
+- adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
++ adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
+ } else {
+ dev_info(adap->pdev_dev, "Coming up as MASTER: "\
+ "Initializing adapter\n");
+@@ -4396,7 +4397,7 @@ static int adap_init0(struct adapter *ad
+ }
+
+ if (!ret)
+- adap->flags |= SGE_DBQ_TIMER;
++ adap->flags |= CXGB4_SGE_DBQ_TIMER;
+
+ if (is_bypass_device(adap->pdev->device))
+ adap->params.bypass = 1;
+@@ -4520,7 +4521,7 @@ static int adap_init0(struct adapter *ad
+ * offload connection through firmware work request
+ */
+ if ((val[0] != val[1]) && (ret >= 0)) {
+- adap->flags |= FW_OFLD_CONN;
++ adap->flags |= CXGB4_FW_OFLD_CONN;
+ adap->tids.aftid_base = val[0];
+ adap->tids.aftid_end = val[1];
+ }
+@@ -4613,7 +4614,7 @@ static int adap_init0(struct adapter *ad
+ * 2. Server filter: This are special filters which are used
+ * to redirect SYN packets to offload queue.
+ */
+- if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
++ if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
+ adap->tids.sftid_base = adap->tids.ftid_base +
+ DIV_ROUND_UP(adap->tids.nftids, 3);
+ adap->tids.nsftids = adap->tids.nftids -
+@@ -4792,7 +4793,7 @@ static int adap_init0(struct adapter *ad
+ adap->params.b_wnd);
+ }
+ t4_init_sge_params(adap);
+- adap->flags |= FW_OK;
++ adap->flags |= CXGB4_FW_OK;
+ t4_init_tp_params(adap, true);
+ return 0;
+
+@@ -4827,7 +4828,7 @@ static pci_ers_result_t eeh_err_detected
+ goto out;
+
+ rtnl_lock();
+- adap->flags &= ~FW_OK;
++ adap->flags &= ~CXGB4_FW_OK;
+ notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
+ spin_lock(&adap->stats_lock);
+ for_each_port(adap, i) {
+@@ -4839,12 +4840,12 @@ static pci_ers_result_t eeh_err_detected
+ }
+ spin_unlock(&adap->stats_lock);
+ disable_interrupts(adap);
+- if (adap->flags & FULL_INIT_DONE)
++ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ cxgb_down(adap);
+ rtnl_unlock();
+- if ((adap->flags & DEV_ENABLED)) {
++ if ((adap->flags & CXGB4_DEV_ENABLED)) {
+ pci_disable_device(pdev);
+- adap->flags &= ~DEV_ENABLED;
++ adap->flags &= ~CXGB4_DEV_ENABLED;
+ }
+ out: return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+@@ -4862,13 +4863,13 @@ static pci_ers_result_t eeh_slot_reset(s
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+- if (!(adap->flags & DEV_ENABLED)) {
++ if (!(adap->flags & CXGB4_DEV_ENABLED)) {
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "Cannot reenable PCI "
+ "device after reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+- adap->flags |= DEV_ENABLED;
++ adap->flags |= CXGB4_DEV_ENABLED;
+ }
+
+ pci_set_master(pdev);
+@@ -4880,7 +4881,7 @@ static pci_ers_result_t eeh_slot_reset(s
+ return PCI_ERS_RESULT_DISCONNECT;
+ if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
+ return PCI_ERS_RESULT_DISCONNECT;
+- adap->flags |= FW_OK;
++ adap->flags |= CXGB4_FW_OK;
+ if (adap_init1(adap, &c))
+ return PCI_ERS_RESULT_DISCONNECT;
+
+@@ -4992,7 +4993,7 @@ static int cfg_queues(struct adapter *ad
+ * at all is problematic ...
+ */
+ niqflint = adap->params.pfres.niqflint - 1;
+- if (!(adap->flags & USING_MSIX))
++ if (!(adap->flags & CXGB4_USING_MSIX))
+ niqflint--;
+ neq = adap->params.pfres.neq / 2;
+ avail_eth_qsets = min(niqflint, neq);
+@@ -5274,8 +5275,8 @@ static void print_adapter_info(struct ad
+ /* Software/Hardware configuration */
+ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
+ is_offload(adapter) ? "R" : "",
+- ((adapter->flags & USING_MSIX) ? "MSI-X" :
+- (adapter->flags & USING_MSI) ? "MSI" : ""),
++ ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
++ (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
+ is_offload(adapter) ? "Offload" : "non-Offload");
+ }
+
+@@ -5350,7 +5351,7 @@ static void free_some_resources(struct a
+ kfree(adap2pinfo(adapter, i)->rss);
+ free_netdev(adapter->port[i]);
+ }
+- if (adapter->flags & FW_OK)
++ if (adapter->flags & CXGB4_FW_OK)
+ t4_fw_bye(adapter, adapter->pf);
+ }
+
+@@ -5654,7 +5655,7 @@ static int init_one(struct pci_dev *pdev
+ }
+
+ /* PCI device has been enabled */
+- adapter->flags |= DEV_ENABLED;
++ adapter->flags |= CXGB4_DEV_ENABLED;
+ memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+
+ /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
+@@ -5672,7 +5673,7 @@ static int init_one(struct pci_dev *pdev
+ * using Relaxed Ordering.
+ */
+ if (!pcie_relaxed_ordering_enabled(pdev))
+- adapter->flags |= ROOT_NO_RELAXED_ORDERING;
++ adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
+
+ spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->tid_release_lock);
+@@ -5804,7 +5805,7 @@ static int init_one(struct pci_dev *pdev
+
+ pci_set_drvdata(pdev, adapter);
+
+- if (adapter->flags & FW_OK) {
++ if (adapter->flags & CXGB4_FW_OK) {
+ err = t4_port_init(adapter, func, func, 0);
+ if (err)
+ goto out_free_dev;
+@@ -5826,7 +5827,7 @@ static int init_one(struct pci_dev *pdev
+ }
+ }
+
+- if (!(adapter->flags & FW_OK))
++ if (!(adapter->flags & CXGB4_FW_OK))
+ goto fw_attach_fail;
+
+ /* Configure queues and allocate tables now, they can be needed as
+@@ -5920,9 +5921,9 @@ static int init_one(struct pci_dev *pdev
+
+ /* See what interrupts we'll be using */
+ if (msi > 1 && enable_msix(adapter) == 0)
+- adapter->flags |= USING_MSIX;
++ adapter->flags |= CXGB4_USING_MSIX;
+ else if (msi > 0 && pci_enable_msi(pdev) == 0) {
+- adapter->flags |= USING_MSI;
++ adapter->flags |= CXGB4_USING_MSI;
+ if (msi > 1)
+ free_msix_info(adapter);
+ }
+@@ -5990,7 +5991,7 @@ fw_attach_fail:
+ cxgb4_ptp_init(adapter);
+
+ if (IS_REACHABLE(CONFIG_THERMAL) &&
+- !is_t4(adapter->params.chip) && (adapter->flags & FW_OK))
++ !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
+ cxgb4_thermal_init(adapter);
+
+ print_adapter_info(adapter);
+@@ -5999,7 +6000,7 @@ fw_attach_fail:
+ out_free_dev:
+ t4_free_sge_resources(adapter);
+ free_some_resources(adapter);
+- if (adapter->flags & USING_MSIX)
++ if (adapter->flags & CXGB4_USING_MSIX)
+ free_msix_info(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
+@@ -6032,7 +6033,7 @@ static void remove_one(struct pci_dev *p
+ return;
+ }
+
+- adapter->flags |= SHUTTING_DOWN;
++ adapter->flags |= CXGB4_SHUTTING_DOWN;
+
+ if (adapter->pf == 4) {
+ int i;
+@@ -6067,10 +6068,10 @@ static void remove_one(struct pci_dev *p
+ */
+ clear_all_filters(adapter);
+
+- if (adapter->flags & FULL_INIT_DONE)
++ if (adapter->flags & CXGB4_FULL_INIT_DONE)
+ cxgb_down(adapter);
+
+- if (adapter->flags & USING_MSIX)
++ if (adapter->flags & CXGB4_USING_MSIX)
+ free_msix_info(adapter);
+ if (adapter->num_uld || adapter->num_ofld_uld)
+ t4_uld_mem_free(adapter);
+@@ -6094,9 +6095,9 @@ static void remove_one(struct pci_dev *p
+ #endif
+ iounmap(adapter->regs);
+ pci_disable_pcie_error_reporting(pdev);
+- if ((adapter->flags & DEV_ENABLED)) {
++ if ((adapter->flags & CXGB4_DEV_ENABLED)) {
+ pci_disable_device(pdev);
+- adapter->flags &= ~DEV_ENABLED;
++ adapter->flags &= ~CXGB4_DEV_ENABLED;
+ }
+ pci_release_regions(pdev);
+ kfree(adapter->mbox_log);
+@@ -6122,7 +6123,7 @@ static void shutdown_one(struct pci_dev
+ return;
+ }
+
+- adapter->flags |= SHUTTING_DOWN;
++ adapter->flags |= CXGB4_SHUTTING_DOWN;
+
+ if (adapter->pf == 4) {
+ int i;
+@@ -6140,7 +6141,7 @@ static void shutdown_one(struct pci_dev
+ disable_msi(adapter);
+
+ t4_sge_stop(adapter);
+- if (adapter->flags & FW_OK)
++ if (adapter->flags & CXGB4_FW_OK)
+ t4_fw_bye(adapter, adapter->mbox);
+ }
+ }
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+@@ -147,7 +147,7 @@ static int alloc_uld_rxqs(struct adapter
+
+ per_chan = rxq_info->nrxq / adap->params.nports;
+
+- if (adap->flags & USING_MSIX)
++ if (adap->flags & CXGB4_USING_MSIX)
+ msi_idx = 1;
+ else
+ msi_idx = -((int)s->intrq.abs_id + 1);
+@@ -195,7 +195,7 @@ setup_sge_queues_uld(struct adapter *ada
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int i, ret = 0;
+
+- if (adap->flags & USING_MSIX) {
++ if (adap->flags & CXGB4_USING_MSIX) {
+ rxq_info->msix_tbl = kcalloc((rxq_info->nrxq + rxq_info->nciq),
+ sizeof(unsigned short),
+ GFP_KERNEL);
+@@ -206,7 +206,7 @@ setup_sge_queues_uld(struct adapter *ada
+ ret = !(!alloc_uld_rxqs(adap, rxq_info, lro));
+
+ /* Tell uP to route control queue completions to rdma rspq */
+- if (adap->flags & FULL_INIT_DONE &&
++ if (adap->flags & CXGB4_FULL_INIT_DONE &&
+ !ret && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ unsigned int cmplqid;
+@@ -239,7 +239,7 @@ static void free_sge_queues_uld(struct a
+ {
+ struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+- if (adap->flags & FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
++ if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
+ struct sge *s = &adap->sge;
+ u32 param, cmdop, cmplqid = 0;
+ int i;
+@@ -258,7 +258,7 @@ static void free_sge_queues_uld(struct a
+ t4_free_uld_rxqs(adap, rxq_info->nciq,
+ rxq_info->uldrxq + rxq_info->nrxq);
+ t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
+- if (adap->flags & USING_MSIX)
++ if (adap->flags & CXGB4_USING_MSIX)
+ kfree(rxq_info->msix_tbl);
+ }
+
+@@ -273,7 +273,7 @@ static int cfg_queues_uld(struct adapter
+ if (!rxq_info)
+ return -ENOMEM;
+
+- if (adap->flags & USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
++ if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
+ i = s->nqs_per_uld;
+ rxq_info->nrxq = roundup(i, adap->params.nports);
+ } else {
+@@ -284,7 +284,7 @@ static int cfg_queues_uld(struct adapter
+ if (!uld_info->ciq) {
+ rxq_info->nciq = 0;
+ } else {
+- if (adap->flags & USING_MSIX)
++ if (adap->flags & CXGB4_USING_MSIX)
+ rxq_info->nciq = min_t(int, s->nqs_per_uld,
+ num_online_cpus());
+ else
+@@ -611,10 +611,10 @@ static void cxgb4_shutdown_uld_adapter(s
+ adap->uld[type].add = NULL;
+ release_sge_txq_uld(adap, type);
+
+- if (adap->flags & FULL_INIT_DONE)
++ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+
+- if (adap->flags & USING_MSIX)
++ if (adap->flags & CXGB4_USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+
+ free_sge_queues_uld(adap, type);
+@@ -673,7 +673,7 @@ static void uld_init(struct adapter *ada
+ lld->sge_egrstatuspagesize = adap->sge.stat_len;
+ lld->sge_pktshift = adap->sge.pktshift;
+ lld->ulp_crypto = adap->params.crypto;
+- lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
++ lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
+ lld->max_ordird_qp = adap->params.max_ordird_qp;
+ lld->max_ird_adapter = adap->params.max_ird_adapter;
+ lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
+@@ -702,7 +702,7 @@ static void uld_attach(struct adapter *a
+ adap->uld[uld].handle = handle;
+ t4_register_netevent_notifier();
+
+- if (adap->flags & FULL_INIT_DONE)
++ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+ }
+
+@@ -737,13 +737,13 @@ void cxgb4_register_uld(enum cxgb4_uld t
+ ret = setup_sge_queues_uld(adap, type, p->lro);
+ if (ret)
+ goto free_queues;
+- if (adap->flags & USING_MSIX) {
++ if (adap->flags & CXGB4_USING_MSIX) {
+ name_msix_vecs_uld(adap, type);
+ ret = request_msix_queue_irqs_uld(adap, type);
+ if (ret)
+ goto free_rxq;
+ }
+- if (adap->flags & FULL_INIT_DONE)
++ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ enable_rx_uld(adap, type);
+ if (adap->uld[type].add)
+ goto free_irq;
+@@ -754,9 +754,9 @@ void cxgb4_register_uld(enum cxgb4_uld t
+ uld_attach(adap, type);
+ continue;
+ free_irq:
+- if (adap->flags & FULL_INIT_DONE)
++ if (adap->flags & CXGB4_FULL_INIT_DONE)
+ quiesce_rx_uld(adap, type);
+- if (adap->flags & USING_MSIX)
++ if (adap->flags & CXGB4_USING_MSIX)
+ free_msix_queue_irqs_uld(adap, type);
+ free_rxq:
+ free_sge_queues_uld(adap, type);
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -3376,7 +3376,7 @@ static irqreturn_t t4_intr_msi(int irq,
+ {
+ struct adapter *adap = cookie;
+
+- if (adap->flags & MASTER_PF)
++ if (adap->flags & CXGB4_MASTER_PF)
+ t4_slow_intr_handler(adap);
+ process_intrq(adap);
+ return IRQ_HANDLED;
+@@ -3392,7 +3392,7 @@ static irqreturn_t t4_intr_intx(int irq,
+ struct adapter *adap = cookie;
+
+ t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
+- if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
++ if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
+ process_intrq(adap))
+ return IRQ_HANDLED;
+ return IRQ_NONE; /* probably shared interrupt */
+@@ -3407,9 +3407,9 @@ static irqreturn_t t4_intr_intx(int irq,
+ */
+ irq_handler_t t4_intr_handler(struct adapter *adap)
+ {
+- if (adap->flags & USING_MSIX)
++ if (adap->flags & CXGB4_USING_MSIX)
+ return t4_sge_intr_msix;
+- if (adap->flags & USING_MSI)
++ if (adap->flags & CXGB4_USING_MSI)
+ return t4_intr_msi;
+ return t4_intr_intx;
+ }
+@@ -3442,7 +3442,7 @@ static void sge_rx_timer_cb(struct timer
+ * global Master PF activities like checking for chip ingress stalls,
+ * etc.
+ */
+- if (!(adap->flags & MASTER_PF))
++ if (!(adap->flags & CXGB4_MASTER_PF))
+ goto done;
+
+ t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);
+@@ -3550,7 +3550,7 @@ int t4_sge_alloc_rxq(struct adapter *ada
+ struct fw_iq_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = netdev_priv(dev);
+- int relaxed = !(adap->flags & ROOT_NO_RELAXED_ORDERING);
++ int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);
+
+ /* Size needs to be multiple of 16, including status entry. */
+ iq->size = roundup(iq->size, 16);
+--- a/drivers/net/ethernet/chelsio/cxgb4/srq.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/srq.c
+@@ -77,7 +77,7 @@ int cxgb4_get_srq_entry(struct net_devic
+ adap = netdev2adap(dev);
+ s = adap->srq;
+
+- if (!(adap->flags & FULL_INIT_DONE) || !s)
++ if (!(adap->flags & CXGB4_FULL_INIT_DONE) || !s)
+ goto out;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -198,7 +198,7 @@ static void t4_report_fw_error(struct ad
+ if (pcie_fw & PCIE_FW_ERR_F) {
+ dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
+ reason[PCIE_FW_EVAL_G(pcie_fw)]);
+- adap->flags &= ~FW_OK;
++ adap->flags &= ~CXGB4_FW_OK;
+ }
+ }
+
+@@ -5243,7 +5243,7 @@ int t4_read_rss(struct adapter *adapter,
+
+ static unsigned int t4_use_ldst(struct adapter *adap)
+ {
+- return (adap->flags & FW_OK) && !adap->use_bd;
++ return (adap->flags & CXGB4_FW_OK) && !adap->use_bd;
+ }
+
+ /**
+@@ -6132,7 +6132,7 @@ unsigned int t4_get_mps_bg_map(struct ad
+ * ( MPSBGMAP[Port 1] << 8 ) |
+ * ( MPSBGMAP[Port 0] << 0 ))
+ */
+- if (adapter->flags & FW_OK) {
++ if (adapter->flags & CXGB4_FW_OK) {
+ u32 param, val;
+ int ret;
+
+@@ -7093,10 +7093,10 @@ int t4_fw_upgrade(struct adapter *adap,
+ if (!t4_fw_matches_chip(adap, fw_hdr))
+ return -EINVAL;
+
+- /* Disable FW_OK flag so that mbox commands with FW_OK flag set
+- * wont be sent when we are flashing FW.
++ /* Disable CXGB4_FW_OK flag so that mbox commands with CXGB4_FW_OK flag
++ * set wont be sent when we are flashing FW.
+ */
+- adap->flags &= ~FW_OK;
++ adap->flags &= ~CXGB4_FW_OK;
+
+ ret = t4_fw_halt(adap, mbox, force);
+ if (ret < 0 && !force)
+@@ -7135,7 +7135,7 @@ int t4_fw_upgrade(struct adapter *adap,
+ */
+ (void)t4_init_devlog_params(adap);
+ out:
+- adap->flags |= FW_OK;
++ adap->flags |= CXGB4_FW_OK;
+ return ret;
+ }
+
diff --git a/patches.drivers/cxgb4-cxgb4vf-Display-advertised-FEC-in-ethtool.patch b/patches.drivers/cxgb4-cxgb4vf-Display-advertised-FEC-in-ethtool.patch
new file mode 100644
index 0000000000..1723c74847
--- /dev/null
+++ b/patches.drivers/cxgb4-cxgb4vf-Display-advertised-FEC-in-ethtool.patch
@@ -0,0 +1,302 @@
+From: Vishal Kulkarni <vishal@chelsio.com>
+Date: Fri, 29 Mar 2019 18:24:03 +0530
+Subject: cxgb4/cxgb4vf: Display advertised FEC in ethtool
+Patch-mainline: v5.2-rc1
+Git-commit: 9f764898c73d21fac3ff22b20826f15418345a60
+References: bsc#1136345 jsc#SLE-4681
+
+This patch advertises Forward Error Correction in ethtool
+
+Signed-off-by: Casey Leedom <leedom@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 23 ----
+ drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 107 ++++++++++++++------
+ drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16 --
+ drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 10 +
+ 5 files changed, 95 insertions(+), 65 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -1575,9 +1575,11 @@ int t4_slow_intr_handler(struct adapter
+
+ int t4_wait_dev_ready(void __iomem *regs);
+
++fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
++ struct link_config *lc);
+ int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
+ unsigned int port, struct link_config *lc,
+- bool sleep_ok, int timeout);
++ u8 sleep_ok, int timeout);
+
+ static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc)
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -442,7 +442,7 @@ static unsigned int speed_to_fw_caps(int
+ * Link Mode Mask.
+ */
+ static void fw_caps_to_lmm(enum fw_port_type port_type,
+- unsigned int fw_caps,
++ fw_port_cap32_t fw_caps,
+ unsigned long *link_mode_mask)
+ {
+ #define SET_LMM(__lmm_name) \
+@@ -632,7 +632,10 @@ static int get_link_ksettings(struct net
+
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
+ link_ksettings->link_modes.supported);
+- fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
++ fw_caps_to_lmm(pi->port_type,
++ t4_link_acaps(pi->adapter,
++ pi->lport,
++ &pi->link_cfg),
+ link_ksettings->link_modes.advertising);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
+ link_ksettings->link_modes.lp_advertising);
+@@ -642,22 +645,6 @@ static int get_link_ksettings(struct net
+ : SPEED_UNKNOWN);
+ base->duplex = DUPLEX_FULL;
+
+- if (pi->link_cfg.fc & PAUSE_RX) {
+- if (pi->link_cfg.fc & PAUSE_TX) {
+- ethtool_link_ksettings_add_link_mode(link_ksettings,
+- advertising,
+- Pause);
+- } else {
+- ethtool_link_ksettings_add_link_mode(link_ksettings,
+- advertising,
+- Asym_Pause);
+- }
+- } else if (pi->link_cfg.fc & PAUSE_TX) {
+- ethtool_link_ksettings_add_link_mode(link_ksettings,
+- advertising,
+- Asym_Pause);
+- }
+-
+ base->autoneg = pi->link_cfg.autoneg;
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -3964,6 +3964,14 @@ void t4_ulprx_read_la(struct adapter *ad
+ }
+ }
+
++/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
++ * Capabilities which we control with separate controls -- see, for instance,
++ * Pause Frames and Forward Error Correction. In order to determine what the
++ * full set of Advertised Port Capabilities are, the base Advertised Port
++ * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
++ * Port Capabilities associated with those other controls. See
++ * t4_link_acaps() for how this is done.
++ */
+ #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
+ FW_PORT_CAP32_ANEG)
+
+@@ -4061,6 +4069,9 @@ static inline enum cc_pause fwcap_to_cc_
+ /* Translate Common Code Pause specification into Firmware Port Capabilities */
+ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
+ {
++ /* Translate orthogonal RX/TX Pause Controls for L1 Configure
++ * commands, etc.
++ */
+ fw_port_cap32_t fw_pause = 0;
+
+ if (cc_pause & PAUSE_RX)
+@@ -4070,6 +4081,19 @@ static inline fw_port_cap32_t cc_to_fwca
+ if (!(cc_pause & PAUSE_AUTONEG))
+ fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
+
++ /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
++ * Asymetrical Pause for use in reporting to upper layer OS code, etc.
++ * Note that these bits are ignored in L1 Configure commands.
++ */
++ if (cc_pause & PAUSE_RX) {
++ if (cc_pause & PAUSE_TX)
++ fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
++ else
++ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
++ } else if (cc_pause & PAUSE_TX) {
++ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
++ }
++
+ return fw_pause;
+ }
+
+@@ -4100,31 +4124,22 @@ static inline fw_port_cap32_t cc_to_fwca
+ }
+
+ /**
+- * t4_link_l1cfg - apply link configuration to MAC/PHY
++ * t4_link_acaps - compute Link Advertised Port Capabilities
+ * @adapter: the adapter
+- * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
+- * @sleep_ok: if true we may sleep while awaiting command completion
+- * @timeout: time to wait for command to finish before timing out
+- * (negative implies @sleep_ok=false)
+ *
+- * Set up a port's MAC and PHY according to a desired link configuration.
+- * - If the PHY can auto-negotiate first decide what to advertise, then
+- * enable/disable auto-negotiation as desired, and reset.
+- * - If the PHY does not auto-negotiate just reset it.
+- * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+- * otherwise do it later based on the outcome of auto-negotiation.
++ * Synthesize the Advertised Port Capabilities we'll be using based on
++ * the base Advertised Port Capabilities (which have been filtered by
++ * ADVERT_MASK) plus the individual controls for things like Pause
++ * Frames, Forward Error Correction, MDI, etc.
+ */
+-int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+- unsigned int port, struct link_config *lc,
+- bool sleep_ok, int timeout)
++fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
++ struct link_config *lc)
+ {
+- unsigned int fw_caps = adapter->params.fw_caps_support;
+- fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
+- struct fw_port_cmd cmd;
++ fw_port_cap32_t fw_fc, fw_fec, acaps;
+ unsigned int fw_mdi;
+- int ret;
++ char cc_fec;
+
+ fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
+
+@@ -4151,18 +4166,15 @@ int t4_link_l1cfg_core(struct adapter *a
+ * init_link_config().
+ */
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
+- if (lc->autoneg == AUTONEG_ENABLE)
+- return -EINVAL;
+-
+- rcap = lc->acaps | fw_fc | fw_fec;
++ acaps = lc->acaps | fw_fc | fw_fec;
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = cc_fec;
+ } else if (lc->autoneg == AUTONEG_DISABLE) {
+- rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
++ acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
+ lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
+ lc->fec = cc_fec;
+ } else {
+- rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
++ acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ }
+
+ /* Some Requested Port Capabilities are trivially wrong if they exceed
+@@ -4173,15 +4185,50 @@ int t4_link_l1cfg_core(struct adapter *a
+ * we need to exclude this from this check in order to maintain
+ * compatibility ...
+ */
+- if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
+- dev_err(adapter->pdev_dev,
+- "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
+- rcap, lc->pcaps);
++ if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
++ dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
++ acaps, lc->pcaps);
++ return -EINVAL;
++ }
++
++ return acaps;
++}
++
++/**
++ * t4_link_l1cfg_core - apply link configuration to MAC/PHY
++ * @adapter: the adapter
++ * @mbox: the Firmware Mailbox to use
++ * @port: the Port ID
++ * @lc: the Port's Link Configuration
++ * @sleep_ok: if true we may sleep while awaiting command completion
++ * @timeout: time to wait for command to finish before timing out
++ * (negative implies @sleep_ok=false)
++ *
++ * Set up a port's MAC and PHY according to a desired link configuration.
++ * - If the PHY can auto-negotiate first decide what to advertise, then
++ * enable/disable auto-negotiation as desired, and reset.
++ * - If the PHY does not auto-negotiate just reset it.
++ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
++ * otherwise do it later based on the outcome of auto-negotiation.
++ */
++int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
++ unsigned int port, struct link_config *lc,
++ u8 sleep_ok, int timeout)
++{
++ unsigned int fw_caps = adapter->params.fw_caps_support;
++ struct fw_port_cmd cmd;
++ fw_port_cap32_t rcap;
++ int ret;
++
++ if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
++ lc->autoneg == AUTONEG_ENABLE) {
+ return -EINVAL;
+ }
+
+- /* And send that on to the Firmware ...
++ /* Compute our Requested Port Capabilities and send that on to the
++ * Firmware.
+ */
++ rcap = t4_link_acaps(adapter, port, lc);
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
+@@ -4211,7 +4258,7 @@ int t4_link_l1cfg_core(struct adapter *a
+ rcap, -ret);
+ return ret;
+ }
+- return ret;
++ return 0;
+ }
+
+ /**
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+@@ -1479,22 +1479,6 @@ static int cxgb4vf_get_link_ksettings(st
+ base->duplex = DUPLEX_UNKNOWN;
+ }
+
+- if (pi->link_cfg.fc & PAUSE_RX) {
+- if (pi->link_cfg.fc & PAUSE_TX) {
+- ethtool_link_ksettings_add_link_mode(link_ksettings,
+- advertising,
+- Pause);
+- } else {
+- ethtool_link_ksettings_add_link_mode(link_ksettings,
+- advertising,
+- Asym_Pause);
+- }
+- } else if (pi->link_cfg.fc & PAUSE_TX) {
+- ethtool_link_ksettings_add_link_mode(link_ksettings,
+- advertising,
+- Asym_Pause);
+- }
+-
+ base->autoneg = pi->link_cfg.autoneg;
+ if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+@@ -313,7 +313,17 @@ int t4vf_wr_mbox_core(struct adapter *ad
+ return ret;
+ }
+
++/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to
++ * mask out bits in the Advertised Port Capabilities which are managed via
++ * separate controls, like Pause Frames and Forward Error Correction. In the
++ * Virtual Function Common Code, since we never perform L1 Configuration on
++ * the Link, the only things we really need to filter out are things which
++ * we decode and report separately like Speed.
++ */
+ #define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
++ FW_PORT_CAP32_802_3_PAUSE | \
++ FW_PORT_CAP32_802_3_ASM_DIR | \
++ FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \
+ FW_PORT_CAP32_ANEG)
+
+ /**
diff --git a/patches.drivers/cxgb4-cxgb4vf-Fix-up-netdev-hw_features.patch b/patches.drivers/cxgb4-cxgb4vf-Fix-up-netdev-hw_features.patch
new file mode 100644
index 0000000000..90882ccfc5
--- /dev/null
+++ b/patches.drivers/cxgb4-cxgb4vf-Fix-up-netdev-hw_features.patch
@@ -0,0 +1,75 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Date: Thu, 28 Feb 2019 15:06:54 +0530
+Subject: cxgb4/cxgb4vf: Fix up netdev->hw_features
+Patch-mainline: v5.1-rc1
+Git-commit: 012475e3c59cbe488779e86565807a73ff115f4b
+References: bsc#1136345 jsc#SLE-4681
+
+GRO is done by cxgb4/cxgb4vf. Hence set NETIF_F_GRO flag for
+both cxgb4/cxgb4vf.
+Cleaned up VLAN netdev features in cxgb4vf. Also fixed
+NETIF_F_HIGHDMA being set unconditionally for vlan netdev
+features.
+
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 4 ++--
+ drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 15 +++++++--------
+ 2 files changed, 9 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -5311,7 +5311,7 @@ static void free_some_resources(struct a
+
+ #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+ #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
+- NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
++ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+ #define SEGMENT_SIZE 128
+
+ static int t4_get_chip_type(struct adapter *adap, int ver)
+@@ -5718,7 +5718,7 @@ static int init_one(struct pci_dev *pdev
+
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+- NETIF_F_RXCSUM | NETIF_F_RXHASH |
++ NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_TC;
+
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+@@ -1932,6 +1932,8 @@ static void cxgb4vf_get_wol(struct net_d
+ * TCP Segmentation Offload flags which we support.
+ */
+ #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
++#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
++ NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+
+ static const struct ethtool_ops cxgb4vf_ethtool_ops = {
+ .get_link_ksettings = cxgb4vf_get_link_ksettings,
+@@ -3141,16 +3143,13 @@ static int cxgb4vf_pci_probe(struct pci_
+ netif_carrier_off(netdev);
+ netdev->irq = pdev->irq;
+
+- netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
+- netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
+- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+- NETIF_F_HIGHDMA;
+- netdev->features = netdev->hw_features |
+- NETIF_F_HW_VLAN_CTAG_TX;
++ netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
++ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
++ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
++ netdev->features = netdev->hw_features;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
++ netdev->vlan_features = netdev->features & VLAN_FEAT;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->min_mtu = 81;
diff --git a/patches.drivers/cxgb4-cxgb4vf_main-Mark-expected-switch-fall-through.patch b/patches.drivers/cxgb4-cxgb4vf_main-Mark-expected-switch-fall-through.patch
new file mode 100644
index 0000000000..65fb6d4166
--- /dev/null
+++ b/patches.drivers/cxgb4-cxgb4vf_main-Mark-expected-switch-fall-through.patch
@@ -0,0 +1,50 @@
+From: "Gustavo A. R. Silva" <gustavo@embeddedor.com>
+Date: Wed, 24 Apr 2019 11:27:42 -0500
+Subject: cxgb4/cxgb4vf_main: Mark expected switch fall-through
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+Patch-mainline: v5.2-rc1
+Git-commit: 05dd2645302fbbe08abe4d89879c468accb81715
+References: bsc#1136345 jsc#SLE-4681
+
+In preparation to enabling -Wimplicit-fallthrough, mark switch
+cases where we are expecting to fall through.
+
+This patch fixes the following warning:
+
+drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c: In function ‘fwevtq_handler’:
+drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:520:7: warning: this statement may fall through [-Wimplicit-fallthrough=]
+ cpl = (void *)p;
+ ~~~~^~~~~~~~~~~
+drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c:524:2: note: here
+ case CPL_SGE_EGR_UPDATE: {
+ ^~~~
+
+Warning level 3 was used: -Wimplicit-fallthrough=3
+
+Notice that, in this particular case, the code comment is modified
+in accordance with what GCC is expecting to find.
+
+This patch is part of the ongoing efforts to enable
+-Wimplicit-fallthrough.
+
+Signed-off-by: Gustavo A. R. Silva <gustavo@embeddedor.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+@@ -518,8 +518,8 @@ static int fwevtq_handler(struct sge_rsp
+ break;
+ }
+ cpl = (void *)p;
+- /*FALLTHROUGH*/
+ }
++ /* Fall through */
+
+ case CPL_SGE_EGR_UPDATE: {
+ /*
diff --git a/patches.drivers/cxgb4-free-mac_hlist-properly.patch b/patches.drivers/cxgb4-free-mac_hlist-properly.patch
new file mode 100644
index 0000000000..d2e79f6a54
--- /dev/null
+++ b/patches.drivers/cxgb4-free-mac_hlist-properly.patch
@@ -0,0 +1,42 @@
+From: Arjun Vynipadath <arjun@chelsio.com>
+Date: Fri, 9 Nov 2018 14:50:25 +0530
+Subject: cxgb4: free mac_hlist properly
+Patch-mainline: v5.0-rc1
+Git-commit: 2a8d84bf513823ba398f4b2dec41b8decf4041af
+References: bsc#1136345 jsc#SLE-4681
+
+The locally maintained list for tracking hash mac table was
+not freed during driver remove.
+
+Signed-off-by: Arjun Vynipadath <arjun@chelsio.com>
+Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2295,6 +2295,8 @@ static int cxgb_up(struct adapter *adap)
+
+ static void cxgb_down(struct adapter *adapter)
+ {
++ struct hash_mac_addr *entry, *tmp;
++
+ cancel_work_sync(&adapter->tid_release_task);
+ cancel_work_sync(&adapter->db_full_task);
+ cancel_work_sync(&adapter->db_drop_task);
+@@ -2303,6 +2305,12 @@ static void cxgb_down(struct adapter *ad
+
+ t4_sge_stop(adapter);
+ t4_free_sge_resources(adapter);
++
++ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
++ list_del(&entry->list);
++ kfree(entry);
++ }
++
+ adapter->flags &= ~FULL_INIT_DONE;
+ }
+
diff --git a/patches.drivers/cxgb4-kfree-mhp-after-the-debug-print.patch b/patches.drivers/cxgb4-kfree-mhp-after-the-debug-print.patch
new file mode 100644
index 0000000000..809ec0bdeb
--- /dev/null
+++ b/patches.drivers/cxgb4-kfree-mhp-after-the-debug-print.patch
@@ -0,0 +1,32 @@
+From: Shaobo He <shaobo@cs.utah.edu>
+Date: Thu, 28 Feb 2019 15:38:38 -0700
+Subject: cxgb4: kfree mhp after the debug print
+Patch-mainline: v5.1-rc1
+Git-commit: 952a3cc9c06a6143147ccdd108c5ef8ac9a6454c
+References: bsc#1136345 jsc#SLE-4681
+
+In function `c4iw_dealloc_mw`, variable mhp's value is printed after
+freed, it is clearer to have the print before the kfree.
+
+Otherwise racing threads could allocate another mhp with the same pointer
+value and create confusing tracing.
+
+Signed-off-by: Shaobo He <shaobo@cs.utah.edu>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/cxgb4/mem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/cxgb4/mem.c
++++ b/drivers/infiniband/hw/cxgb4/mem.c
+@@ -684,8 +684,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
+ mhp->wr_waitp);
+ kfree_skb(mhp->dereg_skb);
+ c4iw_put_wr_wait(mhp->wr_waitp);
+- kfree(mhp);
+ pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
++ kfree(mhp);
+ return 0;
+ }
+
diff --git a/patches.drivers/cxgb4-offload-VLAN-flows-regardless-of-VLAN-ethtype.patch b/patches.drivers/cxgb4-offload-VLAN-flows-regardless-of-VLAN-ethtype.patch
new file mode 100644
index 0000000000..c1566df293
--- /dev/null
+++ b/patches.drivers/cxgb4-offload-VLAN-flows-regardless-of-VLAN-ethtype.patch
@@ -0,0 +1,42 @@
+From: Raju Rangoju <rajur@chelsio.com>
+Date: Thu, 23 May 2019 20:41:44 +0530
+Subject: cxgb4: offload VLAN flows regardless of VLAN ethtype
+Patch-mainline: v5.2-rc3
+Git-commit: b5730061d1056abf317caea823b94d6e12b5b4f6
+References: bsc#1136345 jsc#SLE-4681
+
+VLAN flows never get offloaded unless ivlan_vld is set in filter spec.
+It's not compulsory for vlan_ethtype to be set.
+
+So, always enable ivlan_vld bit for offloading VLAN flows regardless of
+vlan_ethtype is set or not.
+
+Fixes: ad9af3e09c (cxgb4: add tc flower match support for vlan)
+Signed-off-by: Raju Rangoju <rajur@chelsio.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -228,6 +228,9 @@ static void cxgb4_process_flow_match(str
+ fs->val.ivlan = vlan_tci;
+ fs->mask.ivlan = vlan_tci_mask;
+
++ fs->val.ivlan_vld = 1;
++ fs->mask.ivlan_vld = 1;
++
+ /* Chelsio adapters use ivlan_vld bit to match vlan packets
+ * as 802.1Q. Also, when vlan tag is present in packets,
+ * ethtype match is used then to match on ethtype of inner
+@@ -238,8 +241,6 @@ static void cxgb4_process_flow_match(str
+ * ethtype value with ethtype of inner header.
+ */
+ if (fs->val.ethtype == ETH_P_8021Q) {
+- fs->val.ivlan_vld = 1;
+- fs->mask.ivlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
diff --git a/patches.drivers/cxgb4-remove-DEFINE_SIMPLE_DEBUGFS_FILE.patch b/patches.drivers/cxgb4-remove-DEFINE_SIMPLE_DEBUGFS_FILE.patch
new file mode 100644
index 0000000000..c60a004d0b
--- /dev/null
+++ b/patches.drivers/cxgb4-remove-DEFINE_SIMPLE_DEBUGFS_FILE.patch
@@ -0,0 +1,298 @@
+From: Yangtao Li <tiny.windzz@gmail.com>
+Date: Sat, 15 Dec 2018 02:59:30 -0500
+Subject: cxgb4: remove DEFINE_SIMPLE_DEBUGFS_FILE()
+Patch-mainline: v5.0-rc1
+Git-commit: b09026c6913eeef1b21914a9d441ba275df40f27
+References: bsc#1136345 jsc#SLE-4681
+
+We already have the DEFINE_SHOW_ATTRIBUTE. There is no need to define
+such a macro, so remove DEFINE_SIMPLE_DEBUGFS_FILE. Also use the
+DEFINE_SHOW_ATTRIBUTE macro to simplify some code.
+
+Signed-off-by: Yangtao Li <tiny.windzz@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 113 ++++----------------
+ drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h | 13 --
+ drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16 --
+ 3 files changed, 25 insertions(+), 117 deletions(-)
+
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+@@ -378,19 +378,7 @@ static int cim_qcfg_show(struct seq_file
+ QUEREMFLITS_G(p[2]) * 16);
+ return 0;
+ }
+-
+-static int cim_qcfg_open(struct inode *inode, struct file *file)
+-{
+- return single_open(file, cim_qcfg_show, inode->i_private);
+-}
+-
+-static const struct file_operations cim_qcfg_fops = {
+- .owner = THIS_MODULE,
+- .open = cim_qcfg_open,
+- .read = seq_read,
+- .llseek = seq_lseek,
+- .release = single_release,
+-};
++DEFINE_SHOW_ATTRIBUTE(cim_qcfg);
+
+ static int cimq_show(struct seq_file *seq, void *v, int idx)
+ {
+@@ -860,8 +848,7 @@ static int tx_rate_show(struct seq_file
+ }
+ return 0;
+ }
+-
+-DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate);
++DEFINE_SHOW_ATTRIBUTE(tx_rate);
+
+ static int cctrl_tbl_show(struct seq_file *seq, void *v)
+ {
+@@ -893,8 +880,7 @@ static int cctrl_tbl_show(struct seq_fil
+ kfree(incr);
+ return 0;
+ }
+-
+-DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl);
++DEFINE_SHOW_ATTRIBUTE(cctrl_tbl);
+
+ /* Format a value in a unit that differs from the value's native unit by the
+ * given factor.
+@@ -955,8 +941,7 @@ static int clk_show(struct seq_file *seq
+
+ return 0;
+ }
+-
+-DEFINE_SIMPLE_DEBUGFS_FILE(clk);
++DEFINE_SHOW_ATTRIBUTE(clk);
+
+ /* Firmware Device Log dump. */
+ static const char * const devlog_level_strings[] = {
+@@ -1990,22 +1975,10 @@ static int sensors_show(struct seq_file
+
+ return 0;
+ }
+-
+-DEFINE_SIMPLE_DEBUGFS_FILE(sensors);
++DEFINE_SHOW_ATTRIBUTE(sensors);
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-static int clip_tbl_open(struct inode *inode, struct file *file)
+-{
+- return single_open(file, clip_tbl_show, inode->i_private);
+-}
+-
+-static const struct file_operations clip_tbl_debugfs_fops = {
+- .owner = THIS_MODULE,
+- .open = clip_tbl_open,
+- .read = seq_read,
+- .llseek = seq_lseek,
+- .release = single_release
+-};
++DEFINE_SHOW_ATTRIBUTE(clip_tbl);
+ #endif
+
+ /*RSS Table.
+@@ -2208,8 +2181,7 @@ static int rss_config_show(struct seq_fi
+
+ return 0;
+ }
+-
+-DEFINE_SIMPLE_DEBUGFS_FILE(rss_config);
++DEFINE_SHOW_ATTRIBUTE(rss_config);
+
+ /* RSS Secret Key.
+ */
+@@ -2628,19 +2600,7 @@ static int resources_show(struct seq_fil
+
+ return 0;
+ }
+-
+-static int resources_open(struct inode *inode, struct file *file)
+-{
+- return single_open(file, resources_show, inode->i_private);
+-}
+-
+-static const struct file_operations resources_debugfs_fops = {
+- .owner = THIS_MODULE,
+- .open = resources_open,
+- .read = seq_read,
+- .llseek = seq_lseek,
+- .release = seq_release,
+-};
++DEFINE_SHOW_ATTRIBUTE(resources);
+
+ /**
+ * ethqset2pinfo - return port_info of an Ethernet Queue Set
+@@ -3233,8 +3193,7 @@ static int tid_info_show(struct seq_file
+ t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A));
+ return 0;
+ }
+-
+-DEFINE_SIMPLE_DEBUGFS_FILE(tid_info);
++DEFINE_SHOW_ATTRIBUTE(tid_info);
+
+ static void add_debugfs_mem(struct adapter *adap, const char *name,
+ unsigned int idx, unsigned int size_mb)
+@@ -3364,21 +3323,9 @@ static int meminfo_show(struct seq_file
+
+ return 0;
+ }
++DEFINE_SHOW_ATTRIBUTE(meminfo);
+
+-static int meminfo_open(struct inode *inode, struct file *file)
+-{
+- return single_open(file, meminfo_show, inode->i_private);
+-}
+-
+-static const struct file_operations meminfo_fops = {
+- .owner = THIS_MODULE,
+- .open = meminfo_open,
+- .read = seq_read,
+- .llseek = seq_lseek,
+- .release = single_release,
+-};
+-
+-static int chcr_show(struct seq_file *seq, void *v)
++static int chcr_stats_show(struct seq_file *seq, void *v)
+ {
+ struct adapter *adap = seq->private;
+
+@@ -3399,20 +3346,7 @@ static int chcr_show(struct seq_file *se
+ atomic_read(&adap->chcr_stats.ipsec_cnt));
+ return 0;
+ }
+-
+-
+-static int chcr_stats_open(struct inode *inode, struct file *file)
+-{
+- return single_open(file, chcr_show, inode->i_private);
+-}
+-
+-static const struct file_operations chcr_stats_debugfs_fops = {
+- .owner = THIS_MODULE,
+- .open = chcr_stats_open,
+- .read = seq_read,
+- .llseek = seq_lseek,
+- .release = single_release,
+-};
++DEFINE_SHOW_ATTRIBUTE(chcr_stats);
+
+ #define PRINT_