author	Petr Tesarik <ptesarik@suse.cz>	2019-06-07 15:05:05 +0200
committer	Petr Tesarik <ptesarik@suse.cz>	2019-06-07 15:05:05 +0200
commit	ddfc5320c10e71daa0be2db816360c736070d8ec (patch)
tree	ffb159a122769a35990cd11c3261c4c024bf1d47
parent	187af105002d4396a677826f774d8f73cd303ebe (diff)
parent	94a6e53ab4f10f2ade8edf8033aa0f40d1906af8 (diff)
Merge branch 'SLE15-SP1' into SLE12-SP5 (tag: rpm-4.12.14-100)
- Delete patches.kabi/*

Conflicts:
	patches.kabi/qla2xxx-kABI-fixes-for-v10.00.00.14-k.patch
	series.conf
-rw-r--r--  kabi/severities | 6
-rw-r--r--  patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch | 447
-rw-r--r--  patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch | 189
-rw-r--r--  patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch | 42
-rw-r--r--  patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch | 47
-rw-r--r--  patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch | 466
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch | 41
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch | 41
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch | 142
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch | 60
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch | 38
-rw-r--r--  patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch | 34
-rw-r--r--  patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch | 91
-rw-r--r--  patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch | 36
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch | 307
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch | 48
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch | 168
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch | 285
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch | 54
-rw-r--r--  patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch | 27
-rw-r--r--  patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch | 34
-rw-r--r--  patches.drivers/RDMA-hns-Make-some-function-static.patch | 60
-rw-r--r--  patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch | 48
-rw-r--r--  patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch | 28
-rw-r--r--  patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch | 92
-rw-r--r--  patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch | 248
-rw-r--r--  patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch | 68
-rw-r--r--  patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch | 133
-rw-r--r--  patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch | 62
-rw-r--r--  patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch | 124
-rw-r--r--  patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch | 41
-rw-r--r--  patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch | 33
-rw-r--r--  patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Update-CQE-specifications.patch | 29
-rw-r--r--  patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch | 29
-rw-r--r--  patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch | 89
-rw-r--r--  patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch | 6
-rw-r--r--  patches.drivers/net-hns3-Add-handling-of-MAC-tunnel-interruption.patch | 260
-rw-r--r--  patches.drivers/net-hns3-Add-support-for-netif-message-level-setting.patch | 231
-rw-r--r--  patches.drivers/net-hns3-Make-hclge_destroy_cmd_queue-static.patch | 30
-rw-r--r--  patches.drivers/net-hns3-Make-hclgevf_update_link_mode-static.patch | 30
-rw-r--r--  patches.drivers/net-hns3-add-counter-for-times-RX-pages-gets-allocat.patch | 58
-rw-r--r--  patches.drivers/net-hns3-add-error-handler-for-initializing-command-.patch | 84
-rw-r--r--  patches.drivers/net-hns3-add-function-type-check-for-debugfs-help-in.patch | 66
-rw-r--r--  patches.drivers/net-hns3-add-hns3_gro_complete-for-HW-GRO-process.patch | 301
-rw-r--r--  patches.drivers/net-hns3-add-linearizing-checking-for-TSO-case.patch | 92
-rw-r--r--  patches.drivers/net-hns3-add-protect-when-handling-mac-addr-list.patch | 68
-rw-r--r--  patches.drivers/net-hns3-add-queue-s-statistics-update-to-service-ta.patch | 66
-rw-r--r--  patches.drivers/net-hns3-add-reset-statistics-for-VF.patch | 102
-rw-r--r--  patches.drivers/net-hns3-add-reset-statistics-info-for-PF.patch | 175
-rw-r--r--  patches.drivers/net-hns3-add-some-debug-info-for-hclgevf_get_mbx_res.patch | 44
-rw-r--r--  patches.drivers/net-hns3-add-some-debug-information-for-hclge_check_.patch | 43
-rw-r--r--  patches.drivers/net-hns3-add-support-for-dump-ncl-config-by-debugfs.patch | 132
-rw-r--r--  patches.drivers/net-hns3-adjust-the-timing-of-hns3_client_stop-when-.patch | 37
-rw-r--r--  patches.drivers/net-hns3-always-assume-no-drop-TC-for-performance-re.patch | 87
-rw-r--r--  patches.drivers/net-hns3-check-1000M-half-for-hns3_ethtool_ops.set_l.patch | 32
-rw-r--r--  patches.drivers/net-hns3-check-resetting-status-in-hns3_get_stats.patch | 34
-rw-r--r--  patches.drivers/net-hns3-code-optimization-for-command-queue-spin-lo.patch | 59
-rw-r--r--  patches.drivers/net-hns3-combine-len-and-checksum-handling-for-inner.patch | 280
-rw-r--r--  patches.drivers/net-hns3-deactive-the-reset-timer-when-reset-success.patch | 28
-rw-r--r--  patches.drivers/net-hns3-divide-shared-buffer-between-TC.patch | 81
-rw-r--r--  patches.drivers/net-hns3-do-not-initialize-MDIO-bus-when-PHY-is-inex.patch | 45
-rw-r--r--  patches.drivers/net-hns3-do-not-request-reset-when-hardware-resettin.patch | 47
-rw-r--r--  patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch | 166
-rw-r--r--  patches.drivers/net-hns3-extend-the-loopback-state-acquisition-time.patch | 35
-rw-r--r--  patches.drivers/net-hns3-fix-VLAN-offload-handle-for-VLAN-inserted-b.patch | 148
-rw-r--r--  patches.drivers/net-hns3-fix-data-race-between-ring-next_to_clean.patch | 72
-rw-r--r--  patches.drivers/net-hns3-fix-error-handling-for-desc-filling.patch | 94
-rw-r--r--  patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch | 32
-rw-r--r--  patches.drivers/net-hns3-fix-for-TX-clean-num-when-cleaning-TX-BD.patch | 49
-rw-r--r--  patches.drivers/net-hns3-fix-for-tunnel-type-handling-in-hns3_rx_che.patch | 89
-rw-r--r--  patches.drivers/net-hns3-fix-for-vport-bw_limit-overflow-problem.patch | 34
-rw-r--r--  patches.drivers/net-hns3-fix-keep_alive_timer-not-stop-problem.patch | 51
-rw-r--r--  patches.drivers/net-hns3-fix-loop-condition-of-hns3_get_tx_timeo_que.patch | 32
-rw-r--r--  patches.drivers/net-hns3-fix-pause-configure-fail-problem.patch | 36
-rw-r--r--  patches.drivers/net-hns3-fix-set-port-based-VLAN-for-PF.patch | 321
-rw-r--r--  patches.drivers/net-hns3-fix-set-port-based-VLAN-issue-for-VF.patch | 263
-rw-r--r--  patches.drivers/net-hns3-fix-sparse-warning-when-calling-hclge_set_v.patch | 46
-rw-r--r--  patches.drivers/net-hns3-free-the-pending-skb-when-clean-RX-ring.patch | 38
-rw-r--r--  patches.drivers/net-hns3-handle-pending-reset-while-reset-fail.patch | 30
-rw-r--r--  patches.drivers/net-hns3-handle-the-BD-info-on-the-last-BD-of-the-pa.patch | 111
-rw-r--r--  patches.drivers/net-hns3-ignore-lower-level-new-coming-reset.patch | 31
-rw-r--r--  patches.drivers/net-hns3-minor-optimization-for-datapath.patch | 81
-rw-r--r--  patches.drivers/net-hns3-minor-optimization-for-ring_space.patch | 48
-rw-r--r--  patches.drivers/net-hns3-minor-refactor-for-hns3_rx_checksum.patch | 49
-rw-r--r--  patches.drivers/net-hns3-modify-HNS3_NIC_STATE_INITED-flag-in-hns3_r.patch | 39
-rw-r--r--  patches.drivers/net-hns3-modify-VLAN-initialization-to-be-compatible.patch | 195
-rw-r--r--  patches.drivers/net-hns3-modify-the-VF-network-port-media-type-acqui.patch | 136
-rw-r--r--  patches.drivers/net-hns3-not-reset-TQP-in-the-DOWN-while-VF-resettin.patch | 35
-rw-r--r--  patches.drivers/net-hns3-not-reset-vport-who-not-alive-when-PF-reset.patch | 31
-rw-r--r--  patches.drivers/net-hns3-optimize-the-barrier-using-when-cleaning-TX.patch | 71
-rw-r--r--  patches.drivers/net-hns3-prevent-change-MTU-when-resetting.patch | 30
-rw-r--r--  patches.drivers/net-hns3-prevent-double-free-in-hns3_put_ring_config.patch | 76
-rw-r--r--  patches.drivers/net-hns3-reduce-resources-use-in-kdump-kernel.patch | 81
-rw-r--r--  patches.drivers/net-hns3-refactor-BD-filling-for-l2l3l4-info.patch | 111
-rw-r--r--  patches.drivers/net-hns3-refine-tx-timeout-count-handle.patch | 44
-rw-r--r--  patches.drivers/net-hns3-remove-redundant-assignment-of-l2_hdr-to-it.patch | 30
-rw-r--r--  patches.drivers/net-hns3-remove-reset-after-command-send-failed.patch | 68
-rw-r--r--  patches.drivers/net-hns3-return-0-and-print-warning-when-hit-duplica.patch | 39
-rw-r--r--  patches.drivers/net-hns3-set-dividual-reset-level-for-all-RAS-and-MS.patch | 1271
-rw-r--r--  patches.drivers/net-hns3-set-up-the-vport-alive-state-while-reinitia.patch | 38
-rw-r--r--  patches.drivers/net-hns3-set-vport-alive-state-to-default-while-rese.patch | 31
-rw-r--r--  patches.drivers/net-hns3-simplify-hclgevf_cmd_csq_clean.patch | 72
-rw-r--r--  patches.drivers/net-hns3-some-cleanup-for-struct-hns3_enet_ring.patch | 70
-rw-r--r--  patches.drivers/net-hns3-split-function-hnae3_match_n_instantiate.patch | 128
-rw-r--r--  patches.drivers/net-hns3-stop-mailbox-handling-when-command-queue-ne.patch | 31
-rw-r--r--  patches.drivers/net-hns3-stop-sending-keep-alive-msg-when-VF-command.patch | 33
-rw-r--r--  patches.drivers/net-hns3-unify-maybe_stop_tx-for-TSO-and-non-TSO-cas.patch | 259
-rw-r--r--  patches.drivers/net-hns3-unify-the-page-reusing-for-page-size-4K-and.patch | 90
-rw-r--r--  patches.drivers/net-hns3-use-a-reserved-byte-to-identify-need_resp-f.patch | 84
-rw-r--r--  patches.drivers/net-hns3-use-atomic_t-replace-u32-for-arq-s-count.patch | 74
-rw-r--r--  patches.drivers/net-hns3-use-devm_kcalloc-when-allocating-desc_cb.patch | 51
-rw-r--r--  patches.drivers/net-hns3-use-napi_schedule_irqoff-in-hard-interrupts.patch | 33
-rw-r--r--  patches.drivers/net-phy-marvell-Enable-interrupt-function-on-LED2-pi.patch | 63
-rw-r--r--  patches.drivers/net-phy-marvell-add-new-default-led-configure-for-m8.patch | 75
-rw-r--r--  patches.drivers/net-phy-marvell-change-default-m88e1510-LED-configur.patch | 123
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-28xx-flash-primary-secondary-status.patch | 873
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-Device-ID-for-ISP28XX.patch | 1542
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-Serdes-support-for-ISP28XX.patch | 374
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-fw_attr-and-port_no-SysFS-node.patch | 76
-rw-r--r--  patches.drivers/scsi-qla2xxx-Add-support-for-multiple-fwdump-templat.patch | 862
-rw-r--r--  patches.drivers/scsi-qla2xxx-Avoid-PCI-IRQ-affinity-mapping-when-mul.patch | 80
-rw-r--r--  patches.drivers/scsi-qla2xxx-Cleanups-for-NVRAM-Flash-read-write-pat.patch | 1545
-rw-r--r--  patches.drivers/scsi-qla2xxx-Correction-and-improvement-to-fwdt-proc.patch | 124
-rw-r--r--  patches.drivers/scsi-qla2xxx-Correctly-report-max-min-supported-spee.patch | 309
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-read-offset-in-qla24xx_load_risc_fl.patch | 235
-rw-r--r--  patches.drivers/scsi-qla2xxx-Fix-routine-qla27xx_dump_-mpi-ram.patch | 259
-rw-r--r--  patches.drivers/scsi-qla2xxx-Remove-FW-default-template.patch | 320
-rw-r--r--  patches.drivers/scsi-qla2xxx-Secure-flash-update-support-for-ISP28XX.patch | 1064
-rw-r--r--  patches.drivers/scsi-qla2xxx-Simplification-of-register-address-used.patch | 264
-rw-r--r--  patches.drivers/scsi-qla2xxx-Simplify-conditional-check-again.patch | 48
-rw-r--r--  patches.drivers/scsi-qla2xxx-Update-driver-version-to-10.01.00.15-k.patch | 35
-rw-r--r--  patches.drivers/scsi-qla2xxx-Update-flash-read-write-routine.patch | 1330
-rw-r--r--  patches.drivers/scsi-qla2xxx-no-need-to-check-return-value-of-debugf.patch | 108
-rw-r--r--  patches.fixes/scsi-qla2xxx-fix-driver-unload-by-shutting-down-chip.patch | 10
-rw-r--r--  patches.suse/qla2xxx-allow-irqbalance-control-in-non-MQ-mode.patch | 34
140 files changed, 21276 insertions(+), 43 deletions(-)
diff --git a/kabi/severities b/kabi/severities
index 51ff3157f2..7d76d32e79 100644
--- a/kabi/severities
+++ b/kabi/severities
@@ -66,3 +66,9 @@ drivers/net/ethernet/qlogic/qede/* PASS
drivers/scsi/qedf/* PASS
drivers/scsi/qedi/* PASS
drivers/infiniband/hw/qedr/* PASS
+
+# inter-module symbols for hns3
+drivers/net/ethernet/hisilicon/hns3/* PASS
+drivers/net/ethernet/hisilicon/hns3/hns3pf/* PASS
+drivers/net/ethernet/hisilicon/hns3/hns3vf/* PASS
+drivers/infiniband/hw/hns/* PASS
diff --git a/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch b/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch
new file mode 100644
index 0000000000..182a71dcf2
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch
@@ -0,0 +1,447 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:53 +0800
+Subject: RDMA/hns: Add SCC context allocation support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 6a157f7d1b14eb88d89fbd396cfea15ac4bded2d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds SCC context allocation and initialization support for
+DCQCN in the kernel-space driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.h | 4 ++
+ drivers/infiniband/hw/hns/hns_roce_device.h | 6 +++
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 26 ++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hem.h | 1
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 46 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 33 ++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_main.c | 18 ++++++++++
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 20 +++++++++++-
+ 8 files changed, 146 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
+@@ -98,6 +98,10 @@ enum {
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
++
++ /* SCC CTX BT commands */
++ HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
++ HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,
+ };
+
+ enum {
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -482,6 +482,7 @@ struct hns_roce_qp_table {
+ struct hns_roce_hem_table qp_table;
+ struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
++ struct hns_roce_hem_table sccc_table;
+ };
+
+ struct hns_roce_cq_table {
+@@ -769,6 +770,7 @@ struct hns_roce_caps {
+ int irrl_entry_sz;
+ int trrl_entry_sz;
+ int cqc_entry_sz;
++ int sccc_entry_sz;
+ int srqc_entry_sz;
+ int idx_entry_sz;
+ u32 pbl_ba_pg_sz;
+@@ -781,6 +783,7 @@ struct hns_roce_caps {
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
+ u32 mpt_bt_num;
++ u32 sccc_bt_num;
+ u32 qpc_ba_pg_sz;
+ u32 qpc_buf_pg_sz;
+ u32 qpc_hop_num;
+@@ -796,6 +799,9 @@ struct hns_roce_caps {
+ u32 mtt_ba_pg_sz;
+ u32 mtt_buf_pg_sz;
+ u32 mtt_hop_num;
++ u32 sccc_ba_pg_sz;
++ u32 sccc_buf_pg_sz;
++ u32 sccc_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -45,6 +45,7 @@ bool hns_roce_check_whether_mhop(struct
+ (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
++ (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
+ (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
+@@ -125,6 +126,14 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_hop_num;
+ break;
++ case HEM_TYPE_SCCC:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
++ mhop->hop_num = hr_dev->caps.sccc_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -175,7 +184,7 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ return 0;
+
+ /*
+- * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
++ * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
+ * MTT/CQE alloc hem for bt pages.
+ */
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+@@ -486,7 +495,7 @@ static int hns_roce_table_mhop_get(struc
+ }
+
+ /*
+- * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
++ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * alloc bt space chunk for MTT/CQE.
+ */
+ size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
+@@ -658,7 +667,7 @@ static void hns_roce_table_mhop_put(stru
+ }
+
+ /*
+- * free buffer space chunk for QPC/MTPT/CQC/SRQC.
++ * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * free bt space chunk for MTT/CQE.
+ */
+ hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
+@@ -904,6 +913,14 @@ int hns_roce_init_hem_table(struct hns_r
+ num_bt_l0 = hr_dev->caps.cqc_bt_num;
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
++ case HEM_TYPE_SCCC:
++ buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.sccc_bt_num;
++ hop_num = hr_dev->caps.sccc_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -1081,6 +1098,9 @@ void hns_roce_cleanup_hem(struct hns_roc
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -44,6 +44,7 @@ enum {
+ HEM_TYPE_MTPT,
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
++ HEM_TYPE_SCCC,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1078,6 +1078,9 @@ static int hns_roce_query_pf_resource(st
+ hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
+ PF_RES_DATA_3_PF_SL_NUM_M,
+ PF_RES_DATA_3_PF_SL_NUM_S);
++ hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
++ PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
++ PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
+
+ return 0;
+ }
+@@ -1193,6 +1196,14 @@ static int hns_roce_alloc_vf_resource(st
+ VF_RES_B_DATA_3_VF_SL_NUM_M,
+ VF_RES_B_DATA_3_VF_SL_NUM_S,
+ HNS_ROCE_VF_SL_NUM);
++
++ roce_set_field(req_b->vf_sccc_idx_num,
++ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
++ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
++ roce_set_field(req_b->vf_sccc_idx_num,
++ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
++ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
++ HNS_ROCE_VF_SCCC_BT_NUM);
+ }
+ }
+
+@@ -1205,6 +1216,7 @@ static int hns_roce_v2_set_bt(struct hns
+ u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
+ u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
+ u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
++ u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
+ struct hns_roce_cfg_bt_attr *req;
+ struct hns_roce_cmq_desc desc;
+
+@@ -1252,6 +1264,20 @@ static int hns_roce_v2_set_bt(struct hns
+ CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
+ mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
+
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
++ hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
++ hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
++ sccc_hop_num ==
++ HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
++
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+ }
+
+@@ -1408,9 +1434,14 @@ static int hns_roce_v2_profile(struct hn
+ caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
+ caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
+
+- if (hr_dev->pci_dev->revision == 0x21)
++ if (hr_dev->pci_dev->revision == 0x21) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
+ HNS_ROCE_CAP_FLAG_SRQ;
++ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
++ caps->sccc_ba_pg_sz = 0;
++ caps->sccc_buf_pg_sz = 0;
++ caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
++ }
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret)
+@@ -2663,11 +2694,18 @@ static int hns_roce_v2_set_hem(struct hn
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ break;
++ case HEM_TYPE_SCCC:
++ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
++ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+ return 0;
+ }
++
++ if (table->type == HEM_TYPE_SCCC && step_idx)
++ return 0;
++
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+@@ -2722,6 +2760,8 @@ static int hns_roce_v2_clear_hem(struct
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
++ case HEM_TYPE_SCCC:
++ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ break;
+@@ -2730,6 +2770,10 @@ static int hns_roce_v2_clear_hem(struct
+ table->type);
+ return 0;
+ }
++
++ if (table->type == HEM_TYPE_SCCC)
++ return 0;
++
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -36,6 +36,7 @@
+ #include <linux/bitops.h>
+
+ #define HNS_ROCE_VF_QPC_BT_NUM 256
++#define HNS_ROCE_VF_SCCC_BT_NUM 64
+ #define HNS_ROCE_VF_SRQC_BT_NUM 64
+ #define HNS_ROCE_VF_CQC_BT_NUM 64
+ #define HNS_ROCE_VF_MPT_BT_NUM 64
+@@ -83,6 +84,7 @@
+ #define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
++#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x100
+@@ -91,6 +93,7 @@
+ #define HNS_ROCE_V2_RSV_QPS 8
+
+ #define HNS_ROCE_CONTEXT_HOP_NUM 1
++#define HNS_ROCE_SCCC_HOP_NUM 1
+ #define HNS_ROCE_MTT_HOP_NUM 1
+ #define HNS_ROCE_CQE_HOP_NUM 1
+ #define HNS_ROCE_SRQWQE_HOP_NUM 1
+@@ -1300,7 +1303,8 @@ struct hns_roce_pf_res_b {
+ __le32 smac_idx_num;
+ __le32 sgid_idx_num;
+ __le32 qid_idx_sl_num;
+- __le32 rsv[2];
++ __le32 sccc_bt_idx_num;
++ __le32 rsv;
+ };
+
+ #define PF_RES_DATA_1_PF_SMAC_IDX_S 0
+@@ -1321,6 +1325,12 @@ struct hns_roce_pf_res_b {
+ #define PF_RES_DATA_3_PF_SL_NUM_S 16
+ #define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
+
++#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0
++#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0)
++
++#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
++#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
++
+ struct hns_roce_vf_res_a {
+ __le32 vf_id;
+ __le32 vf_qpc_bt_idx_num;
+@@ -1365,7 +1375,8 @@ struct hns_roce_vf_res_b {
+ __le32 vf_smac_idx_num;
+ __le32 vf_sgid_idx_num;
+ __le32 vf_qid_idx_sl_num;
+- __le32 rsv[2];
++ __le32 vf_sccc_idx_num;
++ __le32 rsv1;
+ };
+
+ #define VF_RES_B_DATA_0_VF_ID_S 0
+@@ -1389,6 +1400,12 @@ struct hns_roce_vf_res_b {
+ #define VF_RES_B_DATA_3_VF_SL_NUM_S 16
+ #define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
+
++#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
++#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
++
++#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
++#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
++
+ struct hns_roce_vf_switch {
+ __le32 rocee_sel;
+ __le32 fun_id;
+@@ -1424,7 +1441,8 @@ struct hns_roce_cfg_bt_attr {
+ __le32 vf_srqc_cfg;
+ __le32 vf_cqc_cfg;
+ __le32 vf_mpt_cfg;
+- __le32 rsv[2];
++ __le32 vf_sccc_cfg;
++ __le32 rsv;
+ };
+
+ #define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
+@@ -1463,6 +1481,15 @@ struct hns_roce_cfg_bt_attr {
+ #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
+ #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0)
++
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4)
++
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8)
++
+ struct hns_roce_cfg_sgid_tb {
+ __le32 table_idx_rsv;
+ __le32 vf_sgid_l;
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -704,8 +704,26 @@ static int hns_roce_init_hem(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.sccc_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table,
++ HEM_TYPE_SCCC,
++ hr_dev->caps.sccc_entry_sz,
++ hr_dev->caps.num_qps, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init SCC context memory, aborting.\n");
++ goto err_unmap_idx;
++ }
++ }
++
+ return 0;
+
++err_unmap_idx:
++ if (hr_dev->caps.num_idx_segs)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->mr_table.mtt_idx_table);
++
+ err_unmap_srqwqe:
+ if (hr_dev->caps.num_srqwqe_segs)
+ hns_roce_cleanup_hem_table(hr_dev,
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -209,13 +209,23 @@ static int hns_roce_qp_alloc(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.sccc_entry_sz) {
++ /* Alloc memory for SCC CTX */
++ ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
++ if (ret) {
++ dev_err(dev, "SCC CTX table get failed\n");
++ goto err_put_trrl;
++ }
++ }
++
+ spin_lock_irq(&qp_table->lock);
+ ret = radix_tree_insert(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (ret) {
+ dev_err(dev, "QPC radix_tree_insert failed\n");
+- goto err_put_trrl;
++ goto err_put_sccc;
+ }
+
+ atomic_set(&hr_qp->refcount, 1);
+@@ -223,6 +233,11 @@ static int hns_roce_qp_alloc(struct hns_
+
+ return 0;
+
++err_put_sccc:
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_table_put(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
++
+ err_put_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+@@ -258,6 +273,9 @@ void hns_roce_qp_free(struct hns_roce_de
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_table_put(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
diff --git a/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch b/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch
new file mode 100644
index 0000000000..23cd338997
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch
@@ -0,0 +1,189 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:54 +0800
+Subject: RDMA/hns: Add SCC context clr support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: aa84fa18741b83daf0f8f160c46ae92f4d6f1343
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds SCC context clear support for DCQCN in the kernel-space
+driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 4 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 59 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 15 +++++++
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 8 +++
+ 4 files changed, 85 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -202,6 +202,7 @@ enum {
+ HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+ HNS_ROCE_CAP_FLAG_MW = BIT(7),
+ HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
++ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
+ HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ };
+
+@@ -483,6 +484,7 @@ struct hns_roce_qp_table {
+ struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
+ struct hns_roce_hem_table sccc_table;
++ struct mutex scc_mutex;
+ };
+
+ struct hns_roce_cq_table {
+@@ -868,6 +870,8 @@ struct hns_roce_hw {
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state);
+ int (*destroy_qp)(struct ib_qp *ibqp);
++ int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
++ struct hns_roce_qp *hr_qp);
+ int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+ int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1436,7 +1436,9 @@ static int hns_roce_v2_profile(struct hn
+
+ if (hr_dev->pci_dev->revision == 0x21) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
+- HNS_ROCE_CAP_FLAG_SRQ;
++ HNS_ROCE_CAP_FLAG_SRQ |
++ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
++
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+@@ -4277,6 +4279,60 @@ static int hns_roce_v2_destroy_qp(struct
+ return 0;
+ }
+
++static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
++ struct hns_roce_qp *hr_qp)
++{
++ struct hns_roce_sccc_clr_done *rst, *resp;
++ struct hns_roce_sccc_clr *clr;
++ struct hns_roce_cmq_desc desc;
++ int ret, i;
++
++ mutex_lock(&hr_dev->qp_table.scc_mutex);
++
++ /* set scc ctx clear done flag */
++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
++ rst = (struct hns_roce_sccc_clr_done *)desc.data;
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
++ goto out;
++ }
++
++ /* clear scc context */
++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
++ clr = (struct hns_roce_sccc_clr *)desc.data;
++ clr->qpn = cpu_to_le32(hr_qp->qpn);
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
++ goto out;
++ }
++
++ /* query scc context clear is done or not */
++ resp = (struct hns_roce_sccc_clr_done *)desc.data;
++ for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
++ hns_roce_cmq_setup_basic_desc(&desc,
++ HNS_ROCE_OPC_QUERY_SCCC, true);
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
++ goto out;
++ }
++
++ if (resp->clr_done)
++ goto out;
++
++ msleep(20);
++ }
++
++ dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
++ ret = -ETIMEDOUT;
++
++out:
++ mutex_unlock(&hr_dev->qp_table.scc_mutex);
++ return ret;
++}
++
+ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
+@@ -5819,6 +5875,7 @@ static const struct hns_roce_hw hns_roce
+ .modify_qp = hns_roce_v2_modify_qp,
+ .query_qp = hns_roce_v2_query_qp,
+ .destroy_qp = hns_roce_v2_destroy_qp,
++ .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
+ .modify_cq = hns_roce_v2_modify_cq,
+ .post_send = hns_roce_v2_post_send,
+ .post_recv = hns_roce_v2_post_recv,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -123,6 +123,8 @@
+ #define HNS_ROCE_CMQ_EN_B 16
+ #define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
+
++#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
++
+ #define check_whether_last_step(hop_num, step_idx) \
+ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (step_idx == 1 && hop_num == 1) || \
+@@ -232,6 +234,9 @@ enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_POST_MB = 0x8504,
+ HNS_ROCE_OPC_QUERY_MB_ST = 0x8505,
+ HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
++ HNS_ROCE_OPC_CLR_SCCC = 0x8509,
++ HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
++ HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_SWITCH_PARAMETER_CFG = 0x1033,
+ };
+
+@@ -1757,4 +1762,14 @@ struct hns_roce_wqe_atomic_seg {
+ __le64 cmp_data;
+ };
+
++struct hns_roce_sccc_clr {
++ __le32 qpn;
++ __le32 rsv[5];
++};
++
++struct hns_roce_sccc_clr_done {
++ __le32 clr_done;
++ __le32 rsv[5];
++};
++
+ #endif
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -812,6 +812,13 @@ static int hns_roce_create_qp_common(str
+ if (ret)
+ goto err_qp;
+ }
++
++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
++ ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
++ if (ret)
++ goto err_qp;
++ }
++
+ hr_qp->event = hns_roce_ib_qp_event;
+
+ return 0;
+@@ -1153,6 +1160,7 @@ int hns_roce_init_qp_table(struct hns_ro
+ int reserved_from_bot;
+ int ret;
+
++ mutex_init(&qp_table->scc_mutex);
+ spin_lock_init(&qp_table->lock);
+ INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+
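The clear flow added by this patch is a bounded poll: issue the reset and clear commands, then query the clr_done flag up to HNS_ROCE_CMQ_SCC_CLR_DONE_CNT times with a 20 ms sleep between attempts, all under qp_table.scc_mutex so concurrent QP creations cannot interleave their reset/clear/query triples. A stand-alone C sketch of just that polling pattern (query_clr_done() is a hypothetical stub standing in for the HNS_ROCE_OPC_QUERY_SCCC command, not a real driver function):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define CLR_DONE_RETRIES 5	/* mirrors HNS_ROCE_CMQ_SCC_CLR_DONE_CNT */

/* Hypothetical stub for the HNS_ROCE_OPC_QUERY_SCCC firmware query. */
static bool query_clr_done(void)
{
	return true;
}

int main(void)
{
	int i;

	for (i = 0; i <= CLR_DONE_RETRIES; i++) {
		if (query_clr_done()) {
			puts("SCC context clear done");
			return 0;
		}
		usleep(20 * 1000);	/* the driver uses msleep(20) */
	}

	fprintf(stderr, "clear-done poll timed out\n");	/* -ETIMEDOUT in the driver */
	return 1;
}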
diff --git a/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch b/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch
new file mode 100644
index 0000000000..b2f6bc67ef
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch
@@ -0,0 +1,42 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:08 +0800
+Subject: RDMA/hns: Add constraint on the setting of local ACK timeout
+Patch-mainline: v5.1-rc1
+Git-commit: 44754b95dd35ee07c462b5425ae9c4cde8c7e7c8
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+According to the IB protocol, the local ACK timeout shall be a 5-bit
+value. Currently, hip08 cannot support the possible maximum value 31, so
+fail the request in this case.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3686,10 +3686,16 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+- V2_QPC_BYTE_28_AT_S, attr->timeout);
+- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+- V2_QPC_BYTE_28_AT_S, 0);
++ if (attr->timeout < 31) {
++ roce_set_field(context->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ attr->timeout);
++ roce_set_field(qpc_mask->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ 0);
++ } else {
++ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
++ }
+ }
+
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
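For context on the 0..30 limit enforced above: the IB spec stores the local ACK timeout in a 5-bit field with an exponential encoding, where 0 means no timeout and any other value t means a timer of 4.096 us * 2^t. A stand-alone sketch of that encoding (illustration only, not driver code):

#include <stdio.h>

/* IB local ACK timeout encoding: 0 = infinite, else 4.096 us * 2^t.
 * 4096 ns == 4.096 us, so the value in nanoseconds is 4096 << t. */
static unsigned long long local_ack_timeout_ns(unsigned int t)
{
	return t ? 4096ULL << t : 0;
}

int main(void)
{
	unsigned int t;

	for (t = 0; t <= 31; t++)
		printf("timeout=%2u -> %llu ns%s\n", t, local_ack_timeout_ns(t),
		       t == 31 ? "  (rejected by hip08 per the patch above)" : "");
	return 0;
}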
diff --git a/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch b/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch
new file mode 100644
index 0000000000..0f7e6c080e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch
@@ -0,0 +1,47 @@
+From: Xiaofei Tan <tanxiaofei@huawei.com>
+Date: Sat, 19 Jan 2019 14:23:29 +0800
+Subject: RDMA/hns: Add the process of AEQ overflow for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 2b9acb9a97fe9b4101ca020643760c4a090b4cb4
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+AEQ overflow is reported by hardware when too many asynchronous events
+occur but are not handled in time. Normally, an AEQ overflow error is
+unlikely to occur; once it happens, we have to do a physical function
+reset to recover. PF reset is implemented in two steps: first, set the
+reset level with ae_dev->ops->set_default_reset_request; second, run the
+reset with ae_dev->ops->reset_event.
+
+Signed-off-by: Xiaofei Tan <tanxiaofei@huawei.com>
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4702,11 +4702,22 @@ static irqreturn_t hns_roce_v2_msix_inte
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+ if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
++ struct pci_dev *pdev = hr_dev->pci_dev;
++ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
++ const struct hnae3_ae_ops *ops = ae_dev->ops;
++
+ dev_err(dev, "AEQ overflow!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
++ /* Set reset level for reset_event() */
++ if (ops->set_default_reset_request)
++ ops->set_default_reset_request(ae_dev,
++ HNAE3_FUNC_RESET);
++ if (ops->reset_event)
++ ops->reset_event(pdev, NULL);
++
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
diff --git a/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch b/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch
new file mode 100644
index 0000000000..67166ad771
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch
@@ -0,0 +1,466 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:55 +0800
+Subject: RDMA/hns: Add timer allocation support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 0e40dc2f70cda099e13392a26bd37aed24bcd25d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds qpc timer and cqc timer allocation support for hardware
+timeout retransmission in the kernel-space driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.h | 8 ++
+ drivers/infiniband/hw/hns/hns_roce_device.h | 14 +++
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 42 +++++++++++
+ drivers/infiniband/hw/hns/hns_roce_hem.h | 2
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 103 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 24 ++++++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 36 +++++++++
+ 7 files changed, 227 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
+@@ -75,6 +75,10 @@ enum {
+ HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
+ HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
+
++ /* CQC TIMER commands */
++ HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23,
++ HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27,
++
+ /* MPT commands */
+ HNS_ROCE_CMD_QUERY_MPT = 0x62,
+
+@@ -89,6 +93,10 @@ enum {
+ HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
++ /* QPC TIMER commands */
++ HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33,
++ HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37,
++
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -733,6 +733,8 @@ struct hns_roce_caps {
+ u32 max_extend_sg;
+ int num_qps; /* 256k */
+ int reserved_qps;
++ int num_qpc_timer;
++ int num_cqc_timer;
+ u32 max_srq_sg;
+ int num_srqs;
+ u32 max_wqes; /* 16k */
+@@ -773,6 +775,8 @@ struct hns_roce_caps {
+ int trrl_entry_sz;
+ int cqc_entry_sz;
+ int sccc_entry_sz;
++ int qpc_timer_entry_sz;
++ int cqc_timer_entry_sz;
+ int srqc_entry_sz;
+ int idx_entry_sz;
+ u32 pbl_ba_pg_sz;
+@@ -782,8 +786,10 @@ struct hns_roce_caps {
+ int ceqe_depth;
+ enum ib_mtu max_mtu;
+ u32 qpc_bt_num;
++ u32 qpc_timer_bt_num;
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
++ u32 cqc_timer_bt_num;
+ u32 mpt_bt_num;
+ u32 sccc_bt_num;
+ u32 qpc_ba_pg_sz;
+@@ -804,6 +810,12 @@ struct hns_roce_caps {
+ u32 sccc_ba_pg_sz;
+ u32 sccc_buf_pg_sz;
+ u32 sccc_hop_num;
++ u32 qpc_timer_ba_pg_sz;
++ u32 qpc_timer_buf_pg_sz;
++ u32 qpc_timer_hop_num;
++ u32 cqc_timer_ba_pg_sz;
++ u32 cqc_timer_buf_pg_sz;
++ u32 cqc_timer_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+@@ -931,6 +943,8 @@ struct hns_roce_dev {
+ struct hns_roce_srq_table srq_table;
+ struct hns_roce_qp_table qp_table;
+ struct hns_roce_eq_table eq_table;
++ struct hns_roce_hem_table qpc_timer_table;
++ struct hns_roce_hem_table cqc_timer_table;
+
+ int cmd_mod;
+ int loop_idc;
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -46,6 +46,8 @@ bool hns_roce_check_whether_mhop(struct
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+ (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
++ (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
++ (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
+ (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
+@@ -134,6 +136,22 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
+ mhop->hop_num = hr_dev->caps.sccc_hop_num;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
++ mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
++ mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -602,6 +620,7 @@ out:
+ mutex_unlock(&table->mutex);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(hns_roce_table_get);
+
+ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+@@ -744,6 +763,7 @@ void hns_roce_table_put(struct hns_roce_
+
+ mutex_unlock(&table->mutex);
+ }
++EXPORT_SYMBOL_GPL(hns_roce_table_put);
+
+ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+@@ -921,6 +941,22 @@ int hns_roce_init_hem_table(struct hns_r
+ num_bt_l0 = hr_dev->caps.sccc_bt_num;
+ hop_num = hr_dev->caps.sccc_hop_num;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
++ hop_num = hr_dev->caps.qpc_timer_hop_num;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
++ hop_num = hr_dev->caps.cqc_timer_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -1098,6 +1134,12 @@ void hns_roce_cleanup_hem(struct hns_roc
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
++ if (hr_dev->caps.qpc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table);
++ if (hr_dev->caps.cqc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->cqc_timer_table);
+ if (hr_dev->caps.sccc_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -45,6 +45,8 @@ enum {
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
+ HEM_TYPE_SCCC,
++ HEM_TYPE_QPC_TIMER,
++ HEM_TYPE_CQC_TIMER,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1085,6 +1085,41 @@ static int hns_roce_query_pf_resource(st
+ return 0;
+ }
+
++static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_pf_timer_res_a *req_a;
++ struct hns_roce_cmq_desc desc[2];
++ int ret, i;
++
++ for (i = 0; i < 2; i++) {
++ hns_roce_cmq_setup_basic_desc(&desc[i],
++ HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
++ true);
++
++ if (i == 0)
++ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++ else
++ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++ }
++
++ ret = hns_roce_cmq_send(hr_dev, desc, 2);
++ if (ret)
++ return ret;
++
++ req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
++
++ hr_dev->caps.qpc_timer_bt_num =
++ roce_get_field(req_a->qpc_timer_bt_idx_num,
++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
++ hr_dev->caps.cqc_timer_bt_num =
++ roce_get_field(req_a->cqc_timer_bt_idx_num,
++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
++
++ return 0;
++}
++
+ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+ int vf_id)
+ {
+@@ -1315,6 +1350,16 @@ static int hns_roce_v2_profile(struct hn
+ return ret;
+ }
+
++ if (hr_dev->pci_dev->revision == 0x21) {
++ ret = hns_roce_query_pf_timer_resource(hr_dev);
++ if (ret) {
++ dev_err(hr_dev->dev,
++ "Query pf timer resource fail, ret = %d.\n",
++ ret);
++ return ret;
++ }
++ }
++
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+@@ -1439,6 +1484,17 @@ static int hns_roce_v2_profile(struct hn
+ HNS_ROCE_CAP_FLAG_SRQ |
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+
++ caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
++ caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
++ caps->qpc_timer_ba_pg_sz = 0;
++ caps->qpc_timer_buf_pg_sz = 0;
++ caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
++ caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
++ caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
++ caps->cqc_timer_ba_pg_sz = 0;
++ caps->cqc_timer_buf_pg_sz = 0;
++ caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
++
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+@@ -1644,7 +1700,8 @@ static void hns_roce_free_link_table(str
+ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+- int ret;
++ int qpc_count, cqc_count;
++ int ret, i;
+
+ /* TSQ includes SQ doorbell and ack doorbell */
+ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
+@@ -1659,8 +1716,40 @@ static int hns_roce_v2_init(struct hns_r
+ goto err_tpq_init_failed;
+ }
+
++ /* Alloc memory for QPC Timer buffer space chunk*/
++ for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
++ qpc_count++) {
++ ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
++ qpc_count);
++ if (ret) {
++ dev_err(hr_dev->dev, "QPC Timer get failed\n");
++ goto err_qpc_timer_failed;
++ }
++ }
++
++ /* Alloc memory for CQC Timer buffer space chunk*/
++ for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
++ cqc_count++) {
++ ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
++ cqc_count);
++ if (ret) {
++ dev_err(hr_dev->dev, "CQC Timer get failed\n");
++ goto err_cqc_timer_failed;
++ }
++ }
++
+ return 0;
+
++err_cqc_timer_failed:
++ for (i = 0; i < cqc_count; i++)
++ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
++
++err_qpc_timer_failed:
++ for (i = 0; i < qpc_count; i++)
++ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
++
++ hns_roce_free_link_table(hr_dev, &priv->tpq);
++
+ err_tpq_init_failed:
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+
+@@ -2699,6 +2788,12 @@ static int hns_roce_v2_set_hem(struct hn
+ case HEM_TYPE_SCCC:
+ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
++ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+@@ -2763,6 +2858,8 @@ static int hns_roce_v2_clear_hem(struct
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
+ case HEM_TYPE_SCCC:
++ case HEM_TYPE_QPC_TIMER:
++ case HEM_TYPE_CQC_TIMER:
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+@@ -2773,7 +2870,9 @@ static int hns_roce_v2_clear_hem(struct
+ return 0;
+ }
+
+- if (table->type == HEM_TYPE_SCCC)
++ if (table->type == HEM_TYPE_SCCC ||
++ table->type == HEM_TYPE_QPC_TIMER ||
++ table->type == HEM_TYPE_CQC_TIMER)
+ return 0;
+
+ op += step_idx;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -46,11 +46,13 @@
+ #define HNS_ROCE_VF_SL_NUM 8
+
+ #define HNS_ROCE_V2_MAX_QP_NUM 0x2000
++#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
+ #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ 0x100000
+ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+ #define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
++#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+@@ -85,6 +87,8 @@
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
+ #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
++#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
++#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x100
+@@ -229,6 +233,7 @@ enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
+ HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
++ HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+ HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
+ HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
+ HNS_ROCE_OPC_POST_MB = 0x8504,
+@@ -1336,6 +1341,25 @@ struct hns_roce_pf_res_b {
+ #define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
+ #define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
+
++struct hns_roce_pf_timer_res_a {
++ __le32 rsv0;
++ __le32 qpc_timer_bt_idx_num;
++ __le32 cqc_timer_bt_idx_num;
++ __le32 rsv[3];
++};
++
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
++
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
++
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
++
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
++
+ struct hns_roce_vf_res_a {
+ __le32 vf_id;
+ __le32 vf_qpc_bt_idx_num;
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -717,8 +717,44 @@ static int hns_roce_init_hem(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.qpc_timer_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table,
++ HEM_TYPE_QPC_TIMER,
++ hr_dev->caps.qpc_timer_entry_sz,
++ hr_dev->caps.num_qpc_timer, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init QPC timer memory, aborting.\n");
++ goto err_unmap_ctx;
++ }
++ }
++
++ if (hr_dev->caps.cqc_timer_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->cqc_timer_table,
++ HEM_TYPE_CQC_TIMER,
++ hr_dev->caps.cqc_timer_entry_sz,
++ hr_dev->caps.num_cqc_timer, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init CQC timer memory, aborting.\n");
++ goto err_unmap_qpc_timer;
++ }
++ }
++
+ return 0;
+
++err_unmap_qpc_timer:
++ if (hr_dev->caps.qpc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table);
++
++err_unmap_ctx:
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table);
++
+ err_unmap_idx:
+ if (hr_dev->caps.num_idx_segs)
+ hns_roce_cleanup_hem_table(hr_dev,
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch b/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch
new file mode 100644
index 0000000000..9dad6cd70b
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch
@@ -0,0 +1,41 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Sun, 7 Apr 2019 13:23:39 +0800
+Subject: RDMA/hns: Bugfix for SCC hem free
+Patch-mainline: v5.1-rc5
+Git-commit: 00fb67ec6b98114a887d9ef26fc7c3e566e7f665
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The method of hem free for SCC context differs from that for qp context.
+
+In the current version, if the SCC hem is freed during the execution of
+qp free, an smmu error such as the one below may occur:
+
+ arm-smmu-v3 arm-smmu-v3.1.auto: event 0x10 received:
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x00007d0000000010
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x000012000000017c
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x00000000000009e0
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x0000000000000000
+
+As SCC context is still used by hardware after qp free, we can solve this
+problem by removing SCC hem free from hns_roce_qp_free.
+
+Fixes: 6a157f7d1b14 ("RDMA/hns: Add SCC context allocation support for hip08")
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -273,9 +273,6 @@ void hns_roce_qp_free(struct hns_roce_de
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
+- if (hr_dev->caps.sccc_entry_sz)
+- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
+- hr_qp->qpn);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch b/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch
new file mode 100644
index 0000000000..c6bc11523f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch
@@ -0,0 +1,41 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Tue, 23 Apr 2019 17:30:26 +0800
+Subject: RDMA/hns: Bugfix for mapping user db
+Patch-mainline: v5.1-rc7
+Git-commit: 2557fabd6e29f349bfa0ac13f38ac98aa5eafc74
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When the maximum send wr delivered by the user is zero, the qp does not
+have a sq.
+
+When allocating the sq db buffer to store the user sq pi pointer and
+mapping it into kernel mode, max_send_wr is used as the trigger condition,
+but the kernel does not consider this condition when mapping the db. This
+causes the sq record doorbell mapping to fail and qp creation to fail.
+
+The failed print information as follows:
+
+ hns3 0000:7d:00.1: Send cmd: tail - 418, opcode - 0x8504, flag - 0x0011, retval - 0x0000
+ hns3 0000:7d:00.1: Send cmd: 0xe59dc000 0x00000000 0x00000000 0x00000000 0x00000116 0x0000ffff
+ hns3 0000:7d:00.1: sq record doorbell map failed!
+ hns3 0000:7d:00.1: Create RC QP failed
+
+Fixes: 0425e3e6e0c7 ("RDMA/hns: Support flush cqe for hip08 in kernel space")
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -532,7 +532,7 @@ static int hns_roce_set_kernel_sq_size(s
+
+ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
+ {
+- if (attr->qp_type == IB_QPT_XRC_TGT)
++ if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
+ return 0;
+
+ return 1;
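A hypothetical userspace view of the case this patch fixes (libibverbs; create_recv_only_qp is an illustrative helper, not an existing API): a QP created with cap.max_send_wr == 0 has no send queue, and before this fix its creation failed at the sq record doorbell mapping step.

#include <infiniband/verbs.h>

/* Illustration only: an RC QP with no send queue; receives are posted
 * through an SRQ. With max_send_wr == 0 there is no SQ, so the kernel
 * must not attempt to map an SQ record doorbell for this QP. */
struct ibv_qp *create_recv_only_qp(struct ibv_pd *pd, struct ibv_cq *cq,
				   struct ibv_srq *srq)
{
	struct ibv_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.srq = srq,
		.cap = {
			.max_send_wr = 0,	/* the trigger condition */
			.max_send_sge = 0,
		},
		.qp_type = IBV_QPT_RC,
	};

	return ibv_create_qp(pd, &attr);	/* failed on hip08 before this fix */
}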
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch b/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch
new file mode 100644
index 0000000000..8150c82df3
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch
@@ -0,0 +1,142 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Thu, 30 May 2019 23:55:53 +0800
+Subject: RDMA/hns: Bugfix for posting multiple srq work request
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 97545b10221ad14b046dba135a37f4e98a560697
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When the user submits more than 32 work requests to an srq queue
+at a time, it needs to find the corresponding number of entries
+in the bitmap of the idx queue. However, the original lookup
+function, ffs, only processes 32 bits of an array element. When
+the number of srq wqes issued exceeds 32, ffs will only process
+the lower 32 bits of each element, so it will not be able to get
+the correct wqe index for the srq wqe.
+
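+A self-contained userspace sketch of the root cause (illustrative, not
+driver code): ffs() takes an int, so on a 64-bit bitmap word it never
+sees bits 32..63, while a 64-bit-aware search does.
+
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <strings.h>                    /* ffs() */
+
+  int main(void)
+  {
+          uint64_t word = 1ULL << 40;     /* only bit 40 is set */
+
+          /* The int conversion drops the upper 32 bits: prints 0. */
+          printf("ffs:   %d\n", ffs((int)word));
+
+          /* A 64-bit search finds it: prints 41. */
+          printf("ffsll: %d\n", __builtin_ffsll((long long)word));
+          return 0;
+  }
+
+The fix below sidesteps the problem by storing the bitmap as unsigned
+long and switching to the kernel's find_first_zero_bit()/bitmap_set()
+helpers, which handle bitmaps of arbitrary length.
+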
+Signed-off-by: Xi Wang <wangxi11@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 2 -
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 34 ++++++++++++++--------------
+ drivers/infiniband/hw/hns/hns_roce_srq.c | 15 ++----------
+ 3 files changed, 22 insertions(+), 29 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -472,7 +472,7 @@ struct hns_roce_idx_que {
+ u32 buf_size;
+ struct ib_umem *umem;
+ struct hns_roce_mtt mtt;
+- u64 *bitmap;
++ unsigned long *bitmap;
+ };
+
+ struct hns_roce_srq {
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2343,15 +2343,10 @@ static void *get_srq_wqe(struct hns_roce
+
+ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+ {
+- u32 bitmap_num;
+- int bit_num;
+-
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+- bitmap_num = wqe_index / (sizeof(u64) * 8);
+- bit_num = wqe_index % (sizeof(u64) * 8);
+- srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
++ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+ srq->tail++;
+
+ spin_unlock(&srq->lock);
+@@ -5971,18 +5966,19 @@ out:
+ return ret;
+ }
+
+-static int find_empty_entry(struct hns_roce_idx_que *idx_que)
++static int find_empty_entry(struct hns_roce_idx_que *idx_que,
++ unsigned long size)
+ {
+- int bit_num;
+- int i;
++ int wqe_idx;
+
+- /* bitmap[i] is set zero if all bits are allocated */
+- for (i = 0; idx_que->bitmap[i] == 0; ++i)
+- ;
+- bit_num = ffs(idx_que->bitmap[i]);
+- idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
++ if (unlikely(bitmap_full(idx_que->bitmap, size)))
++ return -ENOSPC;
++
++ wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
++
++ bitmap_set(idx_que->bitmap, wqe_idx, 1);
+
+- return i * sizeof(u64) * 8 + (bit_num - 1);
++ return wqe_idx;
+ }
+
+ static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
+@@ -6028,7 +6024,13 @@ static int hns_roce_v2_post_srq_recv(str
+ break;
+ }
+
+- wqe_idx = find_empty_entry(&srq->idx_que);
++ wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
++ if (wqe_idx < 0) {
++ ret = -ENOMEM;
++ *bad_wr = wr;
++ break;
++ }
++
+ fill_idx_queue(&srq->idx_que, ind, wqe_idx);
+ wqe = get_srq_wqe(srq, wqe_idx);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -188,28 +188,19 @@ static int hns_roce_create_idx_que(struc
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+- u32 bitmap_num;
+- int i;
+
+- bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
+-
+- idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
++ idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+ if (!idx_que->bitmap)
+ return -ENOMEM;
+
+- bitmap_num = bitmap_num / (8 * sizeof(u64));
+-
+ idx_que->buf_size = srq->idx_que.buf_size;
+
+ if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
+ &idx_que->idx_buf, page_shift)) {
+- kfree(idx_que->bitmap);
++ bitmap_free(idx_que->bitmap);
+ return -ENOMEM;
+ }
+
+- for (i = 0; i < bitmap_num; i++)
+- idx_que->bitmap[i] = ~(0UL);
+-
+ return 0;
+ }
+
+@@ -415,7 +406,7 @@ err_idx_mtt:
+ err_create_idx:
+ hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
+ &srq->idx_que.idx_buf);
+- kfree(srq->idx_que.bitmap);
++ bitmap_free(srq->idx_que.bitmap);
+
+ err_srq_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch b/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch
new file mode 100644
index 0000000000..0ab666db0e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:26 +0800
+Subject: RDMA/hns: Bugfix for sending with invalidate
+Patch-mainline: v5.2-rc1
+Git-commit: 82342e493b7e53f5e0d0698a48190f05e84d6690
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+According to the IB protocol, the send-with-invalidate operation must
+not invalidate an MR that was created through a register-MR or
+reregister-MR operation.
+
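+For reference, a hedged userspace sketch of the operation in question
+(`qp`, `sge` and the remote `rkey` are assumed to exist):
+
+  struct ibv_send_wr wr = {
+          .sg_list         = &sge,
+          .num_sge         = 1,
+          .opcode          = IBV_WR_SEND_WITH_INV,
+          .send_flags      = IBV_SEND_SIGNALED,
+          .invalidate_rkey = rkey,   /* MR the responder should invalidate */
+  }, *bad_wr;
+
+  int ret = ibv_post_send(qp, &wr, &bad_wr);
+
+With this fix, such a send can no longer invalidate an MR that hip08
+produced via a plain register or reregister, matching the protocol
+requirement described above.
+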
+Fixes: e93df0108579 ("RDMA/hns: Support local invalidate for hip08 in kernel space")
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2155,7 +2155,7 @@ static int hns_roce_v2_write_mtpt(void *
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
++ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
+ (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch b/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch
new file mode 100644
index 0000000000..375f057966
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch
@@ -0,0 +1,60 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Sat, 16 Feb 2019 20:10:25 +0800
+Subject: RDMA/hns: Bugfix for set hem of SCC
+Patch-mainline: v5.1-rc1
+Git-commit: 6ac16e403900a98f9b330daa5f0d89f76a24c6eb
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The method of setting hem for the SCC context differs from the other
+contexts: for SCC, the hardware must be notified of the exact idx in
+bt0, while for the other contexts it only needs to be notified of the
+bt step and calculates the idx itself.
+
+This fixes the following error when unloading the hip08 driver:
+
+[ 123.570768] {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 0
+[ 123.579023] {1}[Hardware Error]: event severity: recoverable
+[ 123.584670] {1}[Hardware Error]: Error 0, type: recoverable
+[ 123.590317] {1}[Hardware Error]: section_type: PCIe error
+[ 123.595877] {1}[Hardware Error]: version: 4.0
+[ 123.600395] {1}[Hardware Error]: command: 0x0006, status: 0x0010
+[ 123.606562] {1}[Hardware Error]: device_id: 0000:7d:00.0
+[ 123.612034] {1}[Hardware Error]: slot: 0
+[ 123.616120] {1}[Hardware Error]: secondary_bus: 0x00
+[ 123.621245] {1}[Hardware Error]: vendor_id: 0x19e5, device_id: 0xa222
+[ 123.627847] {1}[Hardware Error]: class_code: 000002
+[ 123.632977] hns3 0000:7d:00.0: aer_status: 0x00000000, aer_mask: 0x00000000
+[ 123.639928] hns3 0000:7d:00.0: aer_layer=Transaction Layer, aer_agent=Receiver ID
+[ 123.647400] hns3 0000:7d:00.0: aer_uncor_severity: 0x00000000
+[ 123.653136] hns3 0000:7d:00.0: PCI error detected, state(=1)!!
+[ 123.658959] hns3 0000:7d:00.0: ROCEE uncorrected RAS error identified
+[ 123.665395] hns3 0000:7d:00.0: ROCEE RAS AXI rresp error
+[ 123.670713] hns3 0000:7d:00.0: requesting reset due to PCI error
+[ 123.676715] hns3 0000:7d:00.0: received reset event , reset type is 5
+[ 123.683147] hns3 0000:7d:00.0: AER: Device recovery successful
+[ 123.688978] hns3 0000:7d:00.0: PF Reset requested
+[ 123.693684] hns3 0000:7d:00.0: PF failed(=-5) to send mailbox message to VF
+[ 123.700633] hns3 0000:7d:00.0: inform reset to vf(1) failded -5!
+
+Fixes: 6a157f7d1b14 ("RDMA/hns: Add SCC context allocation support for hip08")
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Reviewed-by: Yixian Liu <liuyixian@huawei.com>
+Reviewed-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2945,6 +2945,9 @@ static int hns_roce_v2_set_hem(struct hn
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
++ if (table->type == HEM_TYPE_SCCC)
++ obj = mhop.l0_idx;
++
+ if (check_whether_last_step(hop_num, step_idx)) {
+ hem = table->hem[hem_idx];
+ for (hns_roce_hem_first(hem, &iter);
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch b/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch
new file mode 100644
index 0000000000..3a6e75684f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:07 +0800
+Subject: RDMA/hns: Bugfix for the scene without receiver queue
+Patch-mainline: v5.1-rc1
+Git-commit: 4d103905eb1e4f14cb62fcf962c9d35da7005dea
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+In some application scenarios, the user may not have a receive queue
+when running RDMA write or read operations.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -526,7 +526,8 @@ static int hns_roce_qp_has_sq(struct ib_
+ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
+ {
+ if (attr->qp_type == IB_QPT_XRC_INI ||
+- attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
++ attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
++ !attr->cap.max_recv_wr)
+ return 0;
+
+ return 1;
diff --git a/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch b/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch
new file mode 100644
index 0000000000..6f06739949
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch
@@ -0,0 +1,38 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:07 +0800
+Subject: RDMA/hns: Configure capacity of hns device
+Patch-mainline: v5.1-rc1
+Git-commit: dad1f9802ecee3a21143293b2505e1b57b1ae525
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+This patch adds the IB_DEVICE_MEM_MGT_EXTENSIONS device capability to
+indicate device support for the following features:
+
+1. Fast register memory region.
+2. Send with remote invalidate by FRMR.
+3. Local invalidate memory region.
+
+It also adds the maximum depth of the FRMR page list.
+
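+A short libibverbs sketch of how a consumer would detect the newly
+advertised capability (illustrative; `ctx` is an open device context):
+
+  #include <infiniband/verbs.h>
+
+  int supports_mem_mgt_ext(struct ibv_context *ctx)
+  {
+          struct ibv_device_attr attr;
+
+          if (ibv_query_device(ctx, &attr))
+                  return 0;
+
+          return !!(attr.device_cap_flags & IBV_DEVICE_MEM_MGT_EXTENSIONS);
+  }
+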
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -226,6 +226,11 @@ static int hns_roce_query_device(struct
+ props->max_srq_sge = hr_dev->caps.max_srq_sges;
+ }
+
++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
++ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
++ props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
++ }
++
+ return 0;
+ }
+
diff --git a/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch b/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch
new file mode 100644
index 0000000000..014a7d2d59
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch
@@ -0,0 +1,34 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:28 +0800
+Subject: RDMA/hns: Delete unused variable in hns_roce_v2_modify_qp function
+Patch-mainline: v5.2-rc1
+Git-commit: d0a935563bc0f447abed7799388fa3f13099cc0d
+References: bsc#1104427 FATE#326416
+
+The src_mac array is not used in the hns_roce_v2_modify_qp() function.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3975,7 +3975,6 @@ static int hns_roce_v2_modify_qp(struct
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&attr->ah_attr);
+ const struct ib_gid_attr *gid_attr = NULL;
+- u8 src_mac[ETH_ALEN];
+ int is_roce_protocol;
+ u16 vlan = 0xffff;
+ u8 ib_port;
+@@ -3990,7 +3989,6 @@ static int hns_roce_v2_modify_qp(struct
+ if (is_roce_protocol) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+- memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ }
+
+ if (is_vlan_dev(gid_attr->ndev)) {
diff --git a/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch b/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch
new file mode 100644
index 0000000000..9288fac491
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch
@@ -0,0 +1,91 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:06 +0800
+Subject: RDMA/hns: Delete useful prints for aeq subtype event
+Patch-mainline: v5.1-rc1
+Git-commit: e95c716c7faa0d0eede5eabb6fea2504709e25b6
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Currently, all messages printed for the aeq subtype event are wrong.
+Thus, delete them and print only the value of the subtype event.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 57 +++--------------------------
+ 1 file changed, 6 insertions(+), 51 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4663,64 +4663,19 @@ static void hns_roce_irq_work_handle(str
+ dev_warn(dev, "Send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+- dev_err(dev, "Local work queue catastrophic error.\n");
++ dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
++ qpn, irq_work->sub_type);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+- switch (irq_work->sub_type) {
+- case HNS_ROCE_LWQCE_QPC_ERROR:
+- dev_err(dev, "QP %d, QPC error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_MTU_ERROR:
+- dev_err(dev, "QP %d, MTU error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+- dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+- dev_err(dev, "QP %d, WQE addr error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+- dev_err(dev, "QP %d, WQE shift error.\n", qpn);
+- break;
+- default:
+- dev_err(dev, "Unhandled sub_event type %d.\n",
+- irq_work->sub_type);
+- break;
+- }
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+- dev_err(dev, "Invalid request local work queue error.\n");
++ dev_err(dev, "Invalid request local work queue 0x%x error.\n",
++ qpn);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+- dev_err(dev, "Local access violation work queue error.\n");
++ dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
++ qpn, irq_work->sub_type);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+- switch (irq_work->sub_type) {
+- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+- dev_err(dev, "QP %d, R_key violation.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+- dev_err(dev, "QP %d, length error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_VA_ERROR:
+- dev_err(dev, "QP %d, VA error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_PD_ERROR:
+- dev_err(dev, "QP %d, PD error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+- dev_err(dev, "QP %d, rw acc error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+- dev_err(dev, "QP %d, key state error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+- dev_err(dev, "QP %d, MR operation error.\n", qpn);
+- break;
+- default:
+- dev_err(dev, "Unhandled sub_event type %d.\n",
+- irq_work->sub_type);
+- break;
+- }
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ dev_warn(dev, "SRQ limit reach.\n");
diff --git a/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch b/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch
new file mode 100644
index 0000000000..ac01c7524f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch
@@ -0,0 +1,36 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 19 Mar 2019 11:10:08 +0200
+Subject: RDMA/hns: Fix bad endianess of port_pd variable
+Patch-mainline: v5.2-rc1
+Git-commit: 6734b2973565e36659e97e12ab0d0faf1d9f3fbe
+References: bsc#1104427 FATE#326416
+
+port_pd is declared and read as le32, so fix the assignment to be le32
+as well. This change fixes the following compilation warnings.
+
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: warning: incorrect type
+in assignment (different base types)
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: expected restricted __le32 [usertype] port_pd
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: got restricted __be32 [usertype]
+
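+A self-contained illustration of why the annotation matters (userspace
+equivalents of the kernel helpers; the value is an example only):
+
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <endian.h>                     /* htole32(), htobe32() */
+
+  int main(void)
+  {
+          uint32_t port_pd = (1u << 24) | 0x42;    /* port | pdn */
+
+          /* On a little-endian CPU, to-le is the identity while
+           * to-be byte-swaps the stored value. */
+          printf("le32: 0x%08x\n", htole32(port_pd)); /* 0x01000042 */
+          printf("be32: 0x%08x\n", htobe32(port_pd)); /* 0x42000001 */
+          return 0;
+  }
+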
+Fixes: 9a4435375cd1 ("IB/hns: Add driver files for hns RoCE driver")
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Gal Pressman <galpress@amazon.com>
+Reviewed-by: Lijun Ou <ouliun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_ah.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -69,7 +69,7 @@ struct ib_ah *hns_roce_create_ah(struct
+ HNS_ROCE_VLAN_SL_BIT_MASK) <<
+ HNS_ROCE_VLAN_SL_SHIFT;
+
+- ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
++ ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) <<
+ HNS_ROCE_PORT_NUM_SHIFT));
+ ah->av.gid_index = grh->sgid_index;
diff --git a/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch b/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch
new file mode 100644
index 0000000000..65227a37ae
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch
@@ -0,0 +1,307 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:13 +0800
+Subject: RDMA/hns: Fix the Oops during rmmod or insmod ko when reset occurs
+Patch-mainline: v5.1-rc1
+Git-commit: d061effc36f7bd38a12912977a37a50ac9140d11
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+In the reset process, the hns3 NIC driver notifies the RoCE driver to
+perform reset related processing by calling the .reset_notify() interface
+registered by the RoCE driver in hip08 SoC.
+
+In the current version, if a reset occurs while rmmod or insmod of the
+ko is executing, an Oops like the following may occur:
+
+ Internal error: Oops: 86000007 [#1] PREEMPT SMP
+ Modules linked in: hns_roce(O) hns3(O) hclge(O) hnae3(O) [last unloaded: hns_roce_hw_v2]
+ CPU: 0 PID: 14 Comm: kworker/0:1 Tainted: G O 4.19.0-ge00d540 #1
+ Hardware name: Huawei Technologies Co., Ltd.
+ Workqueue: events hclge_reset_service_task [hclge]
+ pstate: 60c00009 (nZCv daif +PAN +UAO)
+ pc : 0xffff00000100b0b8
+ lr : 0xffff00000100aea0
+ sp : ffff000009afbab0
+ x29: ffff000009afbab0 x28: 0000000000000800
+ x27: 0000000000007ff0 x26: ffff80002f90c004
+ x25: 00000000000007ff x24: ffff000008f97000
+ x23: ffff80003efee0a8 x22: 0000000000001000
+ x21: ffff80002f917ff0 x20: ffff8000286ea070
+ x19: 0000000000000800 x18: 0000000000000400
+ x17: 00000000c4d3225d x16: 00000000000021b8
+ x15: 0000000000000400 x14: 0000000000000400
+ x13: 0000000000000000 x12: ffff80003fac6e30
+ x11: 0000800036303000 x10: 0000000000000001
+ x9 : 0000000000000000 x8 : ffff80003016d000
+ x7 : 0000000000000000 x6 : 000000000000003f
+ x5 : 0000000000000040 x4 : 0000000000000000
+ x3 : 0000000000000004 x2 : 00000000000007ff
+ x1 : 0000000000000000 x0 : 0000000000000000
+ Process kworker/0:1 (pid: 14, stack limit = 0x00000000af8f0ad9)
+ Call trace:
+ 0xffff00000100b0b8
+ 0xffff00000100b3a0
+ hns_roce_init+0x624/0xc88 [hns_roce]
+ 0xffff000001002df8
+ 0xffff000001006960
+ hclge_notify_roce_client+0x74/0xe0 [hclge]
+ hclge_reset_service_task+0xa58/0xbc0 [hclge]
+ process_one_work+0x1e4/0x458
+ worker_thread+0x40/0x450
+ kthread+0x12c/0x130
+ ret_from_fork+0x10/0x18
+ Code: bad PC value
+
+In the reset process, the resources are released first; after the
+hardware reset completes, the resources are reallocated and the
+hardware is reconfigured.
+
+We can solve this problem by modifying both the NIC and the RoCE
+driver: modify the concurrent processing in the NIC driver so that the
+.reset_notify and .uninit_instance ops are not called at the same time,
+and modify the RoCE driver to record the reset stage and the driver's
+init/uninit state, checking that state in the .reset_notify,
+.init_instance and .uninit_instance functions to avoid NULL pointer
+dereferences.
+
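+A self-contained model of the guard this patch introduces (all names
+are illustrative, not the driver's real API): uninit only tears down
+what init actually completed, and a reset observed mid-init makes init
+back out and report busy.
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum state { NON_INIT, INIT, INITED, UNINIT };
+
+  struct handle {
+          enum state instance_state;
+          bool resetting;
+  };
+
+  static int init_instance(struct handle *h)
+  {
+          h->instance_state = INIT;
+          if (h->resetting) {              /* reset raced with insmod */
+                  h->instance_state = NON_INIT;
+                  return -1;               /* caller retries later */
+          }
+          h->instance_state = INITED;
+          return 0;
+  }
+
+  static void uninit_instance(struct handle *h)
+  {
+          if (h->instance_state != INITED) /* nothing to tear down */
+                  return;
+          h->instance_state = UNINIT;
+          /* ... release resources ... */
+          h->instance_state = NON_INIT;
+  }
+
+  int main(void)
+  {
+          struct handle h = { .resetting = true };
+
+          if (init_instance(&h))
+                  printf("init refused: device is resetting\n");
+          uninit_instance(&h);             /* safe no-op: never INITED */
+          return 0;
+  }
+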
+Fixes: cb7a94c9c808 ("RDMA/hns: Add reset process for RoCE in hip08")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 21 +++++
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 103 ++++++++++++++++++++++++----
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 1
+ 3 files changed, 112 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -217,6 +217,26 @@ enum {
+ HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
+ };
+
++enum hns_roce_reset_stage {
++ HNS_ROCE_STATE_NON_RST,
++ HNS_ROCE_STATE_RST_BEF_DOWN,
++ HNS_ROCE_STATE_RST_DOWN,
++ HNS_ROCE_STATE_RST_UNINIT,
++ HNS_ROCE_STATE_RST_INIT,
++ HNS_ROCE_STATE_RST_INITED,
++};
++
++enum hns_roce_instance_state {
++ HNS_ROCE_STATE_NON_INIT,
++ HNS_ROCE_STATE_INIT,
++ HNS_ROCE_STATE_INITED,
++ HNS_ROCE_STATE_UNINIT,
++};
++
++enum {
++ HNS_ROCE_RST_DIRECT_RETURN = 0,
++};
++
+ #define HNS_ROCE_CMD_SUCCESS 1
+
+ #define HNS_ROCE_PORT_DOWN 0
+@@ -919,6 +939,7 @@ struct hns_roce_dev {
+ spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
++ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+
+ struct list_head pgdir_list;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -6002,6 +6002,7 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_
+ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+ {
++ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ const struct pci_device_id *id;
+ int i;
+
+@@ -6032,10 +6033,13 @@ static int hns_roce_hw_v2_get_cfg(struct
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+
++ hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
++ priv->handle = handle;
++
+ return 0;
+ }
+
+-static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ {
+ struct hns_roce_dev *hr_dev;
+ int ret;
+@@ -6052,7 +6056,6 @@ static int hns_roce_hw_v2_init_instance(
+
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
+- handle->priv = hr_dev;
+
+ ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
+ if (ret) {
+@@ -6066,6 +6069,8 @@ static int hns_roce_hw_v2_init_instance(
+ goto error_failed_get_cfg;
+ }
+
++ handle->priv = hr_dev;
++
+ return 0;
+
+ error_failed_get_cfg:
+@@ -6077,7 +6082,7 @@ error_failed_kzalloc:
+ return ret;
+ }
+
+-static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+ {
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+@@ -6085,24 +6090,78 @@ static void hns_roce_hw_v2_uninit_instan
+ if (!hr_dev)
+ return;
+
++ handle->priv = NULL;
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+ }
+
++static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++{
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ struct device *dev = &handle->pdev->dev;
++ int ret;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
++
++ if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ goto reset_chk_err;
++ }
++
++ ret = __hns_roce_hw_v2_init_instance(handle);
++ if (ret) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
++ if (ops->ae_dev_resetting(handle) ||
++ ops->get_hw_reset_stat(handle))
++ goto reset_chk_err;
++ else
++ return ret;
++ }
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
++
++
++ return 0;
++
++reset_chk_err:
++ dev_err(dev, "Device is busy in resetting state.\n"
++ "please retry later.\n");
++
++ return -EBUSY;
++}
++
++static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++ bool reset)
++{
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
++ return;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
++
++ __hns_roce_hw_v2_uninit_instance(handle, reset);
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++}
+ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+ {
+- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
++ struct hns_roce_dev *hr_dev;
+ struct ib_event event;
+
+- if (!hr_dev) {
+- dev_err(&handle->pdev->dev,
+- "Input parameter handle->priv is NULL!\n");
+- return -EINVAL;
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
++ set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++ return 0;
+ }
+
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
++ clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++
++ hr_dev = (struct hns_roce_dev *)handle->priv;
++ if (!hr_dev)
++ return 0;
++
+ hr_dev->active = false;
+- hr_dev->is_reset = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+@@ -6114,17 +6173,29 @@ static int hns_roce_hw_v2_reset_notify_d
+
+ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+ {
++ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+- ret = hns_roce_hw_v2_init_instance(handle);
++ if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
++ &handle->rinfo.state)) {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ return 0;
++ }
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
++
++ dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
++ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
+ * callback function, RoCE Engine reinitialize. If RoCE reinit
+ * failed, we should inform NIC driver.
+ */
+ handle->priv = NULL;
+- dev_err(&handle->pdev->dev,
+- "In reset process RoCE reinit failed %d.\n", ret);
++ dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
++ } else {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+ }
+
+ return ret;
+@@ -6132,8 +6203,14 @@ static int hns_roce_hw_v2_reset_notify_i
+
+ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
+ {
++ if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
++ return 0;
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
++ dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
+ msleep(100);
+- hns_roce_hw_v2_uninit_instance(handle, false);
++ __hns_roce_hw_v2_uninit_instance(handle, false);
++
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1602,6 +1602,7 @@ struct hns_roce_link_table_entry {
+ #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
+
+ struct hns_roce_v2_priv {
++ struct hnae3_handle *handle;
+ struct hns_roce_v2_cmq cmq;
+ struct hns_roce_link_table tsq;
+ struct hns_roce_link_table tpq;
diff --git a/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch b/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch
new file mode 100644
index 0000000000..25bd379ed6
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch
@@ -0,0 +1,48 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:06 +0800
+Subject: RDMA/hns: Fix the bug with updating rq head pointer when flush cqe
+Patch-mainline: v5.1-rc1
+Git-commit: 9c6ccc035c209dda07685e8dba829a203ba17499
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+When flushing cqes on a QP that is attached to an SRQ, the driver must
+not update the rq head pointer in the hardware.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3789,13 +3789,16 @@ static int hns_roce_v2_modify_qp(struct
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+- roce_set_field(context->byte_84_rq_ci_pi,
++
++ if (!ibqp->srq) {
++ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+ hr_qp->rq.head);
+- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
++ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
++ }
+ }
+
+ if (attr_mask & IB_QP_AV) {
+@@ -4281,7 +4284,8 @@ static void hns_roce_set_qps_to_err(stru
+ if (hr_qp->ibqp.uobject) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
++ if (hr_qp->rdb_en == 1)
++ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
+ return;
diff --git a/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch
new file mode 100644
index 0000000000..b0f860e779
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch
@@ -0,0 +1,168 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:15 +0800
+Subject: RDMA/hns: Fix the chip hanging caused by sending doorbell during
+ reset
+Patch-mainline: v5.1-rc1
+Git-commit: d3743fa94ccd177917783726faf54632439ddb54
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+On the hip08 chip, there is a possibility of the chip hanging when a
+doorbell is sent during reset. Fix it by prohibiting doorbells during
+reset.
+
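+The shape of the fix, reduced to a hedged standalone sketch (names are
+illustrative): every 64-bit doorbell write is funneled through one
+helper that drops the MMIO access whenever a reset is pending.
+
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  struct dev_state {
+          bool dis_db;                    /* doorbells disabled by driver */
+          bool (*hw_resetting)(void);     /* reset status from the NIC */
+  };
+
+  static void write64_db(struct dev_state *d, const uint32_t val[2],
+                         volatile uint32_t *dest)
+  {
+          if (d->dis_db || d->hw_resetting())
+                  return;                 /* skip the write during reset */
+
+          dest[0] = val[0];
+          dest[1] = val[1];
+  }
+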
+Fixes: 2d40788825ac ("RDMA/hns: Add support for processing send wr and receive wr")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 1 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 25 ++++++++++++++++---------
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 11 +++++++++++
+ 3 files changed, 28 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -946,6 +946,7 @@ struct hns_roce_dev {
+ spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
++ bool dis_db;
+ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -587,7 +587,7 @@ out:
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+ V2_DB_PARAMETER_SL_S, qp->sl);
+
+- hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
++ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+
+ qp->sq_next_wqe = ind;
+ qp->next_sge = sge_ind;
+@@ -717,7 +717,7 @@ static int hns_roce_v2_cmd_hw_reseted(st
+ unsigned long reset_stage)
+ {
+ /* When hardware reset has been completed once or more, we should stop
+- * sending mailbox&cmq to hardware. If now in .init_instance()
++ * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
+ * function, we should exit with error. If now at HNAE3_INIT_CLIENT
+ * stage of soft reset process, we should exit with error, and then
+ * HNAE3_INIT_CLIENT related process can rollback the operation like
+@@ -726,6 +726,7 @@ static int hns_roce_v2_cmd_hw_reseted(st
+ * reset process once again.
+ */
+ hr_dev->is_reset = true;
++ hr_dev->dis_db = true;
+
+ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
+ instance_stage == HNS_ROCE_STATE_INIT)
+@@ -742,8 +743,8 @@ static int hns_roce_v2_cmd_hw_resetting(
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+- /* When hardware reset is detected, we should stop sending mailbox&cmq
+- * to hardware. If now in .init_instance() function, we should
++ /* When hardware reset is detected, we should stop sending mailbox&cmq&
++ * doorbell to hardware. If now in .init_instance() function, we should
+ * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
+ * process, we should exit with error, and then HNAE3_INIT_CLIENT
+ * related process can rollback the operation like notifing hardware to
+@@ -751,6 +752,7 @@ static int hns_roce_v2_cmd_hw_resetting(
+ * error to notify NIC driver to reschedule soft reset process once
+ * again.
+ */
++ hr_dev->dis_db = true;
+ if (!ops->get_hw_reset_stat(handle))
+ hr_dev->is_reset = true;
+
+@@ -768,9 +770,10 @@ static int hns_roce_v2_cmd_sw_resetting(
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ /* When software reset is detected at .init_instance() function, we
+- * should stop sending mailbox&cmq to hardware, and exit with
+- * error.
++ * should stop sending mailbox&cmq&doorbell to hardware, and exit
++ * with error.
+ */
++ hr_dev->dis_db = true;
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
+ hr_dev->is_reset = true;
+
+@@ -2495,6 +2498,7 @@ static void hns_roce_v2_write_cqc(struct
+ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
+ {
++ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ u32 notification_flag;
+ u32 doorbell[2];
+@@ -2520,7 +2524,7 @@ static int hns_roce_v2_req_notify_cq(str
+ roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
+ notification_flag);
+
+- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
++ hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
+
+ return 0;
+ }
+@@ -4763,6 +4767,7 @@ static void hns_roce_v2_init_irq_work(st
+
+ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+ {
++ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+@@ -4789,7 +4794,7 @@ static void set_eq_cons_index_v2(struct
+ HNS_ROCE_V2_EQ_DB_PARA_S,
+ (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+
+- hns_roce_write64_k(doorbell, eq->doorbell);
++ hns_roce_write64(hr_dev, doorbell, eq->doorbell);
+ }
+
+ static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+@@ -6011,6 +6016,7 @@ static int hns_roce_v2_post_srq_recv(str
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+ {
++ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_v2_db srq_db;
+@@ -6072,7 +6078,7 @@ static int hns_roce_v2_post_srq_recv(str
+ srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
+ srq_db.parameter = srq->head;
+
+- hns_roce_write64_k((__le32 *)&srq_db, srq->db_reg_l);
++ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+
+ }
+
+@@ -6291,6 +6297,7 @@ static int hns_roce_hw_v2_reset_notify_d
+ return 0;
+
+ hr_dev->active = false;
++ hr_dev->dis_db = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1799,4 +1799,15 @@ struct hns_roce_sccc_clr_done {
+ __le32 rsv[5];
+ };
+
++static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
++ void __iomem *dest)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
++ hns_roce_write64_k(val, dest);
++}
++
+ #endif
diff --git a/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch
new file mode 100644
index 0000000000..529996624b
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch
@@ -0,0 +1,285 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:14 +0800
+Subject: RDMA/hns: Fix the chip hanging caused by sending mailbox&CMQ during
+ reset
+Patch-mainline: v5.1-rc1
+Git-commit: 6a04aed6afaefd5fd396f23da184298135f31e37
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+On the hip08 chip, the chip may hang, or errors may occur, when a
+mailbox or doorbell is sent during reset. Fix it by prohibiting mailbox
+and doorbell operations while a reset is in progress or after one has
+occurred, to ensure that the hardware keeps working normally.
+
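+A self-contained model of the submission pattern this patch adds
+(return codes and names are illustrative): check the reset state before
+sending, and re-check after a failure so errors caused purely by a
+reset are absorbed instead of being reported to the caller.
+
+  enum rst_prc { PRC_OTHERS, PRC_SUCCESS, PRC_EBUSY };
+
+  static int send_guarded(enum rst_prc (*rst_prc)(void),
+                          int (*do_send)(void))
+  {
+          int ret;
+
+          switch (rst_prc()) {
+          case PRC_SUCCESS:
+                  return 0;       /* hw is being reset; pretend success */
+          case PRC_EBUSY:
+                  return -16;     /* -EBUSY: caller retries later */
+          default:
+                  break;
+          }
+
+          ret = do_send();
+          if (ret && rst_prc() == PRC_SUCCESS)
+                  return 0;       /* the failure was caused by the reset */
+
+          return ret;
+  }
+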
+Fixes: a04ff739f2a9 ("RDMA/hns: Add command queue support for hip08 RoCE driver")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.c | 32 ++++--
+ drivers/infiniband/hw/hns/hns_roce_device.h | 7 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 139 ++++++++++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2
+ 4 files changed, 167 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
+@@ -176,17 +176,33 @@ int hns_roce_cmd_mbox(struct hns_roce_de
+ unsigned long in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout)
+ {
+- if (hr_dev->is_reset)
+- return 0;
++ int ret;
++
++ if (hr_dev->hw->rst_prc_mbox) {
++ ret = hr_dev->hw->rst_prc_mbox(hr_dev);
++ if (ret == CMD_RST_PRC_SUCCESS)
++ return 0;
++ else if (ret == CMD_RST_PRC_EBUSY)
++ return -EBUSY;
++ }
+
+ if (hr_dev->cmd.use_events)
+- return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+- in_modifier, op_modifier, op,
+- timeout);
++ ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
++ in_modifier, op_modifier, op,
++ timeout);
+ else
+- return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+- in_modifier, op_modifier, op,
+- timeout);
++ ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
++ in_modifier, op_modifier, op,
++ timeout);
++
++ if (ret == CMD_RST_PRC_EBUSY)
++ return -EBUSY;
++
++ if (ret && (hr_dev->hw->rst_prc_mbox &&
++ hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
++ return 0;
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -237,6 +237,12 @@ enum {
+ HNS_ROCE_RST_DIRECT_RETURN = 0,
+ };
+
++enum {
++ CMD_RST_PRC_OTHERS,
++ CMD_RST_PRC_SUCCESS,
++ CMD_RST_PRC_EBUSY,
++};
++
+ #define HNS_ROCE_CMD_SUCCESS 1
+
+ #define HNS_ROCE_PORT_DOWN 0
+@@ -875,6 +881,7 @@ struct hns_roce_hw {
+ u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
+ u16 token, int event);
+ int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
++ int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ const union ib_gid *gid, const struct ib_gid_attr *attr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -712,6 +712,110 @@ out:
+ return ret;
+ }
+
++static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
++ unsigned long instance_stage,
++ unsigned long reset_stage)
++{
++ /* When hardware reset has been completed once or more, we should stop
++ * sending mailbox&cmq to hardware. If now in .init_instance()
++ * function, we should exit with error. If now at HNAE3_INIT_CLIENT
++ * stage of soft reset process, we should exit with error, and then
++ * HNAE3_INIT_CLIENT related process can rollback the operation like
++ * notifing hardware to free resources, HNAE3_INIT_CLIENT related
++ * process will exit with error to notify NIC driver to reschedule soft
++ * reset process once again.
++ */
++ hr_dev->is_reset = true;
++
++ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
++ instance_stage == HNS_ROCE_STATE_INIT)
++ return CMD_RST_PRC_EBUSY;
++
++ return CMD_RST_PRC_SUCCESS;
++}
++
++static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
++ unsigned long instance_stage,
++ unsigned long reset_stage)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ /* When hardware reset is detected, we should stop sending mailbox&cmq
++ * to hardware. If now in .init_instance() function, we should
++ * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
++ * process, we should exit with error, and then HNAE3_INIT_CLIENT
++ * related process can rollback the operation like notifing hardware to
++ * free resources, HNAE3_INIT_CLIENT related process will exit with
++ * error to notify NIC driver to reschedule soft reset process once
++ * again.
++ */
++ if (!ops->get_hw_reset_stat(handle))
++ hr_dev->is_reset = true;
++
++ if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
++ instance_stage == HNS_ROCE_STATE_INIT)
++ return CMD_RST_PRC_EBUSY;
++
++ return CMD_RST_PRC_SUCCESS;
++}
++
++static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ /* When software reset is detected at .init_instance() function, we
++ * should stop sending mailbox&cmq to hardware, and exit with
++ * error.
++ */
++ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
++ hr_dev->is_reset = true;
++
++ return CMD_RST_PRC_EBUSY;
++}
++
++static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ unsigned long instance_stage; /* the current instance stage */
++ unsigned long reset_stage; /* the current reset stage */
++ unsigned long reset_cnt;
++ bool sw_resetting;
++ bool hw_resetting;
++
++ if (hr_dev->is_reset)
++ return CMD_RST_PRC_SUCCESS;
++
++ /* Get information about reset from NIC driver or RoCE driver itself,
++ * the meaning of the following variables from NIC driver are described
++ * as below:
++ * reset_cnt -- The count value of completed hardware reset.
++ * hw_resetting -- Whether hardware device is resetting now.
++ * sw_resetting -- Whether NIC's software reset process is running now.
++ */
++ instance_stage = handle->rinfo.instance_state;
++ reset_stage = handle->rinfo.reset_state;
++ reset_cnt = ops->ae_dev_reset_cnt(handle);
++ hw_resetting = ops->get_hw_reset_stat(handle);
++ sw_resetting = ops->ae_dev_resetting(handle);
++
++ if (reset_cnt != hr_dev->reset_cnt)
++ return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
++ reset_stage);
++ else if (hw_resetting)
++ return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
++ reset_stage);
++ else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
++ return hns_roce_v2_cmd_sw_resetting(hr_dev);
++
++ return 0;
++}
++
+ static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
+ {
+ int ntu = ring->next_to_use;
+@@ -892,8 +996,8 @@ static int hns_roce_cmq_csq_clean(struct
+ return clean;
+ }
+
+-static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+- struct hns_roce_cmq_desc *desc, int num)
++static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++ struct hns_roce_cmq_desc *desc, int num)
+ {
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+@@ -905,9 +1009,6 @@ static int hns_roce_cmq_send(struct hns_
+ int ret = 0;
+ int ntc;
+
+- if (hr_dev->is_reset)
+- return 0;
+-
+ spin_lock_bh(&csq->lock);
+
+ if (num > hns_roce_cmq_space(csq)) {
+@@ -982,6 +1083,30 @@ static int hns_roce_cmq_send(struct hns_
+ return ret;
+ }
+
++int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++ struct hns_roce_cmq_desc *desc, int num)
++{
++ int retval;
++ int ret;
++
++ ret = hns_roce_v2_rst_process_cmd(hr_dev);
++ if (ret == CMD_RST_PRC_SUCCESS)
++ return 0;
++ if (ret == CMD_RST_PRC_EBUSY)
++ return ret;
++
++ ret = __hns_roce_cmq_send(hr_dev, desc, num);
++ if (ret) {
++ retval = hns_roce_v2_rst_process_cmd(hr_dev);
++ if (retval == CMD_RST_PRC_SUCCESS)
++ return 0;
++ else if (retval == CMD_RST_PRC_EBUSY)
++ return retval;
++ }
++
++ return ret;
++}
++
+ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_query_version *resp;
+@@ -1857,6 +1982,9 @@ static int hns_roce_v2_chk_mbox(struct h
+
+ status = hns_roce_v2_cmd_complete(hr_dev);
+ if (status != 0x1) {
++ if (status == CMD_RST_PRC_EBUSY)
++ return status;
++
+ dev_err(dev, "mailbox status 0x%x!\n", status);
+ return -EBUSY;
+ }
+@@ -5961,6 +6089,7 @@ static const struct hns_roce_hw hns_roce
+ .hw_exit = hns_roce_v2_exit,
+ .post_mbox = hns_roce_v2_post_mbox,
+ .chk_mbox = hns_roce_v2_chk_mbox,
++ .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
+ .set_gid = hns_roce_v2_set_gid,
+ .set_mac = hns_roce_v2_set_mac,
+ .write_mtpt = hns_roce_v2_write_mtpt,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -96,6 +96,8 @@
+ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
+ #define HNS_ROCE_V2_RSV_QPS 8
+
++#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
++
+ #define HNS_ROCE_CONTEXT_HOP_NUM 1
+ #define HNS_ROCE_SCCC_HOP_NUM 1
+ #define HNS_ROCE_MTT_HOP_NUM 1
diff --git a/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch b/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch
new file mode 100644
index 0000000000..40bf3f4bf9
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch
@@ -0,0 +1,29 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:04 +0800
+Subject: RDMA/hns: Fix the state of rereg mr
+Patch-mainline: v5.1-rc1
+Git-commit: ab22bf05216a6bb4812448f3a8609489047cf311
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The state of an MR after a reregister operation should be set to valid.
+Otherwise, it keeps whatever state it had before the reregistration.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2195,6 +2195,9 @@ static int hns_roce_v2_rereg_write_mtpt(
+ struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+ int ret = 0;
+
++ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
++ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
++
+ if (flags & IB_MR_REREG_PD) {
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, pdn);
diff --git a/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch b/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch
new file mode 100644
index 0000000000..6460ca7056
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch
@@ -0,0 +1,54 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:25 +0800
+Subject: RDMA/hns: Hide error print information with roce vf device
+Patch-mainline: v5.2-rc1
+Git-commit: 07c2339a91c1ec3a8b8ada00361eced7b153ec0c
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The driver should not print error information when the hip08 device
+does not support the virtual function.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -6123,15 +6123,8 @@ static int hns_roce_hw_v2_get_cfg(struct
+ struct hnae3_handle *handle)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+- const struct pci_device_id *id;
+ int i;
+
+- id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+- if (!id) {
+- dev_err(hr_dev->dev, "device is not compatible!\n");
+- return -ENXIO;
+- }
+-
+ hr_dev->hw = &hns_roce_hw_v2;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = hr_dev->sdb_offset;
+@@ -6219,6 +6212,7 @@ static void __hns_roce_hw_v2_uninit_inst
+ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ {
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ const struct pci_device_id *id;
+ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+@@ -6229,6 +6223,10 @@ static int hns_roce_hw_v2_init_instance(
+ goto reset_chk_err;
+ }
+
++ id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
++ if (!id)
++ return 0;
++
+ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
diff --git a/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch b/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch
new file mode 100644
index 0000000000..7a6da7c908
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch
@@ -0,0 +1,27 @@
+From: chenglang <chenglang@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:03 +0800
+Subject: RDMA/hns: Limit minimum ROCE CQ depth to 64
+Patch-mainline: v5.1-rc1
+Git-commit: 704e0e613a6d584fde1c80ead0329e918b4f8671
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+This patch sets the minimum CQ depth specification of hip08, making it
+consistent with the handling on hip06.
+
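+From the verbs side this should be transparent: a consumer asking for a
+tiny CQ may simply get a deeper one (sketch; `ctx` is an open device
+context, and the clamping behaviour is an assumption based on the
+min_cqes cap set below):
+
+  /* Requesting 1 CQE on hip08 now yields a CQ sized up to at least
+   * the device minimum (HNS_ROCE_MIN_CQE_NUM, i.e. 64). */
+  struct ibv_cq *cq = ibv_create_cq(ctx, 1, NULL, NULL, 0);
+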
+Signed-off-by: chenglang <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1512,6 +1512,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
+ caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
+ caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
++ caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
+ caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
+ caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
+ caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
diff --git a/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch b/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch
new file mode 100644
index 0000000000..8c19fffea0
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch
@@ -0,0 +1,34 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 19 Mar 2019 11:10:09 +0200
+Subject: RDMA/hns: Limit scope of hns_roce_cmq_send()
+Patch-mainline: v5.2-rc1
+Git-commit: e95e52a1788d4a8af547261875c0fbae2e6e3028
+References: bsc#1104427 FATE#326416
+
+A forgotten static keyword causes the following warning to appear while
+building the HNS driver. Declare hns_roce_cmq_send() as a static
+function to fix it.
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:1089:5: warning: no previous
+prototype for 'hns_roce_cmq_send' [-Wmissing-prototypes]
+ int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+
+Fixes: 6a04aed6afae ("RDMA/hns: Fix the chip hanging caused by sending mailbox&CMQ during reset")
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1086,7 +1086,7 @@ static int __hns_roce_cmq_send(struct hn
+ return ret;
+ }
+
+-int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+ {
+ int retval;
diff --git a/patches.drivers/RDMA-hns-Make-some-function-static.patch b/patches.drivers/RDMA-hns-Make-some-function-static.patch
new file mode 100644
index 0000000000..95f39ddb79
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Make-some-function-static.patch
@@ -0,0 +1,60 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 1 Feb 2019 11:11:04 +0800
+Subject: RDMA/hns: Make some function static
+Patch-mainline: v5.1-rc1
+Git-commit: c3c668e742397dfc107e44c09606cc68b37df30d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Fixes the following sparse warnings:
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:5822:5: warning:
+ symbol 'hns_roce_v2_query_srq' was not declared. Should it be static?
+drivers/infiniband/hw/hns/hns_roce_srq.c:158:6: warning:
+ symbol 'hns_roce_srq_free' was not declared. Should it be static?
+drivers/infiniband/hw/hns/hns_roce_srq.c:81:5: warning:
+ symbol 'hns_roce_srq_alloc' was not declared. Should it be static?
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ drivers/infiniband/hw/hns/hns_roce_srq.c | 9 +++++----
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5818,7 +5818,7 @@ static int hns_roce_v2_modify_srq(struct
+ return 0;
+ }
+
+-int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
++static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -79,9 +79,9 @@ static int hns_roce_hw2sw_srq(struct hns
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ }
+
+-int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
+- struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
+- struct hns_roce_srq *srq)
++static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
++ u16 xrcd, struct hns_roce_mtt *hr_mtt,
++ u64 db_rec_addr, struct hns_roce_srq *srq)
+ {
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct hns_roce_cmd_mailbox *mailbox;
+@@ -160,7 +160,8 @@ err_out:
+ return ret;
+ }
+
+-void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
++static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
++ struct hns_roce_srq *srq)
+ {
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ int ret;
diff --git a/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch b/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch
new file mode 100644
index 0000000000..09e16f07be
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch
@@ -0,0 +1,48 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 16 Feb 2019 20:10:24 +0800
+Subject: RDMA/hns: Modify qp&cq&pd specification according to UM
+Patch-mainline: v5.1-rc1
+Git-commit: 3e394f9413ecba2779b6a1d77095f4d8611a52d2
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+According to the hip08 hardware limits, the qp and cq specification is 1M,
+and the mtpt specification is 1M in kernel space.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -45,14 +45,14 @@
+ #define HNS_ROCE_VF_SGID_NUM 32
+ #define HNS_ROCE_VF_SL_NUM 8
+
+-#define HNS_ROCE_V2_MAX_QP_NUM 0x2000
+-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
++#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
++#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
+ #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ 0x100000
+ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+-#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
+-#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
++#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
++#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+@@ -67,7 +67,7 @@
+ #define HNS_ROCE_V2_COMP_VEC_NUM 63
+ #define HNS_ROCE_V2_AEQE_VEC_NUM 1
+ #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
+-#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
++#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
+ #define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
+ #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
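
For scale, the new limits in decimal: 0x100000 is 2^20 = 1048576, the "1M"
from the commit text, so the QP limit grows from 0x2000 (8K) to 1M, the CQ
limit from 0x8000 (32K) to 1M, and the MTPT limit from 0x8000 (32K) to 1M.
A trivial standalone check:

    #include <stdio.h>

    int main(void)
    {
            /* old vs new hip08 limits from the hunk above */
            printf("QP:   0x2000 -> 0x100000 (%u -> %u)\n", 0x2000, 0x100000);
            printf("CQ:   0x8000 -> 0x100000 (%u -> %u)\n", 0x8000, 0x100000);
            printf("MTPT: 0x8000 -> 0x100000 (%u -> %u)\n", 0x8000, 0x100000);
            return 0;
    }
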
diff --git a/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch b/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch
new file mode 100644
index 0000000000..cf9eae1d3f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch
@@ -0,0 +1,28 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:09 +0800
+Subject: RDMA/hns: Modify the pbl ba page size for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 91fb4d83b88a7b544ce564c44167aad29d4154f0
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+Modify the pbl ba page size to 16K in order to support a 4G MR size.
+
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1366,7 +1366,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->mpt_ba_pg_sz = 0;
+ caps->mpt_buf_pg_sz = 0;
+ caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+- caps->pbl_ba_pg_sz = 0;
++ caps->pbl_ba_pg_sz = 2;
+ caps->pbl_buf_pg_sz = 0;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->mtt_ba_pg_sz = 0;
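
In this driver the *_pg_sz fields are exponents over a 4KB base page, so
pbl_ba_pg_sz = 2 selects a 16KB base-address page. Assuming 8-byte address
entries and 4KB PBL and data pages (plausible for hip08, but an assumption
here rather than something stated in the patch), that single change is
exactly what a 4GB MR needs:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long base = 4096, entry = 8;            /* assumed sizes */
            unsigned long long ba_pg = base << 2;                 /* pbl_ba_pg_sz = 2 -> 16KB */
            unsigned long long pbl_pages = ba_pg / entry;         /* 2048 PBL pages reachable */
            unsigned long long ptes = pbl_pages * (base / entry); /* 2048 * 512 entries */

            printf("max MR: %llu GB\n", ptes * base >> 30);       /* 4 GB */
            return 0;
    }
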
diff --git a/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch b/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch
new file mode 100644
index 0000000000..3a110d3ffd
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch
@@ -0,0 +1,92 @@
+From: Lang Cheng <chenglang@huawei.com>
+Date: Fri, 24 May 2019 15:31:22 +0800
+Subject: RDMA/hns: Move spin_lock_irqsave to the correct place
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 780f33962ef27d7f27c6b47a55593c6ffd357922
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When hip08 sets a gid, it calls spin_unlock_bh while sending the cmq. If
+main.ko has already taken spin_lock_irqsave, and the kernel predates commit
+f71b74bca637 ("irq/softirqs: Use lockdep to assert IRQs are
+disabled/enabled"), this triggers WARN_ON_ONCE because spin_unlock_bh is
+called in an interrupts-disabled context.
+
+In fact, the spin_lock_irqsave in main.ko is only needed for hip06 and
+should be placed in hns_roce_hw_v1.c. hns_roce_hw_v2.c does its own
+spin_lock_bh/spin_unlock_bh and does not need main.ko to manage the lock.
+
+Signed-off-by: Lang Cheng <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 5 +++++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 10 ----------
+ 2 files changed, 5 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -1780,11 +1780,14 @@ static int hns_roce_v1_set_gid(struct hn
+ int gid_index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+ {
++ unsigned long flags;
+ u32 *p = NULL;
+ u8 gid_idx = 0;
+
+ gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
+
++ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
++
+ p = (u32 *)&gid->raw[0];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+@@ -1801,6 +1804,8 @@ static int hns_roce_v1_set_gid(struct hn
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
++ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
++
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -78,18 +78,13 @@ static int hns_roce_add_gid(const struct
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ u8 port = attr->port_num - 1;
+- unsigned long flags;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+-
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
+
+- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+-
+ return ret;
+ }
+
+@@ -98,18 +93,13 @@ static int hns_roce_del_gid(const struct
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ struct ib_gid_attr zattr = { };
+ u8 port = attr->port_num - 1;
+- unsigned long flags;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+-
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
+
+- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+-
+ return ret;
+ }
+
diff --git a/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch b/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch
new file mode 100644
index 0000000000..1f92cbf96e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch
@@ -0,0 +1,248 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:24 +0800
+Subject: RDMA/hns: Only assgin some fields if the relatived attr_mask is set
+Patch-mainline: v5.2-rc1
+Git-commit: 5b01b243b0b3725b4460e8924e1f105bb4038969
+References: bsc#1104427 FATE#326416
+
+According to the IB protocol, some fields of the qp context are optional
+and should only be filled in when the related attr_mask bits are set. The
+related attr_mask bits include IB_QP_TIMEOUT, IB_QP_RETRY_CNT,
+IB_QP_RNR_RETRY and IB_QP_MIN_RNR_TIMER. Besides, we move some of these qp
+context assignments out of the state-specific qp transition functions.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 152 +++++++++++++++--------------
+ 1 file changed, 81 insertions(+), 71 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3176,12 +3176,6 @@ static void modify_qp_reset_to_init(stru
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
+
+- if (attr_mask & IB_QP_QKEY) {
+- context->qkey_xrcd = attr->qkey;
+- qpc_mask->qkey_xrcd = 0;
+- hr_qp->qkey = attr->qkey;
+- }
+-
+ if (hr_qp->rdb_en) {
+ roce_set_bit(context->byte_68_rq_db,
+ V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
+@@ -3393,7 +3387,6 @@ static void modify_qp_reset_to_init(stru
+ 0);
+
+ hr_qp->access_flags = attr->qp_access_flags;
+- hr_qp->pkey_index = attr->pkey_index;
+ roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+@@ -3517,11 +3510,6 @@ static void modify_qp_init_to_init(struc
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+ }
+
+- if (attr_mask & IB_QP_QKEY) {
+- context->qkey_xrcd = attr->qkey;
+- qpc_mask->qkey_xrcd = 0;
+- }
+-
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+@@ -3641,13 +3629,6 @@ static int modify_qp_init_to_rtr(struct
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
+
+- roce_set_field(context->byte_80_rnr_rx_cqn,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
+- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+-
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
+ >> PAGE_ADDR_SHIFT);
+@@ -3713,15 +3694,6 @@ static int modify_qp_init_to_rtr(struct
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+ }
+
+- if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+- attr->max_dest_rd_atomic) {
+- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+- V2_QPC_BYTE_140_RR_MAX_S,
+- fls(attr->max_dest_rd_atomic - 1));
+- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+- V2_QPC_BYTE_140_RR_MAX_S, 0);
+- }
+-
+ if (attr_mask & IB_QP_DEST_QPN) {
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+@@ -3902,57 +3874,14 @@ static int modify_qp_rtr_to_rts(struct i
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+ V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+- V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
+- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+- V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+-
+- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
+- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
+-
+- roce_set_field(context->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+-
+- roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+- V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+- V2_QPC_BYTE_244_RNR_CNT_S, 0);
+-
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0x100);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+- if (attr_mask & IB_QP_TIMEOUT) {
+- if (attr->timeout < 31) {
+- roce_set_field(context->byte_28_at_fl,
+- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+- attr->timeout);
+- roce_set_field(qpc_mask->byte_28_at_fl,
+- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+- 0);
+- } else {
+- dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
+- }
+- }
+-
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+
+- if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+- V2_QPC_BYTE_208_SR_MAX_S,
+- fls(attr->max_rd_atomic - 1));
+- roce_set_field(qpc_mask->byte_208_irrl,
+- V2_QPC_BYTE_208_SR_MAX_M,
+- V2_QPC_BYTE_208_SR_MAX_S, 0);
+- }
+ return 0;
+ }
+
+@@ -4146,6 +4075,53 @@ static int hns_roce_v2_modify_qp(struct
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
++ if (attr_mask & IB_QP_TIMEOUT) {
++ if (attr->timeout < 31) {
++ roce_set_field(context->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ attr->timeout);
++ roce_set_field(qpc_mask->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ 0);
++ } else {
++ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
++ }
++ }
++
++ if (attr_mask & IB_QP_RETRY_CNT) {
++ roce_set_field(context->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
++ attr->retry_cnt);
++ roce_set_field(qpc_mask->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
++
++ roce_set_field(context->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_CNT_M,
++ V2_QPC_BYTE_212_RETRY_CNT_S,
++ attr->retry_cnt);
++ roce_set_field(qpc_mask->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_CNT_M,
++ V2_QPC_BYTE_212_RETRY_CNT_S, 0);
++ }
++
++ if (attr_mask & IB_QP_RNR_RETRY) {
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
++
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_CNT_M,
++ V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_CNT_M,
++ V2_QPC_BYTE_244_RNR_CNT_S, 0);
++ }
++
+ if (attr_mask & IB_QP_SQ_PSN) {
+ roce_set_field(context->byte_172_sq_psn,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+@@ -4192,9 +4168,37 @@ static int hns_roce_v2_modify_qp(struct
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+ }
+
++ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
++ attr->max_dest_rd_atomic) {
++ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++ V2_QPC_BYTE_140_RR_MAX_S,
++ fls(attr->max_dest_rd_atomic - 1));
++ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++ V2_QPC_BYTE_140_RR_MAX_S, 0);
++ }
++
++ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
++ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
++ V2_QPC_BYTE_208_SR_MAX_S,
++ fls(attr->max_rd_atomic - 1));
++ roce_set_field(qpc_mask->byte_208_irrl,
++ V2_QPC_BYTE_208_SR_MAX_M,
++ V2_QPC_BYTE_208_SR_MAX_S, 0);
++ }
++
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
++ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
++ roce_set_field(context->byte_80_rnr_rx_cqn,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_S,
++ attr->min_rnr_timer);
++ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
++ }
++
+ /* RC&UC required attr */
+ if (attr_mask & IB_QP_RQ_PSN) {
+ roce_set_field(context->byte_108_rx_reqepsn,
+@@ -4211,6 +4215,12 @@ static int hns_roce_v2_modify_qp(struct
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+ }
+
++ if (attr_mask & IB_QP_QKEY) {
++ context->qkey_xrcd = attr->qkey;
++ qpc_mask->qkey_xrcd = 0;
++ hr_qp->qkey = attr->qkey;
++ }
++
+ roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
+ ibqp->srq ? 1 : 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
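
One detail worth noting in the moved code: the RR_MAX/SR_MAX fields hold a
power-of-two exponent, and fls(n - 1) equals ceil(log2(n)) for n >= 1, which
is why max_rd_atomic and max_dest_rd_atomic are encoded through fls(). A
standalone illustration (fls() reimplemented here; in the kernel it comes
from <linux/bitops.h>):

    #include <stdio.h>

    /* position of the most significant set bit, 1-based; 0 for x == 0 */
    static int fls(unsigned int x)
    {
            int r = 0;

            while (x) {
                    r++;
                    x >>= 1;
            }
            return r;
    }

    int main(void)
    {
            unsigned int n;

            for (n = 1; n <= 16; n <<= 1)
                    printf("max_rd_atomic=%2u -> field=%d\n", n, fls(n - 1));
            /* 1->0, 2->1, 4->2, 8->3, 16->4, i.e. ceil(log2(n)) */
            return 0;
    }
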
diff --git a/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch b/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch
new file mode 100644
index 0000000000..ae6f922422
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch
@@ -0,0 +1,68 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:22 +0800
+Subject: RDMA/hns: Only assign the fields of the rq psn if IB_QP_RQ_PSN is set
+Patch-mainline: v5.2-rc1
+Git-commit: 601f3e6d067c4399953dc7ede8f4c5448f91b02a
+References: bsc#1104427 FATE#326416
+
+Only when the IB_QP_RQ_PSN flag of attr_mask is set is it valid to assign
+the related rq psn fields into the qp context when modifying the qp.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 28 ++++++++++++++++------------
+ 1 file changed, 16 insertions(+), 12 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3675,13 +3675,6 @@ static int modify_qp_init_to_rtr(struct
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+
+- roce_set_field(context->byte_108_rx_reqepsn,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
+- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+-
+ roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+ V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+@@ -3789,11 +3782,6 @@ static int modify_qp_init_to_rtr(struct
+ context->rq_rnr_timer = 0;
+ qpc_mask->rq_rnr_timer = 0;
+
+- roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+- V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
+- roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+- V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+ V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+@@ -4207,6 +4195,22 @@ static int hns_roce_v2_modify_qp(struct
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
++ /* RC&UC required attr */
++ if (attr_mask & IB_QP_RQ_PSN) {
++ roce_set_field(context->byte_108_rx_reqepsn,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
++ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
++
++ roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
++ V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
++ roce_set_field(qpc_mask->byte_152_raq,
++ V2_QPC_BYTE_152_RAQ_PSN_M,
++ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
++ }
++
+ roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
+ ibqp->srq ? 1 : 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
diff --git a/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch b/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch
new file mode 100644
index 0000000000..3dfafb3c11
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch
@@ -0,0 +1,133 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:21 +0800
+Subject: RDMA/hns: Only assign the relatived fields of psn if IB_QP_SQ_PSN is
+ set
+Patch-mainline: v5.2-rc1
+Git-commit: f04cc17878b47bfa47af2e50f481d7f6eaaf3ca7
+References: bsc#1104427 FATE#326416
+
+Only when the IB_QP_SQ_PSN flag of attr_mask is set is it valid to assign
+the related psn fields into the qp context when modifying the qp.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 83 ++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 37 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3891,13 +3891,6 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_240_RX_ACK_MSN_M,
+ V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+- roce_set_field(context->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+@@ -3911,27 +3904,6 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+
+- roce_set_field(context->byte_220_retry_psn_msn,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
+-
+- roce_set_field(context->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
+- roce_set_field(qpc_mask->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
+-
+- roce_set_field(context->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+@@ -3982,17 +3954,8 @@ static int modify_qp_rtr_to_rts(struct i
+ }
+ }
+
+- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+- V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+- V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+- roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+- V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+- V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+@@ -4195,6 +4158,52 @@ static int hns_roce_v2_modify_qp(struct
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
++ if (attr_mask & IB_QP_SQ_PSN) {
++ roce_set_field(context->byte_172_sq_psn,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_172_sq_psn,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
++
++ roce_set_field(context->byte_196_sq_psn,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_196_sq_psn,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
++
++ roce_set_field(context->byte_220_retry_psn_msn,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
++
++ roce_set_field(context->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
++ attr->sq_psn >> 16);
++ roce_set_field(qpc_mask->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
++
++ roce_set_field(context->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
++ attr->sq_psn);
++ roce_set_field(qpc_mask->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
++
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
++ }
++
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
diff --git a/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch b/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch
new file mode 100644
index 0000000000..87a93b911f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch
@@ -0,0 +1,62 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 12 Jan 2019 18:36:29 +0800
+Subject: RDMA/hns: RDMA/hns: Assign rq head pointer when enable rq record db
+Patch-mainline: v5.1-rc1
+Git-commit: de77503a59403e7045c18c6bb0a10c245a99b648
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When flushing a cqe, the driver needs to get the rq and sq head pointers
+from the user's doorbell address space and update them into the qp context
+via modify qp. If the rq does not exist, it must not fetch the rq value
+from the user's doorbell address space.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -676,6 +676,10 @@ static int hns_roce_create_qp_common(str
+ dev_err(dev, "rq record doorbell map failed!\n");
+ goto err_sq_dbmap;
+ }
++
++ /* indicate kernel supports rq record db */
++ resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
++ hr_qp->rdb_en = 1;
+ }
+ } else {
+ if (init_attr->create_flags &
+@@ -784,16 +788,11 @@ static int hns_roce_create_qp_common(str
+ else
+ hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
+
+- if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
+- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
+-
+- /* indicate kernel supports rq record db */
+- resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+- ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
++ if (udata) {
++ ret = ib_copy_to_udata(udata, &resp,
++ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ goto err_qp;
+-
+- hr_qp->rdb_en = 1;
+ }
+ hr_qp->event = hns_roce_ib_qp_event;
+
+@@ -970,7 +969,9 @@ int hns_roce_modify_qp(struct ib_qp *ibq
+ (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
++
++ if (hr_qp->rdb_en == 1)
++ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(dev, "flush cqe is not supported in userspace!\n");
+ goto out;
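
The reworked copy is also a compatibility pattern worth spelling out:
copying min(udata->outlen, sizeof(resp)) bytes lets older userspace (with a
smaller response struct) and newer userspace share one kernel. A userspace
sketch with memcpy standing in for ib_copy_to_udata() and an illustrative
struct:

    #include <stdio.h>
    #include <string.h>

    struct resp_v2 {                /* kernel's current response layout */
            unsigned int cap_flags;
            unsigned int new_field; /* unknown to old userspace */
    };

    int main(void)
    {
            struct resp_v2 resp = { .cap_flags = 1, .new_field = 7 };
            unsigned char user_buf[4];  /* old userspace: room for v1 only */
            size_t outlen = sizeof(user_buf);
            size_t n = outlen < sizeof(resp) ? outlen : sizeof(resp);

            memcpy(user_buf, &resp, n); /* copies 4 of 8 bytes, no overrun */
            printf("copied %zu of %zu bytes\n", n, sizeof(resp));
            return 0;
    }
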
diff --git a/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch b/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch
new file mode 100644
index 0000000000..741207d5f6
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch
@@ -0,0 +1,124 @@
+From: Lang Cheng <chenglang@huawei.com>
+Date: Fri, 24 May 2019 15:31:23 +0800
+Subject: RDMA/hns: Remove jiffies operation in disable interrupt context
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 669cefb654cb69b280e31380f5fc7e3b5755b0cd
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+In some functions, the jiffies operation is unnecessary, and we can
+control the delay using the mdelay and udelay functions only. In
+particular, hns_roce_v1_clear_hem calls spin_lock_irqsave, so the context
+has interrupts disabled and we cannot use the jiffies and msleep functions.
+
+Signed-off-by: Lang Cheng <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 21 +++++++++++----------
+ drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 19 ++++++++++---------
+ 2 files changed, 21 insertions(+), 19 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -376,18 +376,19 @@ static int hns_roce_set_hem(struct hns_r
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+- while (1) {
+- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+- if (!(time_before(jiffies, end))) {
+- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+- spin_unlock_irqrestore(lock, flags);
+- return -EBUSY;
+- }
+- } else {
++ end = HW_SYNC_TIMEOUT_MSECS;
++ while (end) {
++ if (!readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)
+ break;
+- }
++
+ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
++ end -= HW_SYNC_SLEEP_TIME_INTERVAL;
++ }
++
++ if (end <= 0) {
++ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
++ spin_unlock_irqrestore(lock, flags);
++ return -EBUSY;
+ }
+
+ bt_cmd_l = (u32)bt_ba;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -960,8 +960,7 @@ static int hns_roce_v1_recreate_lp_qp(st
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+- unsigned long end =
+- msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
++ unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
+ free_mr = &priv->free_mr;
+@@ -981,10 +980,11 @@ static int hns_roce_v1_recreate_lp_qp(st
+
+ queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
+
+- while (time_before_eq(jiffies, end)) {
++ while (end) {
+ if (try_wait_for_completion(&comp))
+ return 0;
+ msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
++ end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
+ }
+
+ lp_qp_work->comp_flag = 0;
+@@ -1098,8 +1098,7 @@ static int hns_roce_v1_dereg_mr(struct h
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+- unsigned long end =
+- msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
++ unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
+ unsigned long start = jiffies;
+ int npages;
+ int ret = 0;
+@@ -1129,10 +1128,11 @@ static int hns_roce_v1_dereg_mr(struct h
+
+ queue_work(free_mr->free_mr_wq, &(mr_work->work));
+
+- while (time_before_eq(jiffies, end)) {
++ while (end) {
+ if (try_wait_for_completion(&comp))
+ goto free_mr;
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
++ end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
+ }
+
+ mr_work->comp_flag = 0;
+@@ -2502,10 +2502,10 @@ static int hns_roce_v1_clear_hem(struct
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
++ end = HW_SYNC_TIMEOUT_MSECS;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+- if (!(time_before(jiffies, end))) {
++ if (end < 0) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+@@ -2514,7 +2514,8 @@ static int hns_roce_v1_clear_hem(struct
+ } else {
+ break;
+ }
+- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
++ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
++ end -= HW_SYNC_SLEEP_TIME_INTERVAL;
+ }
+
+ bt_cmd_val[0] = (__le32)bt_ba;
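
The replacement idiom is a fixed time budget decremented by the
per-iteration delay; it makes progress even when interrupts are off and
jiffies cannot be trusted to advance. A runnable userspace analogue, with
usleep() standing in for mdelay() and a stub for the readl() poll:

    #include <stdio.h>
    #include <unistd.h>

    #define TIMEOUT_MS  50
    #define INTERVAL_MS 10

    static int hw_ready(int polls)  /* stand-in for the readl(bt_cmd) check */
    {
            return polls >= 3;      /* pretend hw syncs on the 4th poll */
    }

    int main(void)
    {
            int budget = TIMEOUT_MS, polls = 0;

            while (budget > 0) {
                    if (hw_ready(polls++)) {
                            printf("ready after %d ms\n", TIMEOUT_MS - budget);
                            return 0;
                    }
                    usleep(INTERVAL_MS * 1000); /* mdelay() in the kernel */
                    budget -= INTERVAL_MS;
            }
            fprintf(stderr, "timed out\n"); /* -EBUSY in the driver */
            return 1;
    }
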
diff --git a/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch b/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch
new file mode 100644
index 0000000000..02c32d2774
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch
@@ -0,0 +1,41 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 31 Jan 2019 15:19:21 +0000
+Subject: RDMA/hns: Remove set but not used variable 'rst'
+Patch-mainline: v5.1-rc1
+Git-commit: da91ddfdc7212e6e716be55a5cf2305ce84a422f
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c: In function 'hns_roce_v2_qp_flow_control_init':
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:4384:33: warning:
+ variable 'rst' set but not used [-Wunused-but-set-variable]
+
+It has never been used since its introduction.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4381,7 +4381,7 @@ static int hns_roce_v2_destroy_qp(struct
+ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+ {
+- struct hns_roce_sccc_clr_done *rst, *resp;
++ struct hns_roce_sccc_clr_done *resp;
+ struct hns_roce_sccc_clr *clr;
+ struct hns_roce_cmq_desc desc;
+ int ret, i;
+@@ -4390,7 +4390,6 @@ static int hns_roce_v2_qp_flow_control_i
+
+ /* set scc ctx clear done flag */
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+- rst = (struct hns_roce_sccc_clr_done *)desc.data;
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
diff --git a/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch b/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch
new file mode 100644
index 0000000000..5141ca4e8d
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch
@@ -0,0 +1,33 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:05 +0800
+Subject: RDMA/hns: Set allocated memory to zero for wrid
+Patch-mainline: v5.1-rc1
+Git-commit: f7f27a5f03cc9f47cc14f75a5be25f0f26b1b5ff
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The memory allocated for wrid should be initialized to zero.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -764,10 +764,10 @@ static int hns_roce_create_qp_common(str
+ goto err_mtt;
+ }
+
+- hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
+- GFP_KERNEL);
+- hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
+- GFP_KERNEL);
++ hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
++ GFP_KERNEL);
++ hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
++ GFP_KERNEL);
+ if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
+ ret = -ENOMEM;
+ goto err_wrid;
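
kcalloc differs from kmalloc_array only in zeroing the allocation (both
overflow-check the count * size multiplication), which is what guarantees a
defined wrid array before any work request completes. The userspace
analogue is calloc versus malloc:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t cnt = 8;
            uint64_t *wrid = calloc(cnt, sizeof(*wrid)); /* zeroed, like kcalloc */

            if (!wrid)
                    return 1;
            printf("wrid[0] = %llu\n", (unsigned long long)wrid[0]); /* 0 */
            free(wrid);
            return 0;
    }
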
diff --git a/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch b/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch
new file mode 100644
index 0000000000..406090e514
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch
@@ -0,0 +1,29 @@
+From: chenglang <chenglang@huawei.com>
+Date: Sun, 7 Apr 2019 13:23:37 +0800
+Subject: RDMA/hns: Support to create 1M srq queue
+Patch-mainline: v5.2-rc1
+Git-commit: 2b277dae0679c8177f161278dbad035688838d6e
+References: bsc#1104427 FATE#326416
+
+In mhop 0 mode, 64*bt_num queues can be supported.
+In mhop 1 mode, 32K*bt_num queues can be supported.
+Configure srqc_hop_num to 1 to support 1M SRQ queues.
+
+Signed-off-by: chenglang <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1559,7 +1559,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->srqc_ba_pg_sz = 0;
+ caps->srqc_buf_pg_sz = 0;
+- caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
++ caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->cqc_ba_pg_sz = 0;
+ caps->cqc_buf_pg_sz = 0;
+ caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
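
The 64*bt_num and 32K*bt_num figures fall out of the table geometry.
Assuming 4KB table pages, 64-byte SRQ contexts and 8-byte page pointers
(illustrative sizes, not stated in the patch), hop 0 stores contexts
directly in the base table page while hop 1 adds one level of indirection:

    #include <stdio.h>

    int main(void)
    {
            unsigned long pg = 4096, ctx = 64, ptr = 8;   /* assumed sizes */
            unsigned long hop0 = pg / ctx;                /* 64 contexts per BT page */
            unsigned long hop1 = (pg / ptr) * (pg / ctx); /* 512 * 64 = 32K per BT page */

            printf("mhop 0: %lu*bt_num, mhop 1: %luK*bt_num\n", hop0, hop1 >> 10);
            return 0;
    }
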
diff --git a/patches.drivers/RDMA-hns-Update-CQE-specifications.patch b/patches.drivers/RDMA-hns-Update-CQE-specifications.patch
new file mode 100644
index 0000000000..8ce6152688
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Update-CQE-specifications.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Fri, 24 May 2019 15:31:21 +0800
+Subject: RDMA/hns: Update CQE specifications
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 0502849d0bb133b492eed24fd270441e652c84cc
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+According to the hip08 UM, the maximum number of CQEs supported by each CQ
+is 4M.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -54,7 +54,7 @@
+ #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+-#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
++#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
diff --git a/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch b/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch
new file mode 100644
index 0000000000..cfa8610d7d
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:23 +0800
+Subject: RDMA/hns: Update the range of raq_psn field of qp context
+Patch-mainline: v5.2-rc1
+Git-commit: 834fa8cf6f7002706b02873fc0d16f9b06ef4819
+References: bsc#1104427 FATE#326416
+
+According to the hip08 UM (User Manual), the raq_psn field occupies bits
+[23:0].
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -719,8 +719,8 @@ struct hns_roce_v2_qp_context {
+ #define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
+ #define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
+
+-#define V2_QPC_BYTE_152_RAQ_PSN_S 8
+-#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
++#define V2_QPC_BYTE_152_RAQ_PSN_S 0
++#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0)
+
+ #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
+ #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
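
The _S/_M pairs are a shift plus a GENMASK(high, low) selection of a
bitfield; the patch moves the 24-bit raq_psn field from bits [31:8] down to
[23:0]. A quick userspace check of the two masks (GENMASK reimplemented for
32-bit values; __builtin_popcount is a GCC/Clang builtin):

    #include <stdio.h>

    #define GENMASK(h, l) (((~0U) << (l)) & (~0U >> (31 - (h))))

    int main(void)
    {
            printf("old [31:8]: 0x%08x, %d bits\n",
                   GENMASK(31, 8), __builtin_popcount(GENMASK(31, 8)));
            printf("new [23:0]: 0x%08x, %d bits\n",
                   GENMASK(23, 0), __builtin_popcount(GENMASK(23, 0)));
            return 0;  /* both 24 bits wide; only the position changes */
    }
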
diff --git a/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch b/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch
new file mode 100644
index 0000000000..fa9bb4ed91
--- /dev/null
+++ b/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch
@@ -0,0 +1,89 @@
+From: Sagiv Ozeri <sagiv.ozeri@marvell.com>
+Date: Mon, 20 May 2019 12:33:20 +0300
+Subject: RDMA/qedr: Fix incorrect device rate.
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 69054666df0a9b4e8331319f98b6b9a88bc3fcc4
+References: bsc#1136188
+
+Use the correct enum value introduced in commit 12113a35ada6 ("IB/core:
+Add HDR speed enum"). Prior to this change, a 50Gbps port would show
+40Gbps.
+
+This patch also cleans up the redundant redefinition of IB speeds in
+qedr.
+
+Fixes: 12113a35ada6 ("IB/core: Add HDR speed enum")
+Signed-off-by: Sagiv Ozeri <sagiv.ozeri@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 25 +++++++++----------------
+ 1 file changed, 9 insertions(+), 16 deletions(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -158,54 +158,47 @@ int qedr_query_device(struct ib_device *
+ return 0;
+ }
+
+-#define QEDR_SPEED_SDR (1)
+-#define QEDR_SPEED_DDR (2)
+-#define QEDR_SPEED_QDR (4)
+-#define QEDR_SPEED_FDR10 (8)
+-#define QEDR_SPEED_FDR (16)
+-#define QEDR_SPEED_EDR (32)
+-
+ static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+ {
+ switch (speed) {
+ case 1000:
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+- *ib_speed = QEDR_SPEED_DDR;
++ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+- *ib_speed = QEDR_SPEED_QDR;
+- *ib_width = IB_WIDTH_4X;
++ *ib_speed = IB_SPEED_HDR;
++ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 100000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+ }
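
The user-visible bug follows from rate = per-lane speed * lane width. With
per-lane rates as defined by the IB speed enums (SDR 2.5, DDR 5, QDR 10,
FDR 14, EDR 25, HDR 50 Gb/s), the old 50G mapping of QDR x4 reports 40 Gb/s
while the corrected HDR x1 reports the true 50:

    #include <stdio.h>

    int main(void)
    {
            printf("old 50G mapping: QDR x4 = %g Gb/s\n", 10.0 * 4); /* 40 */
            printf("new 50G mapping: HDR x1 = %g Gb/s\n", 50.0 * 1); /* 50 */
            return 0;
    }
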
diff --git a/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch b/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch
index 360417b2c6..514839040c 100644
--- a/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch
+++ b/patches.drivers/Revert-net-phy-marvell-avoid-pause-mode-on-SGMII-to-.patch
@@ -22,7 +22,7 @@ Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
-@@ -826,8 +826,6 @@ static int m88e1510_config_init(struct p
+@@ -842,8 +842,6 @@ static int m88e1510_config_init(struct p
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
@@ -31,7 +31,7 @@ Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
-@@ -850,16 +848,6 @@ static int m88e1510_config_init(struct p
+@@ -866,16 +864,6 @@ static int m88e1510_config_init(struct p
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
@@ -47,4 +47,4 @@ Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
- phydev->advertising &= ~pause;
}
- return m88e1121_config_init(phydev);
+ return m88e1318_config_init(phydev);
diff --git a/patches.drivers/net-hns3-Add-handling-of-MAC-tunnel-interruption.patch b/patches.drivers/net-hns3-Add-handling-of-MAC-tunnel-interruption.patch
new file mode 100644
index 0000000000..c45558470e
--- /dev/null
+++ b/patches.drivers/net-hns3-Add-handling-of-MAC-tunnel-interruption.patch
@@ -0,0 +1,260 @@
+From: Weihang Li <liweihang@hisilicon.com>
+Date: Fri, 19 Apr 2019 11:05:45 +0800
+Subject: net: hns3: Add handling of MAC tunnel interruption
+Patch-mainline: v5.2-rc1
+Git-commit: a63457878b12b1be3d0a09fdc0c93b348f6161c9
+References: bsc#1104353 FATE#326415 bsc#1134983
+
+MAC tnl interrupts are different from other types of RAS and MSI-X
+errors, because some bits, such as OVF/LR/RF, occur during normal link up
+and down.
+
+The driver should clear the status of all MAC tnl interrupt bits but
+shouldn't print any message that would mislead the users.
+
+In case the link goes down and comes back up within a short time for some
+reason, we record when these interrupts occurred, and users can query them
+via debugfs.
+
+Signed-off-by: Weihang Li <liweihang@hisilicon.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 3
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 22 +++++
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 51 +++++++++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 4 +
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4 +
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 11 ++
+ 7 files changed, 96 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -249,6 +249,7 @@ static void hns3_dbg_help(struct hnae3_h
+ dev_info(&h->pdev->dev, "dump mng tbl\n");
+ dev_info(&h->pdev->dev, "dump reset info\n");
+ dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
++ dev_info(&h->pdev->dev, "dump mac tnl status\n");
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+@@ -109,6 +109,9 @@ enum hclge_opcode_type {
+ HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
+ HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
+ HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
++ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
++ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
++ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
+ HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
+
+ /* PFC/Pause commands */
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -984,6 +984,26 @@ static void hclge_dbg_dump_ncl_config(st
+ }
+ }
+
++/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
++ * @hdev: pointer to struct hclge_dev
++ */
++static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
++{
++#define HCLGE_BILLION_NANO_SECONDS 1000000000
++
++ struct hclge_mac_tnl_stats stats;
++ unsigned long rem_nsec;
++
++ dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");
++
++ while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
++ rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
++ dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n",
++ (unsigned long)stats.time, rem_nsec / 1000,
++ stats.status);
++ }
++}
++
+ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+@@ -1012,6 +1032,8 @@ int hclge_dbg_run_cmd(struct hnae3_handl
+ } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
+ hclge_dbg_dump_ncl_config(hdev,
+ &cmd_buf[sizeof("dump ncl_config")]);
++ } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
++ hclge_dbg_dump_mac_tnl_status(hdev);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return -EINVAL;
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -692,6 +692,16 @@ static int hclge_cmd_query_error(struct
+ return ret;
+ }
+
++static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
++{
++ struct hclge_desc desc;
++
++ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
++ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
++
++ return hclge_cmd_send(&hdev->hw, &desc, 1);
++}
++
+ static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
+ {
+ struct device *dev = &hdev->pdev->dev;
+@@ -911,6 +921,21 @@ static int hclge_config_mac_err_int(stru
+ return ret;
+ }
+
++int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
++{
++ struct hclge_desc desc;
++
++ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
++ if (en)
++ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
++ else
++ desc.data[0] = 0;
++
++ desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
++
++ return hclge_cmd_send(&hdev->hw, &desc, 1);
++}
++
+ static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+ {
+@@ -1611,6 +1636,7 @@ pci_ers_result_t hclge_handle_hw_ras_err
+ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+ {
++ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ enum hnae3_reset_type reset_level;
+@@ -1745,6 +1771,31 @@ int hclge_handle_hw_msix_error(struct hc
+ set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
++ /* query and clear mac tnl interruptions */
++ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
++ true);
++ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
++ if (ret) {
++ dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
++ goto msi_error;
++ }
++
++ status = le32_to_cpu(desc->data[0]);
++ if (status) {
++ /* When mac tnl interrupt occurs, we record current time and
++ * register status here in a fifo, then clear the status. So
++ * that if link status changes suddenly at some time, we can
++ * query them by debugfs.
++ */
++ mac_tnl_stats.time = local_clock();
++ mac_tnl_stats.status = status;
++ kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
++ ret = hclge_clear_mac_tnl_int(hdev);
++ if (ret)
++ dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
++ set_bit(HNAE3_NONE_RESET, reset_requests);
++ }
++
+ msi_error:
+ kfree(desc);
+ out:
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+@@ -47,6 +47,9 @@
+ #define HCLGE_NCSI_ERR_INT_TYPE 0x9
+ #define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
+ #define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
++#define HCLGE_MAC_TNL_INT_EN GENMASK(7, 0)
++#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(7, 0)
++#define HCLGE_MAC_TNL_INT_CLR GENMASK(7, 0)
+ #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
+ #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
+ #define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
+@@ -115,6 +118,7 @@ struct hclge_hw_error {
+ enum hnae3_reset_type reset_level;
+ };
+
++int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
+ int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
+ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
+ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2248,6 +2248,7 @@ static void hclge_update_link_status(str
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ handle = &hdev->vport[i].nic;
+ client->ops->link_status_change(handle, state);
++ hclge_config_mac_tnl_int(hdev, state);
+ rhandle = &hdev->vport[i].roce;
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+@@ -7973,6 +7974,8 @@ static int hclge_init_ae_dev(struct hnae
+ goto err_mdiobus_unreg;
+ }
+
++ INIT_KFIFO(hdev->mac_tnl_log);
++
+ hclge_dcb_ops_set(hdev);
+
+ timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+@@ -8126,6 +8129,7 @@ static void hclge_uninit_ae_dev(struct h
+ hclge_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
+
++ hclge_config_mac_tnl_int(hdev, false);
+ hclge_hw_error_set_state(hdev, false);
+ hclge_cmd_uninit(hdev);
+ hclge_misc_irq_uninit(hdev);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -7,6 +7,7 @@
+ #include <linux/types.h>
+ #include <linux/phy.h>
+ #include <linux/if_vlan.h>
++#include <linux/kfifo.h>
+
+ #include "hclge_cmd.h"
+ #include "hnae3.h"
+@@ -660,6 +661,12 @@ struct hclge_rst_stats {
+ u32 reset_cnt; /* the number of reset */
+ };
+
++/* time and register status when mac tunnel interruption occur */
++struct hclge_mac_tnl_stats {
++ u64 time;
++ u32 status;
++};
++
+ /* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+@@ -686,6 +693,7 @@ struct hclge_rst_stats {
+ (y) = (_k_ ^ ~_v_) & (_k_); \
+ } while (0)
+
++#define HCLGE_MAC_TNL_LOG_SIZE 8
+ #define HCLGE_VPORT_NUM 256
+ struct hclge_dev {
+ struct pci_dev *pdev;
+@@ -802,6 +810,9 @@ struct hclge_dev {
+ struct mutex umv_mutex; /* protect share_umv_size */
+
+ struct mutex vport_cfg_mutex; /* Protect stored vf table */
++
++ DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
++ HCLGE_MAC_TNL_LOG_SIZE);
+ };
+
+ /* VPort level vlan tag configuration for TX direction */
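
On the debugfs side, the dump splits the stored local_clock() nanosecond
value into seconds plus a sub-second remainder, which is what the do_div()
call in hclge_dbg_dump_mac_tnl_status() computes. A userspace sketch of the
same split (sample value and millisecond display are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t ns = 1234567890123ULL;     /* pretend local_clock() value */
            uint64_t sec = ns / 1000000000ULL;  /* do_div() leaves the quotient */
            uint64_t rem = ns % 1000000000ULL;  /* ...and returns the remainder */

            printf("[%07llu.%03llu]\n", (unsigned long long)sec,
                   (unsigned long long)(rem / 1000000)); /* [0001234.567] */
            return 0;
    }
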
diff --git a/patches.drivers/net-hns3-Add-support-for-netif-message-level-setting.patch b/patches.drivers/net-hns3-Add-support-for-netif-message-level-setting.patch
new file mode 100644
index 0000000000..621222ff3b
--- /dev/null
+++ b/patches.drivers/net-hns3-Add-support-for-netif-message-level-setting.patch
@@ -0,0 +1,231 @@
+From: Yonglong Liu <liuyonglong@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:43 +0800
+Subject: net: hns3: Add support for netif message level settings
+Patch-mainline: v5.2-rc1
+Git-commit: bb87be87b1658f7ee95c0b7625553a6e7f8fea1c
+References: bsc#1104353 FATE#326415 bsc#1134989
+
+This patch adds support for network interface message level
+settings. The message level can be changed by module parameter
+or ethtool.
+
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3 +
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 33 +++++++++++++-
+ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 18 +++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 29 ++++++++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 20 ++++++++
+ 5 files changed, 101 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -590,6 +590,9 @@ struct hnae3_handle {
+
+ u8 netdev_flags;
+ struct dentry *hnae3_dbgfs;
++
++ /* Network interface message level enabled bits */
++ u32 msg_enable;
+ };
+
+ #define hnae3_set_field(origin, mask, shift, val) \
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -35,6 +35,13 @@ static const char hns3_driver_string[] =
+ static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
+ static struct hnae3_client client;
+
++static int debug = -1;
++module_param(debug, int, 0);
++MODULE_PARM_DESC(debug, " Network interface message level setting");
++
++#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
++ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
++
+ /* hns3_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+@@ -3736,6 +3743,21 @@ static void hns3_client_stop(struct hnae
+ handle->ae_algo->ops->client_stop(handle);
+ }
+
++static void hns3_info_show(struct hns3_nic_priv *priv)
++{
++ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
++
++ dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
++ dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps);
++ dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size);
++ dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size);
++ dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len);
++ dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc);
++ dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc);
++ dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc);
++ dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu);
++}
++
+ static int hns3_client_init(struct hnae3_handle *handle)
+ {
+ struct pci_dev *pdev = handle->pdev;
+@@ -3757,6 +3779,8 @@ static int hns3_client_init(struct hnae3
+ priv->tx_timeout_count = 0;
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
++ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
++
+ handle->kinfo.netdev = netdev;
+ handle->priv = (void *)priv;
+
+@@ -3823,6 +3847,9 @@ static int hns3_client_init(struct hnae3
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
++ if (netif_msg_drv(handle))
++ hns3_info_show(priv);
++
+ return ret;
+
+ out_client_start:
+@@ -3897,11 +3924,13 @@ static void hns3_link_status_change(stru
+ if (linkup) {
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+- netdev_info(netdev, "link up\n");
++ if (netif_msg_link(handle))
++ netdev_info(netdev, "link up\n");
+ } else {
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+- netdev_info(netdev, "link down\n");
++ if (netif_msg_link(handle))
++ netdev_info(netdev, "link down\n");
+ }
+ }
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -1110,6 +1110,20 @@ static int hns3_set_phys_id(struct net_d
+ return h->ae_algo->ops->set_led_id(h, state);
+ }
+
++static u32 hns3_get_msglevel(struct net_device *netdev)
++{
++ struct hnae3_handle *h = hns3_get_handle(netdev);
++
++ return h->msg_enable;
++}
++
++static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
++{
++ struct hnae3_handle *h = hns3_get_handle(netdev);
++
++ h->msg_enable = msg_level;
++}
++
+ static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_ringparam = hns3_get_ringparam,
+@@ -1130,6 +1144,8 @@ static const struct ethtool_ops hns3vf_e
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .get_link = hns3_get_link,
++ .get_msglevel = hns3_get_msglevel,
++ .set_msglevel = hns3_set_msglevel,
+ };
+
+ static const struct ethtool_ops hns3_ethtool_ops = {
+@@ -1159,6 +1175,8 @@ static const struct ethtool_ops hns3_eth
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .set_phys_id = hns3_set_phys_id,
++ .get_msglevel = hns3_get_msglevel,
++ .set_msglevel = hns3_set_msglevel,
+ };
+
+ void hns3_ethtool_set_ops(struct net_device *netdev)
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -7564,6 +7564,32 @@ static void hclge_get_mdix_mode(struct h
+ *tp_mdix = ETH_TP_MDI;
+ }
+
++static void hclge_info_show(struct hclge_dev *hdev)
++{
++ struct device *dev = &hdev->pdev->dev;
++
++ dev_info(dev, "PF info begin:\n");
++
++ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
++ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
++ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
++ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
++ dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
++ dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
++ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
++ dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
++ dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
++ dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
++ dev_info(dev, "This is %s PF\n",
++ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
++ dev_info(dev, "DCB %s\n",
++ hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
++ dev_info(dev, "MQPRIO %s\n",
++ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
++
++ dev_info(dev, "PF info end.\n");
++}
++
+ static int hclge_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+ {
+@@ -7585,6 +7611,9 @@ static int hclge_init_client_instance(st
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
++ if (netif_msg_drv(&hdev->vport->nic))
++ hclge_info_show(hdev);
++
+ if (hdev->roce_client &&
+ hnae3_dev_roce_supported(hdev)) {
+ struct hnae3_client *rc = hdev->roce_client;
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2222,6 +2222,23 @@ static void hclgevf_misc_irq_uninit(stru
+ hclgevf_free_vector(hdev, 0);
+ }
+
++static void hclgevf_info_show(struct hclgevf_dev *hdev)
++{
++ struct device *dev = &hdev->pdev->dev;
++
++ dev_info(dev, "VF info begin:\n");
++
++ dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
++ dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
++ dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
++ dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
++ dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
++ dev_info(dev, "PF media type of this VF: %d\n",
++ hdev->hw.mac.media_type);
++
++ dev_info(dev, "VF info end.\n");
++}
++
+ static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+ {
+@@ -2239,6 +2256,9 @@ static int hclgevf_init_client_instance(
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
++ if (netif_msg_drv(&hdev->nic))
++ hclgevf_info_show(hdev);
++
+ if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) {
+ struct hnae3_client *rc = hdev->roce_client;
+
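
For illustration only (not part of the patch): a minimal userspace sketch
showing how the new get_msglevel/set_msglevel hooks are exercised through
the ethtool ioctl. The interface name "eth0" is an assumption, and the mask
mirrors the DEFAULT_MSG_LEVEL bits added above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_value eval = { .cmd = ETHTOOL_GMSGLVL };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed name */
        ifr.ifr_data = (char *)&eval;

        /* read the current message level (new hns3_get_msglevel path) */
        if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                printf("current msglvl: 0x%x\n", eval.data);

        /* same bits as DEFAULT_MSG_LEVEL: PROBE|LINK|IFDOWN|IFUP */
        eval.cmd = ETHTOOL_SMSGLVL;
        eval.data = 0x36;
        if (ioctl(fd, SIOCETHTOOL, &ifr) != 0)
                perror("ETHTOOL_SMSGLVL");

        close(fd);
        return 0;
}

This is the programmatic equivalent of "ethtool -s eth0 msglvl 0x36".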
diff --git a/patches.drivers/net-hns3-Make-hclge_destroy_cmd_queue-static.patch b/patches.drivers/net-hns3-Make-hclge_destroy_cmd_queue-static.patch
new file mode 100644
index 0000000000..dc76367a66
--- /dev/null
+++ b/patches.drivers/net-hns3-Make-hclge_destroy_cmd_queue-static.patch
@@ -0,0 +1,30 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Wed, 20 Mar 2019 21:37:13 +0800
+Subject: net: hns3: Make hclge_destroy_cmd_queue static
+Patch-mainline: v5.2-rc1
+Git-commit: 881d7afdff165159a7138e56523889eac27d84c8
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+Fix sparse warning:
+
+drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c:414:6:
+ warning: symbol 'hclge_destroy_cmd_queue' was not declared. Should it be static?
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+@@ -412,7 +412,7 @@ static void hclge_destroy_queue(struct h
+ spin_unlock(&ring->lock);
+ }
+
+-void hclge_destroy_cmd_queue(struct hclge_hw *hw)
++static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
+ {
+ hclge_destroy_queue(&hw->cmq.csq);
+ hclge_destroy_queue(&hw->cmq.crq);
diff --git a/patches.drivers/net-hns3-Make-hclgevf_update_link_mode-static.patch b/patches.drivers/net-hns3-Make-hclgevf_update_link_mode-static.patch
new file mode 100644
index 0000000000..b5538c13d8
--- /dev/null
+++ b/patches.drivers/net-hns3-Make-hclgevf_update_link_mode-static.patch
@@ -0,0 +1,30 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Tue, 19 Mar 2019 22:46:41 +0800
+Subject: net: hns3: Make hclgevf_update_link_mode static
+Patch-mainline: v5.2-rc1
+Git-commit: 538abaf38e75857cbc0eda3b28994808878e3017
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+Fix sparse warning:
+
+drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c:407:6:
+ warning: symbol 'hclgevf_update_link_mode' was not declared. Should it be static?
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -404,7 +404,7 @@ void hclgevf_update_link_status(struct h
+ }
+ }
+
+-void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
++static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+ {
+ #define HCLGEVF_ADVERTISING 0
+ #define HCLGEVF_SUPPORTED 1
diff --git a/patches.drivers/net-hns3-add-counter-for-times-RX-pages-gets-allocat.patch b/patches.drivers/net-hns3-add-counter-for-times-RX-pages-gets-allocat.patch
new file mode 100644
index 0000000000..281be17acb
--- /dev/null
+++ b/patches.drivers/net-hns3-add-counter-for-times-RX-pages-gets-allocat.patch
@@ -0,0 +1,58 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:43 +0800
+Subject: net: hns3: add counter for times RX pages gets allocated
+Patch-mainline: v5.2-rc1
+Git-commit: d21ff4f90d975f5027678eb84e0d53fb8ca19c9b
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+Currently, "ethtool --statistics" can show how many times RX
+pages have been reused, but there is no counter for RX pages that
+are not reused.
+
+This patch adds a non_reuse_pg counter to make performance issues
+easier to debug, since without such a counter it is hard to tell
+whether RX pages are being reused.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ++++
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1 +
+ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 1 +
+ 3 files changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2316,6 +2316,10 @@ hns3_nic_alloc_rx_buffers(struct hns3_en
+ break;
+ }
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
++
++ u64_stats_update_begin(&ring->syncp);
++ ring->stats.non_reuse_pg++;
++ u64_stats_update_end(&ring->syncp);
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -389,6 +389,7 @@ struct ring_stats {
+ u64 l2_err;
+ u64 l3l4_csum_err;
+ u64 rx_multicast;
++ u64 non_reuse_pg;
+ };
+ };
+ };
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -49,6 +49,7 @@ static const struct hns3_stats hns3_rxq_
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("multicast", rx_multicast),
++ HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
+ };
+
+ #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
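
The counter above is bumped inside a u64_stats_update_begin()/end() pair so
that 32-bit readers see an untorn 64-bit value. A kernel-style sketch of the
matching reader side follows; hns3_read_non_reuse_pg is a hypothetical
helper, and older kernels may need the _irq fetch variants instead.

static u64 hns3_read_non_reuse_pg(struct hns3_enet_ring *ring)
{
        unsigned int start;
        u64 val;

        do {
                start = u64_stats_fetch_begin(&ring->syncp);
                val = ring->stats.non_reuse_pg;
        } while (u64_stats_fetch_retry(&ring->syncp, start));

        return val;
}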
diff --git a/patches.drivers/net-hns3-add-error-handler-for-initializing-command-.patch b/patches.drivers/net-hns3-add-error-handler-for-initializing-command-.patch
new file mode 100644
index 0000000000..a7d2ba8ddb
--- /dev/null
+++ b/patches.drivers/net-hns3-add-error-handler-for-initializing-command-.patch
@@ -0,0 +1,84 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:34 +0800
+Subject: net: hns3: add error handler for initializing command queue
+Patch-mainline: v5.2-rc1
+Git-commit: 4339ef396ab65a61f7f22f36d7ba94b6e9e0939b
+References: bsc#1104353 FATE#326415 bsc#1135058
+
+This patch adds an error handler for command queue initialization
+failures, for both the PF and the VF.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 11 ++++++++---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 11 ++++++++---
+ 2 files changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+@@ -374,21 +374,26 @@ int hclge_cmd_init(struct hclge_dev *hde
+ * reset may happen when lower level reset is being processed.
+ */
+ if ((hclge_is_reset_pending(hdev))) {
+- set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto err_cmd_init;
+ }
+
+ ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "firmware version query failed %d\n", ret);
+- return ret;
++ goto err_cmd_init;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
++
++err_cmd_init:
++ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
++
++ return ret;
+ }
+
+ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+@@ -358,8 +358,8 @@ int hclgevf_cmd_init(struct hclgevf_dev
+ * reset may happen when lower level reset is being processed.
+ */
+ if (hclgevf_is_reset_pending(hdev)) {
+- set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+- return -EBUSY;
++ ret = -EBUSY;
++ goto err_cmd_init;
+ }
+
+ /* get firmware version */
+@@ -367,13 +367,18 @@ int hclgevf_cmd_init(struct hclgevf_dev
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to query firmware version\n", ret);
+- return ret;
++ goto err_cmd_init;
+ }
+ hdev->fw_version = version;
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
+
+ return 0;
++
++err_cmd_init:
++ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
++
++ return ret;
+ }
+
+ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
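
The design choice here is a single error label, so every failure path
records the "command disabled" state exactly once. A standalone C sketch of
the same pattern, with illustrative names and a made-up firmware version:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

static bool cmd_disabled;

static bool reset_pending(void)
{
        return false;
}

static int query_firmware_version(unsigned int *ver)
{
        *ver = 0x01020304;      /* made-up version */
        return 0;
}

static int cmd_init(void)
{
        unsigned int version;
        int ret;

        if (reset_pending()) {
                ret = -EBUSY;
                goto err_cmd_init;
        }

        ret = query_firmware_version(&version);
        if (ret) {
                fprintf(stderr, "firmware version query failed %d\n", ret);
                goto err_cmd_init;
        }

        printf("The firmware version is %08x\n", version);
        return 0;

err_cmd_init:
        cmd_disabled = true;    /* record the failure exactly once */
        return ret;
}

int main(void)
{
        return cmd_init() ? 1 : 0;
}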
diff --git a/patches.drivers/net-hns3-add-function-type-check-for-debugfs-help-in.patch b/patches.drivers/net-hns3-add-function-type-check-for-debugfs-help-in.patch
new file mode 100644
index 0000000000..45400eba0e
--- /dev/null
+++ b/patches.drivers/net-hns3-add-function-type-check-for-debugfs-help-in.patch
@@ -0,0 +1,66 @@
+From: Yufeng Mo <moyufeng@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:47 +0800
+Subject: net: hns3: add function type check for debugfs help information
+Patch-mainline: v5.2-rc1
+Git-commit: 97afd47b36dbe976c72191cf862a92d2e8756fa9
+References: bsc#1104353 FATE#326415 bsc#1134980
+
+The PF supports all debugfs commands, but the VF only supports a
+subset of them. So the VF should not show help information for
+unsupported commands.
+
+This patch adds a check so that PF and VF each show only the help
+information for the commands they support.
+
+Signed-off-by: Yufeng Mo <moyufeng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 6 ++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +-
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 1 +
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -239,6 +239,10 @@ static void hns3_dbg_help(struct hnae3_h
+ dev_info(&h->pdev->dev, "queue info [number]\n");
+ dev_info(&h->pdev->dev, "queue map\n");
+ dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n");
++
++ if (!hns3_is_phys_func(h->pdev))
++ return;
++
+ dev_info(&h->pdev->dev, "dump fd tcam\n");
+ dev_info(&h->pdev->dev, "dump tc\n");
+ dev_info(&h->pdev->dev, "dump tm map [q_num]\n");
+@@ -344,6 +348,8 @@ static ssize_t hns3_dbg_cmd_write(struct
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
++ else
++ ret = -EOPNOTSUPP;
+
+ if (ret)
+ hns3_dbg_help(handle);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1764,7 +1764,7 @@ static const struct net_device_ops hns3_
+ .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+ };
+
+-static bool hns3_is_phys_func(struct pci_dev *pdev)
++bool hns3_is_phys_func(struct pci_dev *pdev)
+ {
+ u32 dev_id = pdev->device;
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -663,6 +663,7 @@ int hns3_init_all_ring(struct hns3_nic_p
+ int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
+ int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
++bool hns3_is_phys_func(struct pci_dev *pdev);
+ int hns3_clean_rx_ring(
+ struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
diff --git a/patches.drivers/net-hns3-add-hns3_gro_complete-for-HW-GRO-process.patch b/patches.drivers/net-hns3-add-hns3_gro_complete-for-HW-GRO-process.patch
new file mode 100644
index 0000000000..adafe3508e
--- /dev/null
+++ b/patches.drivers/net-hns3-add-hns3_gro_complete-for-HW-GRO-process.patch
@@ -0,0 +1,301 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:40 +0800
+Subject: net: hns3: add hns3_gro_complete for HW GRO process
+Patch-mainline: v5.2-rc1
+Git-commit: d474d88f882610850abbf0ec6cf81ff90014c8ed
+References: bsc#1104353 FATE#326415 bsc#1135051
+
+When a GRO packet is received by the driver, the cwr field in
+struct tcphdr needs to be checked to decide whether to set
+SKB_GSO_TCP_ECN in skb_shinfo(skb)->gso_type.
+
+So this patch adds hns3_gro_complete to do that, and adds
+hns3_handle_bdinfo, which calls both hns3_gro_complete and
+hns3_rx_checksum.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 198 ++++++++++++++----------
+ 1 file changed, 123 insertions(+), 75 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2332,6 +2332,44 @@ static void hns3_nic_reuse_page(struct s
+ }
+ }
+
++static int hns3_gro_complete(struct sk_buff *skb)
++{
++ __be16 type = skb->protocol;
++ struct tcphdr *th;
++ int depth = 0;
++
++ while (type == htons(ETH_P_8021Q)) {
++ struct vlan_hdr *vh;
++
++ if ((depth + VLAN_HLEN) > skb_headlen(skb))
++ return -EFAULT;
++
++ vh = (struct vlan_hdr *)(skb->data + depth);
++ type = vh->h_vlan_encapsulated_proto;
++ depth += VLAN_HLEN;
++ }
++
++ if (type == htons(ETH_P_IP)) {
++ depth += sizeof(struct iphdr);
++ } else if (type == htons(ETH_P_IPV6)) {
++ depth += sizeof(struct ipv6hdr);
++ } else {
++ netdev_err(skb->dev,
++ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
++ be16_to_cpu(type), depth);
++ return -EFAULT;
++ }
++
++ th = (struct tcphdr *)(skb->data + depth);
++ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
++ if (th->cwr)
++ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
++
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ return 0;
++}
++
+ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+ u32 l234info, u32 bd_base_info)
+ {
+@@ -2346,12 +2384,6 @@ static void hns3_rx_checksum(struct hns3
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+- /* We MUST enable hardware checksum before enabling hardware GRO */
+- if (skb_shinfo(skb)->gso_size) {
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- return;
+- }
+-
+ /* check if hardware has done checksum */
+ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
+ return;
+@@ -2568,8 +2600,9 @@ static int hns3_add_frag(struct hns3_ene
+ return 0;
+ }
+
+-static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+- u32 bd_base_info)
++static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
++ struct sk_buff *skb, u32 l234info,
++ u32 bd_base_info)
+ {
+ u16 gro_count;
+ u32 l3_type;
+@@ -2577,12 +2610,11 @@ static void hns3_set_gro_param(struct sk
+ gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+- if (!gro_count)
+- return;
++ if (!gro_count) {
++ hns3_rx_checksum(ring, skb, l234info, bd_base_info);
++ return 0;
++ }
+
+- /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+- * to skb_shinfo(skb)->gso_segs
+- */
+ NAPI_GRO_CB(skb)->count = gro_count;
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+@@ -2592,13 +2624,13 @@ static void hns3_set_gro_param(struct sk
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+- return;
++ return -EFAULT;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+- if (skb_shinfo(skb)->gso_size)
+- tcp_gro_complete(skb);
++
++ return hns3_gro_complete(skb);
+ }
+
+ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
+@@ -2623,16 +2655,85 @@ static void hns3_set_rx_skb_rss_type(str
+ skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
+ }
+
+-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
+- struct sk_buff **out_skb)
++static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
++ struct hns3_desc *desc)
+ {
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
++ u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
++ u32 l234info = le32_to_cpu(desc->rx.l234_info);
+ enum hns3_pkt_l2t_type l2_frame_type;
++ unsigned int len;
++ int ret;
++
++ /* Based on hw strategy, the tag offloaded will be stored at
++ * ot_vlan_tag in two layer tag case, and stored at vlan_tag
++ * in one layer tag case.
++ */
++ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
++ u16 vlan_tag;
++
++ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
++ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
++ vlan_tag);
++ }
++
++ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
++ u64_stats_update_begin(&ring->syncp);
++ ring->stats.non_vld_descs++;
++ u64_stats_update_end(&ring->syncp);
++
++ return -EINVAL;
++ }
++
++ if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
++ BIT(HNS3_RXD_L2E_B))))) {
++ u64_stats_update_begin(&ring->syncp);
++ if (l234info & BIT(HNS3_RXD_L2E_B))
++ ring->stats.l2_err++;
++ else
++ ring->stats.err_pkt_len++;
++ u64_stats_update_end(&ring->syncp);
++
++ return -EFAULT;
++ }
++
++ len = skb->len;
++
++ /* Do update ip stack process */
++ skb->protocol = eth_type_trans(skb, netdev);
++
++ /* This is needed in order to enable forwarding support */
++ ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
++ if (unlikely(ret)) {
++ u64_stats_update_begin(&ring->syncp);
++ ring->stats.rx_err_cnt++;
++ u64_stats_update_end(&ring->syncp);
++ return ret;
++ }
++
++ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
++ HNS3_RXD_DMAC_S);
++
++ u64_stats_update_begin(&ring->syncp);
++ ring->stats.rx_pkts++;
++ ring->stats.rx_bytes += len;
++
++ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
++ ring->stats.rx_multicast++;
++
++ u64_stats_update_end(&ring->syncp);
++
++ ring->tqp_vector->rx_group.total_bytes += len;
++ return 0;
++}
++
++static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
++ struct sk_buff **out_skb)
++{
+ struct sk_buff *skb = ring->skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *desc;
+ u32 bd_base_info;
+- u32 l234info;
+ int length;
+ int ret;
+
+@@ -2692,62 +2793,12 @@ static int hns3_handle_rx_bd(struct hns3
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
+
+- l234info = le32_to_cpu(desc->rx.l234_info);
+- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+-
+- /* Based on hw strategy, the tag offloaded will be stored at
+- * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+- * in one layer tag case.
+- */
+- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+- u16 vlan_tag;
+-
+- if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+- __vlan_hwaccel_put_tag(skb,
+- htons(ETH_P_8021Q),
+- vlan_tag);
+- }
+-
+- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) {
+- u64_stats_update_begin(&ring->syncp);
+- ring->stats.non_vld_descs++;
+- u64_stats_update_end(&ring->syncp);
+-
+- dev_kfree_skb_any(skb);
+- return -EINVAL;
+- }
+-
+- if (unlikely((!desc->rx.pkt_len) ||
+- (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+- BIT(HNS3_RXD_L2E_B))))) {
+- u64_stats_update_begin(&ring->syncp);
+- if (l234info & BIT(HNS3_RXD_L2E_B))
+- ring->stats.l2_err++;
+- else
+- ring->stats.err_pkt_len++;
+- u64_stats_update_end(&ring->syncp);
+-
++ ret = hns3_handle_bdinfo(ring, skb, desc);
++ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+- return -EFAULT;
++ return ret;
+ }
+
+-
+- l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+- HNS3_RXD_DMAC_S);
+- u64_stats_update_begin(&ring->syncp);
+- if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+- ring->stats.rx_multicast++;
+-
+- ring->stats.rx_pkts++;
+- ring->stats.rx_bytes += skb->len;
+- u64_stats_update_end(&ring->syncp);
+-
+- ring->tqp_vector->rx_group.total_bytes += skb->len;
+-
+- /* This is needed in order to enable forwarding support */
+- hns3_set_gro_param(skb, l234info, bd_base_info);
+-
+- hns3_rx_checksum(ring, skb, l234info, bd_base_info);
+ *out_skb = skb;
+ hns3_set_rx_skb_rss_type(ring, skb);
+
+@@ -2759,7 +2810,6 @@ int hns3_clean_rx_ring(
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+ {
+ #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+- struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ int recv_pkts, recv_bds, clean_count, err;
+ int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ struct sk_buff *skb = ring->skb;
+@@ -2796,8 +2846,6 @@ int hns3_clean_rx_ring(
+ continue;
+ }
+
+- /* Do update ip stack process */
+- skb->protocol = eth_type_trans(skb, netdev);
+ rx_fn(ring, skb);
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
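
A standalone sketch of the header walk hns3_gro_complete performs: skip any
number of 802.1Q tags, then classify the L3 protocol. The frame bytes and
the local vlan_hdr definition are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_8021Q     0x8100
#define ETH_P_IP        0x0800
#define VLAN_HLEN       4

struct vlan_hdr {                       /* same layout as the kernel's */
        uint16_t h_vlan_TCI;
        uint16_t h_vlan_encapsulated_proto;
};

/* Return the offset of the L3 header, or -1 on malformed input. */
static int l3_offset(const uint8_t *data, int len, uint16_t proto)
{
        int depth = 0;

        while (proto == htons(ETH_P_8021Q)) {
                const struct vlan_hdr *vh;

                if (depth + VLAN_HLEN > len)
                        return -1;
                vh = (const struct vlan_hdr *)(data + depth);
                proto = vh->h_vlan_encapsulated_proto;
                depth += VLAN_HLEN;
        }
        return proto == htons(ETH_P_IP) ? depth : -1;
}

int main(void)
{
        /* one 802.1Q tag (VID 5), then an IPv4 ethertype, then padding */
        uint8_t frame[8] = { 0x00, 0x05, 0x08, 0x00 };

        printf("IPv4 header at offset %d\n",
               l3_offset(frame, sizeof(frame), htons(ETH_P_8021Q)));
        return 0;
}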
diff --git a/patches.drivers/net-hns3-add-linearizing-checking-for-TSO-case.patch b/patches.drivers/net-hns3-add-linearizing-checking-for-TSO-case.patch
new file mode 100644
index 0000000000..398d5e29c5
--- /dev/null
+++ b/patches.drivers/net-hns3-add-linearizing-checking-for-TSO-case.patch
@@ -0,0 +1,92 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:44 +0800
+Subject: net: hns3: add linearizing checking for TSO case
+Patch-mainline: v5.2-rc1
+Git-commit: db4970aa92a148389826057290cd45bb30f5650e
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+The HW requires every window of 8 continuous buffers to hold more
+data than the MSS. We simplify this by ensuring that skb_headlen
+plus the first 7 continuous frags is larger than the GSO header
+length plus the MSS, and that each remaining window of 7 continuous
+frags is larger than the MSS, except for the last 7 frags.
+
+This patch adds hns3_skb_need_linearized to perform this check for
+the TSO case.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 45 ++++++++++++++++++++++++
+ 1 file changed, 45 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1181,6 +1181,47 @@ static int hns3_nic_bd_num(struct sk_buf
+ return bd_num;
+ }
+
++static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
++{
++ if (!skb->encapsulation)
++ return skb_transport_offset(skb) + tcp_hdrlen(skb);
++
++ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
++}
++
++/* HW need every continuous 8 buffer data to be larger than MSS,
++ * we simplify it by ensuring skb_headlen + the first continuous
++ * 7 frags to be larger than gso header len + mss, and the remaining
++ * continuous 7 frags to be larger than MSS except the last 7 frags.
++ */
++static bool hns3_skb_need_linearized(struct sk_buff *skb)
++{
++ int bd_limit = HNS3_MAX_BD_PER_FRAG - 1;
++ unsigned int tot_len = 0;
++ int i;
++
++ for (i = 0; i < bd_limit; i++)
++ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
++
++ /* ensure headlen + the first 7 frags is greater than mss + header
++ * and the first 7 frags is greater than mss.
++ */
++ if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size +
++ hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size))
++ return true;
++
++ /* ensure the remaining continuous 7 buffer is greater than mss */
++ for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) {
++ tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]);
++ tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]);
++
++ if (tot_len < skb_shinfo(skb)->gso_size)
++ return true;
++ }
++
++ return false;
++}
++
+ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct sk_buff **out_skb)
+ {
+@@ -1194,6 +1235,9 @@ static int hns3_nic_maybe_stop_tx(struct
+ if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+ struct sk_buff *new_skb;
+
++ if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
++ goto out;
++
+ bd_num = hns3_tx_bd_count(skb->len);
+ if (unlikely(ring_space(ring) < bd_num))
+ return -EBUSY;
+@@ -1209,6 +1253,7 @@ static int hns3_nic_maybe_stop_tx(struct
+ u64_stats_update_end(&ring->syncp);
+ }
+
++out:
+ if (unlikely(ring_space(ring) < bd_num))
+ return -EBUSY;
+
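
A standalone sketch of the window rule hns3_skb_need_linearized enforces,
run over made-up frag sizes; HNS3_MAX_BD_PER_FRAG is assumed to be 8 here,
so each window is the headlen (or a frag) plus 7 following frags.

#include <stdio.h>
#include <stdbool.h>

#define MAX_BD_PER_FRAG 8

static bool need_linearize(unsigned int headlen, const unsigned int *frag,
                           int nr_frags, unsigned int mss,
                           unsigned int hdr_len)
{
        int win = MAX_BD_PER_FRAG - 1;  /* 7 frags per window */
        unsigned int tot = 0;
        int i;

        for (i = 0; i < win && i < nr_frags; i++)
                tot += frag[i];

        /* headlen + first 7 frags must cover header + one MSS */
        if (tot + headlen < mss + hdr_len || tot < mss)
                return true;

        /* every later 7-frag window (except the last) must cover MSS */
        for (i = 0; i < nr_frags - win - 1; i++) {
                tot -= frag[i];
                tot += frag[i + win];
                if (tot < mss)
                        return true;
        }
        return false;
}

int main(void)
{
        unsigned int frags[10] = { 256, 256, 256, 256, 256,
                                   256, 256, 64, 64, 64 };

        /* second window sums to 1408 < 1460, so this prints 1 */
        printf("linearize needed: %d\n",
               need_linearize(128, frags, 10, 1460, 54));
        return 0;
}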
diff --git a/patches.drivers/net-hns3-add-protect-when-handling-mac-addr-list.patch b/patches.drivers/net-hns3-add-protect-when-handling-mac-addr-list.patch
new file mode 100644
index 0000000000..75acc56384
--- /dev/null
+++ b/patches.drivers/net-hns3-add-protect-when-handling-mac-addr-list.patch
@@ -0,0 +1,68 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:55 +0800
+Subject: net: hns3: add protect when handling mac addr list
+Patch-mainline: v5.2-rc1
+Git-commit: 389775a6605e040dddea21a778a88eaaa57c068d
+References: bsc#1104353 FATE#326415
+
+hns3_recover_hw_addr() and hns3_remove_hw_addr() walk the
+netdev->uc and netdev->mc lists. We should add locking to protect
+these walks.
+
+Fixes: f05e21097121 ("net: hns3: Clear mac vlan table entries when unload driver or function reset")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3790,12 +3790,13 @@ static int hns3_recover_hw_addr(struct n
+ struct netdev_hw_addr *ha, *tmp;
+ int ret = 0;
+
++ netif_addr_lock_bh(ndev);
+ /* go through and sync uc_addr entries to the device */
+ list = &ndev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_uc_sync(ndev, ha->addr);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ /* go through and sync mc_addr entries to the device */
+@@ -3803,9 +3804,11 @@ static int hns3_recover_hw_addr(struct n
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ ret = hns3_nic_mc_sync(ndev, ha->addr);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
++out:
++ netif_addr_unlock_bh(ndev);
+ return ret;
+ }
+
+@@ -3816,6 +3819,7 @@ static void hns3_remove_hw_addr(struct n
+
+ hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+
++ netif_addr_lock_bh(netdev);
+ /* go through and unsync uc_addr entries to the device */
+ list = &netdev->uc;
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+@@ -3826,6 +3830,8 @@ static void hns3_remove_hw_addr(struct n
+ list_for_each_entry_safe(ha, tmp, &list->list, list)
+ if (ha->refcount > 1)
+ hns3_nic_mc_unsync(netdev, ha->addr);
++
++ netif_addr_unlock_bh(netdev);
+ }
+
+ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
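
A kernel-style sketch of the locking pattern this fix applies: the uc/mc
lists can change underneath us, so the whole walk is wrapped in the BH-safe
netdev address lock. sync_one_addr is a hypothetical helper standing in for
hns3_nic_uc_sync().

static int sync_uc_addrs(struct net_device *ndev)
{
        struct netdev_hw_addr *ha, *tmp;
        int ret = 0;

        netif_addr_lock_bh(ndev);
        list_for_each_entry_safe(ha, tmp, &ndev->uc.list, list) {
                ret = sync_one_addr(ndev, ha->addr);  /* hypothetical */
                if (ret)
                        break;
        }
        netif_addr_unlock_bh(ndev);

        return ret;
}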
diff --git a/patches.drivers/net-hns3-add-queue-s-statistics-update-to-service-ta.patch b/patches.drivers/net-hns3-add-queue-s-statistics-update-to-service-ta.patch
new file mode 100644
index 0000000000..37f15e9914
--- /dev/null
+++ b/patches.drivers/net-hns3-add-queue-s-statistics-update-to-service-ta.patch
@@ -0,0 +1,66 @@
+From: liuzhongzhu <liuzhongzhu@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:46 +0800
+Subject: net: hns3: add queue's statistics update to service task
+Patch-mainline: v5.2-rc1
+Git-commit: db01afeb6614b75299b0ecba06b246143d4b894d
+References: bsc#1104353 FATE#326415 bsc#1134981
+
+This patch updates the VF's TQP statistics in the service task,
+and adds a rate limit so the update does not run too frequently.
+
+Signed-off-by: liuzhongzhu <liuzhongzhu@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 8 ++++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 3 +++
+ 2 files changed, 11 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1649,6 +1649,7 @@ static void hclgevf_service_timer(unsign
+
+ mod_timer(&hdev->service_timer, jiffies + 5 * HZ);
+
++ hdev->stats_timer++;
+ hclgevf_task_schedule(hdev);
+ }
+
+@@ -1769,9 +1770,16 @@ static void hclgevf_keep_alive_task(stru
+
+ static void hclgevf_service_task(struct work_struct *work)
+ {
++ struct hnae3_handle *handle;
+ struct hclgevf_dev *hdev;
+
+ hdev = container_of(work, struct hclgevf_dev, service_task);
++ handle = &hdev->nic;
++
++ if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) {
++ hclgevf_tqps_update_stats(handle);
++ hdev->stats_timer = 0;
++ }
+
+ /* request the link status from the PF. PF would be able to tell VF
+ * about such updates in future so we might remove this later
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -116,6 +116,8 @@
+ #define HCLGEVF_S_IP_BIT BIT(3)
+ #define HCLGEVF_V_TAG_BIT BIT(4)
+
++#define HCLGEVF_STATS_TIMER_INTERVAL (36)
++
+ enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+@@ -281,6 +283,7 @@ struct hclgevf_dev {
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+ u32 flag;
++ u32 stats_timer;
+ };
+
+ static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
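
A standalone sketch of the rate limit being added: the service timer bumps
a tick counter on every run (every 5 seconds in this driver), and the
expensive TQP stats refresh fires only once every
HCLGEVF_STATS_TIMER_INTERVAL ticks, i.e. roughly every three minutes.

#include <stdio.h>

#define STATS_TIMER_INTERVAL 36 /* 36 ticks * 5s timer = ~3 minutes */

static unsigned int stats_timer;

static void update_stats(void)
{
        printf("TQP stats refreshed\n");
}

static void service_task(void)
{
        if (++stats_timer >= STATS_TIMER_INTERVAL) {
                update_stats();
                stats_timer = 0;
        }
}

int main(void)
{
        for (int i = 0; i < 80; i++)    /* simulate 80 timer ticks */
                service_task();         /* refreshes twice, not 80 times */
        return 0;
}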
diff --git a/patches.drivers/net-hns3-add-reset-statistics-for-VF.patch b/patches.drivers/net-hns3-add-reset-statistics-for-VF.patch
new file mode 100644
index 0000000000..bdc1e89833
--- /dev/null
+++ b/patches.drivers/net-hns3-add-reset-statistics-for-VF.patch
@@ -0,0 +1,102 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:37 +0800
+Subject: net: hns3: add reset statistics for VF
+Patch-mainline: v5.2-rc1
+Git-commit: c88a6e7d8801fc5ffcd704b0b1f3e67b6266182b
+References: bsc#1104353 FATE#326415 bsc#1134995
+
+This patch adds some statistics for VF reset.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 10 ++++++++--
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 11 ++++++++++-
+ 2 files changed, 18 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1415,9 +1415,11 @@ static int hclgevf_reset_prepare_wait(st
+ case HNAE3_VF_FUNC_RESET:
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
+ 0, true, NULL, sizeof(u8));
++ hdev->rst_stats.vf_func_rst_cnt++;
+ break;
+ case HNAE3_FLR_RESET:
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
++ hdev->rst_stats.flr_rst_cnt++;
+ break;
+ default:
+ break;
+@@ -1440,7 +1442,7 @@ static int hclgevf_reset(struct hclgevf_
+ * know if device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
+- hdev->reset_count++;
++ hdev->rst_stats.rst_cnt++;
+ rtnl_lock();
+
+ /* bring down the nic to stop any ongoing TX/RX */
+@@ -1466,6 +1468,8 @@ static int hclgevf_reset(struct hclgevf_
+ goto err_reset;
+ }
+
++ hdev->rst_stats.hw_rst_done_cnt++;
++
+ rtnl_lock();
+
+ /* now, re-initialize the nic client and ae device*/
+@@ -1484,6 +1488,7 @@ static int hclgevf_reset(struct hclgevf_
+
+ hdev->last_reset_time = jiffies;
+ ae_dev->reset_type = HNAE3_NONE_RESET;
++ hdev->rst_stats.rst_done_cnt++;
+
+ return ret;
+ err_reset_lock:
+@@ -1803,6 +1808,7 @@ static enum hclgevf_evt_cause hclgevf_ch
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
+ *clearval = cmdq_src_reg;
++ hdev->rst_stats.vf_rst_cnt++;
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
+@@ -2738,7 +2744,7 @@ static unsigned long hclgevf_ae_dev_rese
+ {
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+- return hdev->reset_count;
++ return hdev->rst_stats.hw_rst_done_cnt;
+ }
+
+ static void hclgevf_get_link_mode(struct hnae3_handle *handle,
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -210,6 +210,15 @@ struct hclgevf_misc_vector {
+ int vector_irq;
+ };
+
++struct hclgevf_rst_stats {
++ u32 rst_cnt; /* the number of reset */
++ u32 vf_func_rst_cnt; /* the number of VF function reset */
++ u32 flr_rst_cnt; /* the number of FLR */
++ u32 vf_rst_cnt; /* the number of VF reset */
++ u32 rst_done_cnt; /* the number of reset completed */
++ u32 hw_rst_done_cnt; /* the number of HW reset completed */
++};
++
+ struct hclgevf_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+@@ -227,7 +236,7 @@ struct hclgevf_dev {
+ #define HCLGEVF_RESET_REQUESTED 0
+ #define HCLGEVF_RESET_PENDING 1
+ unsigned long reset_state; /* requested, pending */
+- unsigned long reset_count; /* the number of reset has been done */
++ struct hclgevf_rst_stats rst_stats;
+ u32 reset_attempts;
+
+ u32 fw_version;
diff --git a/patches.drivers/net-hns3-add-reset-statistics-info-for-PF.patch b/patches.drivers/net-hns3-add-reset-statistics-info-for-PF.patch
new file mode 100644
index 0000000000..d5a015ad87
--- /dev/null
+++ b/patches.drivers/net-hns3-add-reset-statistics-info-for-PF.patch
@@ -0,0 +1,175 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:36 +0800
+Subject: net: hns3: add reset statistics info for PF
+Patch-mainline: v5.2-rc1
+Git-commit: f02eb82dfe12a0922b539f8cd3c4151826cae94e
+References: bsc#1104353 FATE#326415 bsc#1134995
+
+This patch adds statistics for the PF's reset information, and
+also provides a debugfs command to dump these statistics.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 22 +++++++++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 12 +++++--
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 13 +++++++
+ 4 files changed, 45 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -247,6 +247,7 @@ static void hns3_dbg_help(struct hnae3_h
+ dev_info(&h->pdev->dev, "dump qos pri map\n");
+ dev_info(&h->pdev->dev, "dump qos buf cfg\n");
+ dev_info(&h->pdev->dev, "dump mng tbl\n");
++ dev_info(&h->pdev->dev, "dump reset info\n");
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -901,6 +901,26 @@ static void hclge_dbg_fd_tcam(struct hcl
+ }
+ }
+
++static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
++{
++ dev_info(&hdev->pdev->dev, "PF reset count: %d\n",
++ hdev->rst_stats.pf_rst_cnt);
++ dev_info(&hdev->pdev->dev, "FLR reset count: %d\n",
++ hdev->rst_stats.flr_rst_cnt);
++ dev_info(&hdev->pdev->dev, "CORE reset count: %d\n",
++ hdev->rst_stats.core_rst_cnt);
++ dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n",
++ hdev->rst_stats.global_rst_cnt);
++ dev_info(&hdev->pdev->dev, "IMP reset count: %d\n",
++ hdev->rst_stats.imp_rst_cnt);
++ dev_info(&hdev->pdev->dev, "reset done count: %d\n",
++ hdev->rst_stats.reset_done_cnt);
++ dev_info(&hdev->pdev->dev, "HW reset done count: %d\n",
++ hdev->rst_stats.hw_reset_done_cnt);
++ dev_info(&hdev->pdev->dev, "reset count: %d\n",
++ hdev->rst_stats.reset_cnt);
++}
++
+ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+@@ -924,6 +944,8 @@ int hclge_dbg_run_cmd(struct hnae3_handl
+ hclge_dbg_dump_mng_table(hdev);
+ } else if (strncmp(cmd_buf, "dump reg", 8) == 0) {
+ hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
++ } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
++ hclge_dbg_dump_rst_info(hdev);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return -EINVAL;
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2360,6 +2360,7 @@ static u32 hclge_check_event_cause(struc
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
++ hdev->rst_stats.imp_rst_cnt++;
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+@@ -2368,6 +2369,7 @@ static u32 hclge_check_event_cause(struc
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
++ hdev->rst_stats.global_rst_cnt++;
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+@@ -2376,6 +2378,7 @@ static u32 hclge_check_event_cause(struc
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
++ hdev->rst_stats.core_rst_cnt++;
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+@@ -2873,6 +2876,7 @@ static int hclge_reset_prepare_wait(stru
+ * after hclge_cmd_init is called.
+ */
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
++ hdev->rst_stats.pf_rst_cnt++;
+ break;
+ case HNAE3_FLR_RESET:
+ /* There is no mechanism for PF to know if VF has stopped IO
+@@ -2881,6 +2885,7 @@ static int hclge_reset_prepare_wait(stru
+ msleep(100);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
++ hdev->rst_stats.flr_rst_cnt++;
+ break;
+ case HNAE3_IMP_RESET:
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+@@ -2961,7 +2966,7 @@ static void hclge_reset(struct hclge_dev
+ * know if device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
+- hdev->reset_count++;
++ hdev->rst_stats.reset_cnt++;
+ /* perform reset of the stack & ae device for a client */
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+@@ -2987,6 +2992,8 @@ static void hclge_reset(struct hclge_dev
+ goto err_reset;
+ }
+
++ hdev->rst_stats.hw_reset_done_cnt++;
++
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset;
+@@ -3030,6 +3037,7 @@ static void hclge_reset(struct hclge_dev
+
+ hdev->last_reset_time = jiffies;
+ hdev->reset_fail_cnt = 0;
++ hdev->rst_stats.reset_done_cnt++;
+ ae_dev->reset_type = HNAE3_NONE_RESET;
+ del_timer(&hdev->reset_timer);
+
+@@ -5224,7 +5232,7 @@ static unsigned long hclge_ae_dev_reset_
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+- return hdev->reset_count;
++ return hdev->rst_stats.hw_reset_done_cnt;
+ }
+
+ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -649,6 +649,17 @@ struct hclge_vport_vlan_cfg {
+ u16 vlan_id;
+ };
+
++struct hclge_rst_stats {
++ u32 reset_done_cnt; /* the number of reset has completed */
++ u32 hw_reset_done_cnt; /* the number of HW reset has completed */
++ u32 pf_rst_cnt; /* the number of PF reset */
++ u32 flr_rst_cnt; /* the number of FLR */
++ u32 core_rst_cnt; /* the number of CORE reset */
++ u32 global_rst_cnt; /* the number of GLOBAL */
++ u32 imp_rst_cnt; /* the number of IMP reset */
++ u32 reset_cnt; /* the number of reset */
++};
++
+ /* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+@@ -691,7 +702,7 @@ struct hclge_dev {
+ unsigned long default_reset_request;
+ unsigned long reset_request; /* reset has been requested */
+ unsigned long reset_pending; /* client rst is pending to be served */
+- unsigned long reset_count; /* the number of reset has been done */
++ struct hclge_rst_stats rst_stats;
+ u32 reset_fail_cnt;
+ u32 fw_version;
+ u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
diff --git a/patches.drivers/net-hns3-add-some-debug-info-for-hclgevf_get_mbx_res.patch b/patches.drivers/net-hns3-add-some-debug-info-for-hclgevf_get_mbx_res.patch
new file mode 100644
index 0000000000..f8174a20e7
--- /dev/null
+++ b/patches.drivers/net-hns3-add-some-debug-info-for-hclgevf_get_mbx_res.patch
@@ -0,0 +1,44 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:39 +0800
+Subject: net: hns3: add some debug info for hclgevf_get_mbx_resp()
+Patch-mainline: v5.2-rc1
+Git-commit: fbf3cd3fc11ce80270a80e65a75e4e32a56a5a7d
+References: bsc#1104353 FATE#326415 bsc#1134994
+
+When waiting for a response times out, or the response code does
+not match, more information should be printed for debugging.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct h
+
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+- "VF could not get mbx resp(=%d) from PF in %d tries\n",
+- hdev->mbx_resp.received_resp, i);
++ "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n",
++ code0, code1, hdev->mbx_resp.received_resp, i);
+ return -EIO;
+ }
+
+@@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct h
+
+ if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+ dev_err(&hdev->pdev->dev,
+- "VF could not match resp code(code0=%d,code1=%d), %d",
++ "VF could not match resp code(code0=%d,code1=%d), %d\n",
+ code0, code1, mbx_resp->resp_status);
++ dev_err(&hdev->pdev->dev,
++ "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n",
++ r_code0, r_code1);
+ return -EIO;
+ }
+
diff --git a/patches.drivers/net-hns3-add-some-debug-information-for-hclge_check_.patch b/patches.drivers/net-hns3-add-some-debug-information-for-hclge_check_.patch
new file mode 100644
index 0000000000..4ebdfa4381
--- /dev/null
+++ b/patches.drivers/net-hns3-add-some-debug-information-for-hclge_check_.patch
@@ -0,0 +1,43 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:38 +0800
+Subject: net: hns3: add some debug information for hclge_check_event_cause
+Patch-mainline: v5.2-rc1
+Git-commit: 147175c92a5cd83337157bf784389466bb380eef
+References: bsc#1104353 FATE#326415 bsc#1134994
+
+When a vector0 MSI-X or other event interrupt is received, the
+driver should print the value of the register for debugging.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2383,8 +2383,11 @@ static u32 hclge_check_event_cause(struc
+ }
+
+ /* check for vector0 msix event source */
+- if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
++ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
++ dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
++ msix_src_reg);
+ return HCLGE_VECTOR0_EVENT_ERR;
++ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+@@ -2393,6 +2396,9 @@ static u32 hclge_check_event_cause(struc
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
++ /* print other vector0 event source */
++ dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
++ cmdq_src_reg, msix_src_reg);
+ return HCLGE_VECTOR0_EVENT_OTHER;
+ }
+
diff --git a/patches.drivers/net-hns3-add-support-for-dump-ncl-config-by-debugfs.patch b/patches.drivers/net-hns3-add-support-for-dump-ncl-config-by-debugfs.patch
new file mode 100644
index 0000000000..f0891e8096
--- /dev/null
+++ b/patches.drivers/net-hns3-add-support-for-dump-ncl-config-by-debugfs.patch
@@ -0,0 +1,132 @@
+From: Weihang Li <liweihang@hisilicon.com>
+Date: Fri, 19 Apr 2019 11:05:44 +0800
+Subject: net: hns3: add support for dump ncl config by debugfs
+Patch-mainline: v5.2-rc1
+Git-commit: ffd140e2ea980ea0ded8631f8bc1f43bca8a509e
+References: bsc#1104353 FATE#326415 bsc#1134987
+
+This patch allows users to dump the contents of NCL_CONFIG by using
+a debugfs command.
+Command format:
+ echo dump ncl_config <offset> <length> > cmd
+It will print as follows:
+ hns3 0000:7d:00.0: offset | data
+ hns3 0000:7d:00.0: 0x0000 | 0x00000020
+ hns3 0000:7d:00.0: 0x0004 | 0x00000400
+ hns3 0000:7d:00.0: 0x0008 | 0x08020401
+
+Signed-off-by: Weihang Li <liweihang@hisilicon.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 1
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 3
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 66 +++++++++++++
+ 3 files changed, 70 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+@@ -248,6 +248,7 @@ static void hns3_dbg_help(struct hnae3_h
+ dev_info(&h->pdev->dev, "dump qos buf cfg\n");
+ dev_info(&h->pdev->dev, "dump mng tbl\n");
+ dev_info(&h->pdev->dev, "dump reset info\n");
++ dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]",
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+@@ -237,6 +237,9 @@ enum hclge_opcode_type {
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
++ /* NCL config command */
++ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
++
+ /* SFP command */
+ HCLGE_OPC_SFP_GET_SPEED = 0x7104,
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+@@ -921,6 +921,69 @@ static void hclge_dbg_dump_rst_info(stru
+ hdev->rst_stats.reset_cnt);
+ }
+
++/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
++ * @hdev: pointer to struct hclge_dev
++ * @cmd_buf: string that contains offset and length
++ */
++static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf)
++{
++#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
++#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)
++#define HCLGE_CMD_DATA_NUM 6
++
++ struct hclge_desc desc[5];
++ u32 byte_offset;
++ int bd_num = 5;
++ int offset;
++ int length;
++ int data0;
++ int ret;
++ int i;
++ int j;
++
++ ret = sscanf(cmd_buf, "%x %x", &offset, &length);
++ if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
++ length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
++ dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
++ return;
++ }
++ if (offset < 0 || length <= 0) {
++ dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
++ return;
++ }
++
++ dev_info(&hdev->pdev->dev, "offset | data\n");
++
++ while (length > 0) {
++ data0 = offset;
++ if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
++ data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
++ else
++ data0 |= length << 16;
++ ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
++ HCLGE_OPC_QUERY_NCL_CONFIG);
++ if (ret)
++ return;
++
++ byte_offset = offset;
++ for (i = 0; i < bd_num; i++) {
++ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
++ if (i == 0 && j == 0)
++ continue;
++
++ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
++ byte_offset,
++ le32_to_cpu(desc[i].data[j]));
++ byte_offset += sizeof(u32);
++ length -= sizeof(u32);
++ if (length <= 0)
++ return;
++ }
++ }
++ offset += HCLGE_MAX_NCL_CONFIG_LENGTH;
++ }
++}
++
+ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf)
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+@@ -946,6 +1009,9 @@ int hclge_dbg_run_cmd(struct hnae3_handl
+ hclge_dbg_dump_reg_cmd(hdev, cmd_buf);
+ } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
+ hclge_dbg_dump_rst_info(hdev);
++ } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
++ hclge_dbg_dump_ncl_config(hdev,
++ &cmd_buf[sizeof("dump ncl_config")]);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return -EINVAL;
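
A standalone sketch of how each query round packs the requested window into
the command, as in hclge_dbg_dump_ncl_config() above: the low 16 bits of
data0 carry the offset, the high 16 bits the per-round read length. The
offset and length values are arbitrary examples.

#include <stdio.h>
#include <stdint.h>

#define MAX_NCL_CONFIG_LENGTH (20 + 24 * 4)     /* 116 bytes per round */

int main(void)
{
        int offset = 0x40, length = 0x200;

        while (length > 0) {
                int chunk = length > MAX_NCL_CONFIG_LENGTH ?
                            MAX_NCL_CONFIG_LENGTH : length;
                uint32_t data0 = (uint32_t)offset | ((uint32_t)chunk << 16);

                printf("query: data0=0x%08x (offset 0x%x, len %u)\n",
                       data0, data0 & 0xffff, data0 >> 16);
                offset += MAX_NCL_CONFIG_LENGTH;
                length -= chunk;
        }
        return 0;
}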
diff --git a/patches.drivers/net-hns3-adjust-the-timing-of-hns3_client_stop-when-.patch b/patches.drivers/net-hns3-adjust-the-timing-of-hns3_client_stop-when-.patch
new file mode 100644
index 0000000000..3b620ba906
--- /dev/null
+++ b/patches.drivers/net-hns3-adjust-the-timing-of-hns3_client_stop-when-.patch
@@ -0,0 +1,37 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:28 +0800
+Subject: net: hns3: adjust the timing of hns3_client_stop when unloading
+Patch-mainline: v5.2-rc1
+Git-commit: eb32c896f10a8685162480279bd79f992b33319e
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+hns3_client_stop() should be called after unregister_netdev(),
+otherwise an ongoing reset task may restart the client right after
+it is stopped.
+
+Fixes: a6d818e31d08 ("net: hns3: Add vport alive state checking support")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3717,13 +3717,13 @@ static void hns3_client_uninit(struct hn
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+- hns3_client_stop(handle);
+-
+ hns3_remove_hw_addr(netdev);
+
+ if (netdev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(netdev);
+
++ hns3_client_stop(handle);
++
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
diff --git a/patches.drivers/net-hns3-always-assume-no-drop-TC-for-performance-re.patch b/patches.drivers/net-hns3-always-assume-no-drop-TC-for-performance-re.patch
new file mode 100644
index 0000000000..a8e0266205
--- /dev/null
+++ b/patches.drivers/net-hns3-always-assume-no-drop-TC-for-performance-re.patch
@@ -0,0 +1,87 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:41 +0800
+Subject: net: hns3: always assume no drop TC for performance reason
+Patch-mainline: v5.2-rc1
+Git-commit: db5936db8f9e024cdbb988dab1606f2c205bb385
+References: bsc#1104353 FATE#326415 bsc#1135049
+
+Currently the RX shared buffer's threshold size for a specific TC
+is set to a smaller value when that TC's PFC is not enabled, which
+may cause a performance problem because the hardware may not have
+enough buffer when PFC is not enabled.
+
+This patch sets the same threshold size for all TCs regardless of
+whether a specific TC's PFC is enabled.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 29 ++--------------
+ 1 file changed, 4 insertions(+), 25 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -1432,17 +1432,6 @@ static int hclge_get_tc_num(struct hclge
+ return cnt;
+ }
+
+-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
+-{
+- int i, cnt = 0;
+-
+- for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+- if (hdev->hw_tc_map & BIT(i) &&
+- hdev->tm_info.hw_pfc_map & BIT(i))
+- cnt++;
+- return cnt;
+-}
+-
+ /* Get the number of pfc enabled TCs, which have private buffer */
+ static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+@@ -1507,13 +1496,11 @@ static bool hclge_is_rx_buf_ok(struct h
+ u32 rx_all)
+ {
+ u32 shared_buf_min, shared_buf_tc, shared_std;
+- int tc_num, pfc_enable_num;
++ int tc_num = hclge_get_tc_num(hdev);
+ u32 shared_buf, aligned_mps;
+ u32 rx_priv;
+ int i;
+
+- tc_num = hclge_get_tc_num(hdev);
+- pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
+ aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
+
+ if (hnae3_dev_dcb_supported(hdev))
+@@ -1522,9 +1509,7 @@ static bool hclge_is_rx_buf_ok(struct h
+ shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ + hdev->dv_buf_size;
+
+- shared_buf_tc = pfc_enable_num * aligned_mps +
+- (tc_num - pfc_enable_num) * aligned_mps / 2 +
+- aligned_mps;
++ shared_buf_tc = tc_num * aligned_mps + aligned_mps;
+ shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
+ HCLGE_BUF_SIZE_UNIT);
+
+@@ -1546,14 +1531,8 @@ static bool hclge_is_rx_buf_ok(struct h
+ }
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+- if ((hdev->hw_tc_map & BIT(i)) &&
+- (hdev->tm_info.hw_pfc_map & BIT(i))) {
+- buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
+- buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
+- } else {
+- buf_alloc->s_buf.tc_thrd[i].low = 0;
+- buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
+- }
++ buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
++ buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
+ }
+
+ return true;
diff --git a/patches.drivers/net-hns3-check-1000M-half-for-hns3_ethtool_ops.set_l.patch b/patches.drivers/net-hns3-check-1000M-half-for-hns3_ethtool_ops.set_l.patch
new file mode 100644
index 0000000000..7140023ca6
--- /dev/null
+++ b/patches.drivers/net-hns3-check-1000M-half-for-hns3_ethtool_ops.set_l.patch
@@ -0,0 +1,32 @@
+From: Peng Li <lipeng321@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:48 +0800
+Subject: net: hns3: check 1000M half for hns3_ethtool_ops.set_link_ksettings
+Patch-mainline: v5.2-rc1
+Git-commit: 95dbab9f3606e9f3724fc0e38298830b09a57559
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+The Hip08 SoC does not support 1000M half duplex, so this patch
+adds a 1000M half check to hns3_ethtool_ops.set_link_ksettings so
+that the user cannot set 1000M half via ethtool.
+
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -648,6 +648,10 @@ static int hns3_get_link_ksettings(struc
+ static int hns3_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+ {
++ /* Chip doesn't support this mode. */
++ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
++ return -EINVAL;
++
+ /* Only support ksettings_set for netdev with phy attached for now */
+ if (netdev->phydev)
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
diff --git a/patches.drivers/net-hns3-check-resetting-status-in-hns3_get_stats.patch b/patches.drivers/net-hns3-check-resetting-status-in-hns3_get_stats.patch
new file mode 100644
index 0000000000..a8ffc759c1
--- /dev/null
+++ b/patches.drivers/net-hns3-check-resetting-status-in-hns3_get_stats.patch
@@ -0,0 +1,34 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:56 +0800
+Subject: net: hns3: check resetting status in hns3_get_stats()
+Patch-mainline: v5.2-rc1
+Git-commit: c4e401e5a934bb0798ebbba98e08dab129695eff
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+hns3_get_stats() should check the resetting status first, since
+the device will be reinitialized when resetting. If the reset has
+not completed, hns3_get_stats() may access invalid memory.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -483,6 +483,11 @@ static void hns3_get_stats(struct net_de
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u64 *p = data;
+
++ if (hns3_nic_resetting(netdev)) {
++ netdev_err(netdev, "dev resetting, could not get stats\n");
++ return;
++ }
++
+ if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
+ netdev_err(netdev, "could not get any statistics\n");
+ return;
diff --git a/patches.drivers/net-hns3-code-optimization-for-command-queue-spin-lo.patch b/patches.drivers/net-hns3-code-optimization-for-command-queue-spin-lo.patch
new file mode 100644
index 0000000000..37fc132918
--- /dev/null
+++ b/patches.drivers/net-hns3-code-optimization-for-command-queue-spin-lo.patch
@@ -0,0 +1,59 @@
+From: Peng Li <lipeng321@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:46 +0800
+Subject: net: hns3: code optimization for command queue' spin lock
+Patch-mainline: v5.2-rc1
+Git-commit: 6814b5900b83de632d6709e21f906391496f5fc5
+References: bsc#1104353 FATE#326415 bsc#1135042
+
+This patch removes the redundant BH disabling when initializing
+and uninitializing the command queue.
+
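+As a sketch of the pattern (assuming the CRQ lock is only ever taken
+nested inside the CSQ lock, as in the diff below): once the outer
+spin_lock_bh() has disabled bottom halves on this CPU, the inner lock
+does not need to disable them again, so a plain spin_lock() suffices:
+
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);	/* disables BH */
+	spin_lock(&hdev->hw.cmq.crq.lock);	/* BH already disabled */
+	/* ... reinitialize both queues ... */
+	spin_unlock(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);	/* re-enables BH last */
+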
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 4 ++--
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+@@ -356,7 +356,7 @@ int hclge_cmd_init(struct hclge_dev *hde
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+- spin_lock_bh(&hdev->hw.cmq.crq.lock);
++ spin_lock(&hdev->hw.cmq.crq.lock);
+
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+@@ -365,7 +365,7 @@ int hclge_cmd_init(struct hclge_dev *hde
+
+ hclge_cmd_init_regs(&hdev->hw);
+
+- spin_unlock_bh(&hdev->hw.cmq.crq.lock);
++ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+@@ -335,7 +335,7 @@ int hclgevf_cmd_init(struct hclgevf_dev
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+- spin_lock_bh(&hdev->hw.cmq.crq.lock);
++ spin_lock(&hdev->hw.cmq.crq.lock);
+
+ /* initialize the pointers of async rx queue of mailbox */
+ hdev->arq.hdev = hdev;
+@@ -349,7 +349,7 @@ int hclgevf_cmd_init(struct hclgevf_dev
+
+ hclgevf_cmd_init_regs(&hdev->hw);
+
+- spin_unlock_bh(&hdev->hw.cmq.crq.lock);
++ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
diff --git a/patches.drivers/net-hns3-combine-len-and-checksum-handling-for-inner.patch b/patches.drivers/net-hns3-combine-len-and-checksum-handling-for-inner.patch
new file mode 100644
index 0000000000..8efa0af850
--- /dev/null
+++ b/patches.drivers/net-hns3-combine-len-and-checksum-handling-for-inner.patch
@@ -0,0 +1,280 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:47 +0800
+Subject: net: hns3: combine len and checksum handling for inner and outer
+ header.
+Patch-mainline: v5.2-rc1
+Git-commit: 757cd1e4a4d81181fcd7130c4315d169ad9f5b81
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+When filling the len and checksum info into the descriptor, there
+is some similar checking or calculation.
+
+So this patch adds hns3_set_l2l3l4 to fill the inner (or normal)
+header's len and checksum info. If it is an encapsulation skb, it
+calls hns3_set_outer_l2l3l4 to handle the outer header's len and
+checksum info, in order to avoid repeating similar checking or
+calculation.
+
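+As a small standalone illustration of the unit encoding used by these
+descriptor fields (header lengths are invented example values): the
+hardware counts the L2 header length in 2-byte units and the L3/L4
+header lengths in 4-byte units, hence the '>> 1' and '>> 2' shifts in
+the code below.
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned int l2_len = 14;	/* Ethernet header, bytes */
+		unsigned int l3_len = 20;	/* IPv4 header, bytes */
+
+		/* field values written to the descriptor: 7 and 5 */
+		printf("L2: %u, L3: %u\n", l2_len >> 1, l3_len >> 2);
+		return 0;
+	}
+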
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 192 ++++++++++--------------
+ 1 file changed, 81 insertions(+), 111 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -737,79 +737,6 @@ static int hns3_get_l4_protocol(struct s
+ return 0;
+ }
+
+-static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
+- u8 il4_proto, u32 *type_cs_vlan_tso,
+- u32 *ol_type_vlan_len_msec)
+-{
+- unsigned char *l2_hdr = skb->data;
+- u8 l4_proto = ol4_proto;
+- union l3_hdr_info l3;
+- union l4_hdr_info l4;
+- u32 l2_len;
+- u32 l3_len;
+- u32 l4_len;
+-
+- l3.hdr = skb_network_header(skb);
+- l4.hdr = skb_transport_header(skb);
+-
+- /* tunnel packet */
+- if (skb->encapsulation) {
+- /* not MAC in UDP, MAC in GRE (0x6558) */
+- if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE))
+- return;
+-
+- /* compute OL2 header size, defined in 2 Bytes */
+- l2_len = l3.hdr - skb->data;
+- hns3_set_field(*ol_type_vlan_len_msec,
+- HNS3_TXD_L2LEN_S, l2_len >> 1);
+-
+- /* compute OL3 header size, defined in 4 Bytes */
+- l3_len = l4.hdr - l3.hdr;
+- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
+- l3_len >> 2);
+-
+- l2_hdr = skb_inner_mac_header(skb);
+- /* compute OL4 header size, defined in 4 Bytes. */
+- l4_len = l2_hdr - l4.hdr;
+- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S,
+- l4_len >> 2);
+-
+- /* switch to inner header */
+- l2_hdr = skb_inner_mac_header(skb);
+- l3.hdr = skb_inner_network_header(skb);
+- l4.hdr = skb_inner_transport_header(skb);
+- l4_proto = il4_proto;
+- }
+-
+- l2_len = l3.hdr - l2_hdr;
+- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+-
+- /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+- l3_len = l4.hdr - l3.hdr;
+- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+-
+- /* compute inner(/normal) L4 header size, defined in 4 Bytes */
+- switch (l4_proto) {
+- case IPPROTO_TCP:
+- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+- l4.tcp->doff);
+- break;
+- case IPPROTO_SCTP:
+- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+- (sizeof(struct sctphdr) >> 2));
+- break;
+- case IPPROTO_UDP:
+- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+- (sizeof(struct udphdr) >> 2));
+- break;
+- default:
+- /* skb packet types not supported by hardware,
+- * txbd len fild doesn't be filled.
+- */
+- return;
+- }
+-}
+-
+ /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
+ * and it is udp packet, which has a dest port as the IANA assigned.
+ * the hardware is expected to do the checksum offload, but the
+@@ -831,46 +758,71 @@ static bool hns3_tunnel_csum_bug(struct
+ return true;
+ }
+
+-static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
+- u8 il4_proto, u32 *type_cs_vlan_tso,
+- u32 *ol_type_vlan_len_msec)
++static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
++ u32 *ol_type_vlan_len_msec)
+ {
++ u32 l2_len, l3_len, l4_len;
++ unsigned char *il2_hdr;
+ union l3_hdr_info l3;
+- u32 l4_proto = ol4_proto;
++ union l4_hdr_info l4;
+
+ l3.hdr = skb_network_header(skb);
++ l4.hdr = skb_transport_header(skb);
+
+- /* define OL3 type and tunnel type(OL4).*/
+- if (skb->encapsulation) {
+- /* define outer network header type.*/
+- if (skb->protocol == htons(ETH_P_IP)) {
+- if (skb_is_gso(skb))
+- hns3_set_field(*ol_type_vlan_len_msec,
+- HNS3_TXD_OL3T_S,
+- HNS3_OL3T_IPV4_CSUM);
+- else
+- hns3_set_field(*ol_type_vlan_len_msec,
+- HNS3_TXD_OL3T_S,
+- HNS3_OL3T_IPV4_NO_CSUM);
+-
+- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+- hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
+- HNS3_OL3T_IPV6);
+- }
++ /* compute OL2 header size, defined in 2 Bytes */
++ l2_len = l3.hdr - skb->data;
++ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+- /* define tunnel type(OL4).*/
+- switch (l4_proto) {
+- case IPPROTO_UDP:
++ /* compute OL3 header size, defined in 4 Bytes */
++ l3_len = l4.hdr - l3.hdr;
++ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
++
++ il2_hdr = skb_inner_mac_header(skb);
++ /* compute OL4 header size, defined in 4 Bytes. */
++ l4_len = il2_hdr - l4.hdr;
++ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
++
++ /* define outer network header type */
++ if (skb->protocol == htons(ETH_P_IP)) {
++ if (skb_is_gso(skb))
+ hns3_set_field(*ol_type_vlan_len_msec,
+- HNS3_TXD_TUNTYPE_S,
+- HNS3_TUN_MAC_IN_UDP);
+- break;
+- case IPPROTO_GRE:
++ HNS3_TXD_OL3T_S,
++ HNS3_OL3T_IPV4_CSUM);
++ else
+ hns3_set_field(*ol_type_vlan_len_msec,
+- HNS3_TXD_TUNTYPE_S,
+- HNS3_TUN_NVGRE);
+- break;
+- default:
++ HNS3_TXD_OL3T_S,
++ HNS3_OL3T_IPV4_NO_CSUM);
++
++ } else if (skb->protocol == htons(ETH_P_IPV6)) {
++ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
++ HNS3_OL3T_IPV6);
++ }
++
++ if (ol4_proto == IPPROTO_UDP)
++ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
++ HNS3_TUN_MAC_IN_UDP);
++ else if (ol4_proto == IPPROTO_GRE)
++ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
++ HNS3_TUN_NVGRE);
++}
++
++static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
++ u8 il4_proto, u32 *type_cs_vlan_tso,
++ u32 *ol_type_vlan_len_msec)
++{
++	unsigned char *l2_hdr = skb->data;
++ u32 l4_proto = ol4_proto;
++ union l4_hdr_info l4;
++ union l3_hdr_info l3;
++ u32 l2_len, l3_len;
++
++ l4.hdr = skb_transport_header(skb);
++ l3.hdr = skb_network_header(skb);
++
++ /* handle encapsulation skb */
++ if (skb->encapsulation) {
++		/* If this is not a UDP/GRE encapsulation skb */
++ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
+ /* drop the skb tunnel packet if hardware don't support,
+ * because hardware can't calculate csum when TSO.
+ */
+@@ -884,7 +836,12 @@ static int hns3_set_l3l4_type_csum(struc
+ return 0;
+ }
+
++ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
++
++ /* switch to inner header */
++ l2_hdr = skb_inner_mac_header(skb);
+ l3.hdr = skb_inner_network_header(skb);
++ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = il4_proto;
+ }
+
+@@ -902,11 +859,22 @@ static int hns3_set_l3l4_type_csum(struc
+ HNS3_L3T_IPV6);
+ }
+
++ /* compute inner(/normal) L2 header size, defined in 2 Bytes */
++ l2_len = l3.hdr - l2_hdr;
++ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
++
++ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
++ l3_len = l4.hdr - l3.hdr;
++ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
++
++ /* compute inner(/normal) L4 header size, defined in 4 Bytes */
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
++ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
++ l4.tcp->doff);
+ break;
+ case IPPROTO_UDP:
+ if (hns3_tunnel_csum_bug(skb))
+@@ -915,11 +883,15 @@ static int hns3_set_l3l4_type_csum(struc
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
++ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
++ (sizeof(struct udphdr) >> 2));
+ break;
+ case IPPROTO_SCTP:
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
++ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
++ (sizeof(struct sctphdr) >> 2));
+ break;
+ default:
+ /* drop the skb tunnel packet if hardware don't support,
+@@ -1050,12 +1022,10 @@ static int hns3_fill_desc(struct hns3_en
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret))
+ return ret;
+- hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
+- &type_cs_vlan_tso,
+- &ol_type_vlan_len_msec);
+- ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
+- &type_cs_vlan_tso,
+- &ol_type_vlan_len_msec);
++
++ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
++ &type_cs_vlan_tso,
++ &ol_type_vlan_len_msec);
+ if (unlikely(ret))
+ return ret;
+
diff --git a/patches.drivers/net-hns3-deactive-the-reset-timer-when-reset-success.patch b/patches.drivers/net-hns3-deactive-the-reset-timer-when-reset-success.patch
new file mode 100644
index 0000000000..56dadd6525
--- /dev/null
+++ b/patches.drivers/net-hns3-deactive-the-reset-timer-when-reset-success.patch
@@ -0,0 +1,28 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:29 +0800
+Subject: net: hns3: deactive the reset timer when reset successfully
+Patch-mainline: v5.2-rc1
+Git-commit: 056cbab332940b53d0841b052ed1cf4abf7307a5
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+If the reset has been done successfully, the ongoing reset timer
+is unnecessary.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -3022,6 +3022,7 @@ static void hclge_reset(struct hclge_dev
+ hdev->last_reset_time = jiffies;
+ hdev->reset_fail_cnt = 0;
+ ae_dev->reset_type = HNAE3_NONE_RESET;
++ del_timer(&hdev->reset_timer);
+
+ return;
+
diff --git a/patches.drivers/net-hns3-divide-shared-buffer-between-TC.patch b/patches.drivers/net-hns3-divide-shared-buffer-between-TC.patch
new file mode 100644
index 0000000000..5d854b5157
--- /dev/null
+++ b/patches.drivers/net-hns3-divide-shared-buffer-between-TC.patch
@@ -0,0 +1,81 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:42 +0800
+Subject: net: hns3: divide shared buffer between TC
+Patch-mainline: v5.2-rc1
+Git-commit: 1a49f3c6146f33c42523c8e4f5a72b6f322d5357
+References: bsc#1104353 FATE#326415 bsc#1135047
+
+Currently the hardware may not have enough buffer to receive
+packets once it has used more than two MPS (maximum packet size)
+of buffer, while a lot of the shared buffer is left unused when
+the TC num is small.
+
+This patch divides the shared buffer between TCs when the port
+supports DCB, and adjusts the waterline and threshold according
+to the user manual for ports that do not support DCB.
+
+This patch also changes hclge_get_tc_num's return type to u32
+to avoid a signed-unsigned mix in the division.
+
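+A tiny standalone example of why a signed-unsigned mix in a division
+is worth avoiding (the values are hypothetical; the driver case is a
+signed tc_num used with u32 buffer sizes):
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		uint32_t shared = 100;
+		int tc_num = -1;	/* a bogus signed count */
+
+		/* The signed operand is converted to unsigned before the
+		 * division, so this prints 0 instead of failing loudly.
+		 */
+		printf("%u\n", shared / tc_num);
+		return 0;
+	}
+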
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 27 +++++++++++-----
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -1422,7 +1422,7 @@ static int hclge_tx_buffer_alloc(struct
+ return ret;
+ }
+
+-static int hclge_get_tc_num(struct hclge_dev *hdev)
++static u32 hclge_get_tc_num(struct hclge_dev *hdev)
+ {
+ int i, cnt = 0;
+
+@@ -1495,8 +1495,8 @@ static bool hclge_is_rx_buf_ok(struct h
+ struct hclge_pkt_buf_alloc *buf_alloc,
+ u32 rx_all)
+ {
+- u32 shared_buf_min, shared_buf_tc, shared_std;
+- int tc_num = hclge_get_tc_num(hdev);
++ u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
++ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 shared_buf, aligned_mps;
+ u32 rx_priv;
+ int i;
+@@ -1526,13 +1526,26 @@ static bool hclge_is_rx_buf_ok(struct h
+ } else {
+ buf_alloc->s_buf.self.high = aligned_mps +
+ HCLGE_NON_DCB_ADDITIONAL_BUF;
+- buf_alloc->s_buf.self.low =
+- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
++ buf_alloc->s_buf.self.low = aligned_mps;
++ }
++
++ if (hnae3_dev_dcb_supported(hdev)) {
++ if (tc_num)
++ hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
++ else
++ hi_thrd = shared_buf - hdev->dv_buf_size;
++
++ hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
++ hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
++ lo_thrd = hi_thrd - aligned_mps / 2;
++ } else {
++ hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
++ lo_thrd = aligned_mps;
+ }
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+- buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
+- buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
++ buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
++ buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
+ }
+
+ return true;
diff --git a/patches.drivers/net-hns3-do-not-initialize-MDIO-bus-when-PHY-is-inex.patch b/patches.drivers/net-hns3-do-not-initialize-MDIO-bus-when-PHY-is-inex.patch
new file mode 100644
index 0000000000..becff4b93d
--- /dev/null
+++ b/patches.drivers/net-hns3-do-not-initialize-MDIO-bus-when-PHY-is-inex.patch
@@ -0,0 +1,45 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:44 +0800
+Subject: net: hns3: do not initialize MDIO bus when PHY is inexistent
+Patch-mainline: v5.2-rc1
+Git-commit: 2d0075b4a7b795bb6e6c4e392d36c023b0d0e858
+References: bsc#1104353 FATE#326415 bsc#1135045
+
+In some cases the PHY may not be connected to the MDIO bus; the
+driver then fails to initialize because the MDIO bus
+initialization fails.
+
+This patch fixes it by skipping the MDIO bus initialization
+when no PHY is present.
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -129,12 +129,18 @@ static int hclge_mdio_read(struct mii_bu
+
+ int hclge_mac_mdio_config(struct hclge_dev *hdev)
+ {
++#define PHY_INEXISTENT 255
++
+ struct hclge_mac *mac = &hdev->hw.mac;
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ int ret;
+
+- if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
++ if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) {
++ dev_info(&hdev->pdev->dev,
++ "no phy device is connected to mdio bus\n");
++ return 0;
++ } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n",
+ hdev->hw.mac.phy_addr);
+ return -EINVAL;
diff --git a/patches.drivers/net-hns3-do-not-request-reset-when-hardware-resettin.patch b/patches.drivers/net-hns3-do-not-request-reset-when-hardware-resettin.patch
new file mode 100644
index 0000000000..dd1f3baebd
--- /dev/null
+++ b/patches.drivers/net-hns3-do-not-request-reset-when-hardware-resettin.patch
@@ -0,0 +1,47 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:31 +0800
+Subject: net: hns3: do not request reset when hardware resetting
+Patch-mainline: v5.2-rc1
+Git-commit: 4f765d3e5213da43a5f410ea62445f9591bfa4dc
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+While a hardware reset has not finished, the driver should not
+request a new reset, otherwise the ongoing hardware reset
+will run into problems.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -32,6 +32,7 @@
+ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
+ static int hclge_init_vlan_config(struct hclge_dev *hdev);
+ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
++static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
+ static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size, bool is_alloc);
+
+@@ -2714,9 +2715,18 @@ int hclge_func_reset_cmd(struct hclge_de
+
+ static void hclge_do_reset(struct hclge_dev *hdev)
+ {
++ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct pci_dev *pdev = hdev->pdev;
+ u32 val;
+
++ if (hclge_get_hw_reset_stat(handle)) {
++ dev_info(&pdev->dev, "Hardware reset not finish\n");
++ dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
++ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
++ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
++ return;
++ }
++
+ switch (hdev->reset_type) {
+ case HNAE3_GLOBAL_RESET:
+ val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
diff --git a/patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch b/patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch
new file mode 100644
index 0000000000..3d449d076b
--- /dev/null
+++ b/patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch
@@ -0,0 +1,166 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:42 +0800
+Subject: net: hns3: dump more information when tx timeout happens
+Patch-mainline: v5.2-rc1
+Git-commit: e511c97d0a26454dc2b4b478a7fd90802fca0b6a
+References: bsc#1104353 FATE#326415 bsc#1134990
+
+Currently we print only a little information when a tx timeout
+happens. In order to find out the cause of the timeout, this
+patch prints more information about the packet statistics, tqp
+registers and napi state.
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 63 ++++++++++++++--
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 4 -
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 11 ++
+ 4 files changed, 72 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -392,7 +392,8 @@ struct hnae3_ae_ops {
+ void (*update_stats)(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats);
+ void (*get_stats)(struct hnae3_handle *handle, u64 *data);
+-
++ void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
++ u64 *rx_cnt);
+ void (*get_strings)(struct hnae3_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1628,9 +1628,15 @@ static int hns3_nic_change_mtu(struct ne
+ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hns3_enet_ring *tx_ring = NULL;
++ struct napi_struct *napi;
+ int timeout_queue = 0;
+ int hw_head, hw_tail;
++ int fbd_num, fbd_oft;
++ int ebd_num, ebd_oft;
++ int bd_num, bd_err;
++ int ring_en, tc;
+ int i;
+
+ /* Find the stopped queue the same way the stack does */
+@@ -1658,20 +1664,63 @@ static bool hns3_get_tx_timeo_queue_info
+ priv->tx_timeout_count++;
+
+ tx_ring = priv->ring_data[timeout_queue].ring;
++ napi = &tx_ring->tqp_vector->napi;
++
++ netdev_info(ndev,
++ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
++ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
++ tx_ring->next_to_clean, napi->state);
++
++ netdev_info(ndev,
++ "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
++ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
++ tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
++
++ netdev_info(ndev,
++ "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
++ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
++ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
++
++	/* When the mac has received many pause frames continuously, it is
++	 * unable to send packets, which may cause a tx timeout
++	 */
++ if (h->ae_algo->ops->update_stats &&
++ h->ae_algo->ops->get_mac_pause_stats) {
++ u64 tx_pause_cnt, rx_pause_cnt;
++
++ h->ae_algo->ops->update_stats(h, &ndev->stats);
++ h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
++ &rx_pause_cnt);
++ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
++ tx_pause_cnt, rx_pause_cnt);
++ }
+
+ hw_head = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ hw_tail = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
++ fbd_num = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_FBDNUM_REG);
++ fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_OFFSET_REG);
++ ebd_num = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_EBDNUM_REG);
++ ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_EBD_OFFSET_REG);
++ bd_num = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_BD_NUM_REG);
++ bd_err = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_BD_ERR_REG);
++ ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
++ tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
++
+ netdev_info(ndev,
+- "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
+- priv->tx_timeout_count,
+- timeout_queue,
+- tx_ring->next_to_use,
+- tx_ring->next_to_clean,
+- hw_head,
+- hw_tail,
++ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
++ bd_num, hw_head, hw_tail, bd_err,
+ readl(tx_ring->tqp_vector->mask_addr));
++ netdev_info(ndev,
++ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
++ ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
+
+ return true;
+ }
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -42,8 +42,10 @@ enum hns3_nic_state {
+ #define HNS3_RING_TX_RING_HEAD_REG 0x0005C
+ #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
+ #define HNS3_RING_TX_RING_OFFSET_REG 0x00064
++#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
+ #define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+-
++#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
++#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
+ #define HNS3_RING_PREFETCH_EN_REG 0x0007C
+ #define HNS3_RING_CFG_VF_NUM_REG 0x00080
+ #define HNS3_RING_ASID_REG 0x0008C
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -699,6 +699,16 @@ static void hclge_get_stats(struct hnae3
+ p = hclge_tqps_get_stats(handle, p);
+ }
+
++static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
++ u64 *rx_cnt)
++{
++ struct hclge_vport *vport = hclge_get_vport(handle);
++ struct hclge_dev *hdev = vport->back;
++
++ *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
++ *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
++}
++
+ static int hclge_parse_func_status(struct hclge_dev *hdev,
+ struct hclge_func_status_cmd *status)
+ {
+@@ -8532,6 +8542,7 @@ static const struct hnae3_ae_ops hclge_o
+ .set_mtu = hclge_set_mtu,
+ .reset_queue = hclge_reset_tqp,
+ .get_stats = hclge_get_stats,
++ .get_mac_pause_stats = hclge_get_mac_pause_stat,
+ .update_stats = hclge_update_stats,
+ .get_strings = hclge_get_strings,
+ .get_sset_count = hclge_get_sset_count,
diff --git a/patches.drivers/net-hns3-extend-the-loopback-state-acquisition-time.patch b/patches.drivers/net-hns3-extend-the-loopback-state-acquisition-time.patch
new file mode 100644
index 0000000000..f3763e122c
--- /dev/null
+++ b/patches.drivers/net-hns3-extend-the-loopback-state-acquisition-time.patch
@@ -0,0 +1,35 @@
+From: liuzhongzhu <liuzhongzhu@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:53 +0800
+Subject: net: hns3: extend the loopback state acquisition time
+Patch-mainline: v5.2-rc1
+Git-commit: fd85717d2800a352ce48799adcf7037b74df2854
+References: bsc#1104353 FATE#326415
+
+Test results show that the maximum time for the hardware to
+return the mac link state is 500 ms. The software needs to wait
+twice that maximum time (1000 ms).
+
+Without this change, the loopback test fails intermittently.
+
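+The total wait is simply the poll interval times the retry count; a
+sketch of the polling loop (mac_link_status() and 'expected' are
+placeholders, not driver symbols):
+
+	int i = 0;
+
+	do {
+		msleep(HCLGE_MAC_LINK_STATUS_MS);	/* 10 ms */
+		if (mac_link_status(hdev) == expected)
+			return 0;
+	} while (++i < HCLGE_MAC_LINK_STATUS_NUM);	/* 100 tries */
+
+	return -EBUSY;	/* 10 ms * 100 = 1000 ms elapsed */
+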
+Signed-off-by: liuzhongzhu <liuzhongzhu@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -5337,8 +5337,8 @@ static int hclge_set_serdes_loopback(str
+ #define HCLGE_SERDES_RETRY_MS 10
+ #define HCLGE_SERDES_RETRY_NUM 100
+
+-#define HCLGE_MAC_LINK_STATUS_MS 20
+-#define HCLGE_MAC_LINK_STATUS_NUM 10
++#define HCLGE_MAC_LINK_STATUS_MS 10
++#define HCLGE_MAC_LINK_STATUS_NUM 100
+ #define HCLGE_MAC_LINK_STATUS_DOWN 0
+ #define HCLGE_MAC_LINK_STATUS_UP 1
+
diff --git a/patches.drivers/net-hns3-fix-VLAN-offload-handle-for-VLAN-inserted-b.patch b/patches.drivers/net-hns3-fix-VLAN-offload-handle-for-VLAN-inserted-b.patch
new file mode 100644
index 0000000000..881687a47f
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-VLAN-offload-handle-for-VLAN-inserted-b.patch
@@ -0,0 +1,148 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:36 +0800
+Subject: net: hns3: fix VLAN offload handle for VLAN inserted by port
+Patch-mainline: v5.2-rc1
+Git-commit: 44e626f720c3176558df7840f2b52ba44cc0d414
+References: bsc#1104353 FATE#326415 bsc#1135053
+
+Currently, in the TX direction, the driver implements TX VLAN
+offload by checking the VLAN header in the skb and filling it into
+the TX descriptor. Usually this works well, but if inserting a
+VLAN header based on the port is enabled, it may conflict because
+the out_tag field of the TX descriptor is already used, causing a
+RAS error.
+
+In the RX direction, the hardware supports stripping at most two
+VLAN headers. Since vlan_tci in the skb can only store one VLAN
+tag, when RX VLAN offload is enabled the driver tells the hardware
+to strip one VLAN header from the RX packet; when RX VLAN offload
+is disabled, the driver tells the hardware not to strip the VLAN
+header from the RX packet. Now, if port based VLAN insertion is
+enabled, all RX packets carry the port based VLAN header. This
+header is useless for the stack, so the driver needs to ask the
+hardware to strip it. Unfortunately, the hardware can't drop this
+VLAN header and always fills it into the RX descriptor, so the
+driver has to identify and drop it.
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 2
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 44 +++++++++++++++-
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10 ++-
+ 3 files changed, 52 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -585,6 +585,8 @@ struct hnae3_handle {
+
+ u32 numa_node_mask; /* for multi-chip support */
+
++ enum hnae3_port_base_vlan_state port_base_vlan_state;
++
+ u8 netdev_flags;
+ struct dentry *hnae3_dbgfs;
+ };
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -963,6 +963,16 @@ static int hns3_fill_desc_vtags(struct s
+ {
+ #define HNS3_TX_VLAN_PRIO_SHIFT 13
+
++ struct hnae3_handle *handle = tx_ring->tqp->handle;
++
++	/* Due to a HW limitation, if port based VLAN insertion is enabled,
++	 * only one VLAN header is allowed in the skb, else a RAS error occurs.
++ */
++ if (unlikely(skb_vlan_tagged_multi(skb) &&
++ handle->port_base_vlan_state ==
++ HNAE3_PORT_BASE_VLAN_ENABLE))
++ return -EINVAL;
++
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !(tx_ring->tqp->handle->kinfo.netdev->features &
+ NETIF_F_HW_VLAN_CTAG_TX)) {
+@@ -984,8 +994,16 @@ static int hns3_fill_desc_vtags(struct s
+ * and use inner_vtag in one tag case.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+- hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
+- *out_vtag = vlan_tag;
++ if (handle->port_base_vlan_state ==
++ HNAE3_PORT_BASE_VLAN_DISABLE){
++ hns3_set_field(*out_vlan_flag,
++ HNS3_TXD_OVLAN_B, 1);
++ *out_vtag = vlan_tag;
++ } else {
++ hns3_set_field(*inner_vlan_flag,
++ HNS3_TXD_VLAN_B, 1);
++ *inner_vtag = vlan_tag;
++ }
+ } else {
+ hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
+ *inner_vtag = vlan_tag;
+@@ -2391,6 +2409,7 @@ static bool hns3_parse_vlan_tag(struct h
+ struct hns3_desc *desc, u32 l234info,
+ u16 *vlan_tag)
+ {
++ struct hnae3_handle *handle = ring->tqp->handle;
+ struct pci_dev *pdev = ring->tqp->handle->pdev;
+
+ if (pdev->revision == 0x20) {
+@@ -2403,15 +2422,36 @@ static bool hns3_parse_vlan_tag(struct h
+
+ #define HNS3_STRP_OUTER_VLAN 0x1
+ #define HNS3_STRP_INNER_VLAN 0x2
++#define HNS3_STRP_BOTH 0x3
+
++	/* The hardware always inserts a VLAN tag into the RX descriptor when
++	 * it removes the tag from the packet; the driver needs to determine
++	 * which tag to report to the stack.
++	 */
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
+ case HNS3_STRP_OUTER_VLAN:
++ if (handle->port_base_vlan_state !=
++ HNAE3_PORT_BASE_VLAN_DISABLE)
++ return false;
++
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ return true;
+ case HNS3_STRP_INNER_VLAN:
++ if (handle->port_base_vlan_state !=
++ HNAE3_PORT_BASE_VLAN_DISABLE)
++ return false;
++
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ return true;
++ case HNS3_STRP_BOTH:
++ if (handle->port_base_vlan_state ==
++ HNAE3_PORT_BASE_VLAN_DISABLE)
++ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
++ else
++ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
++
++ return true;
+ default:
+ return false;
+ }
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -6915,10 +6915,16 @@ int hclge_en_hw_strip_rxvtag(struct hnae
+ {
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+- vport->rxvlan_cfg.strip_tag1_en = false;
+- vport->rxvlan_cfg.strip_tag2_en = enable;
++ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
++ vport->rxvlan_cfg.strip_tag1_en = false;
++ vport->rxvlan_cfg.strip_tag2_en = enable;
++ } else {
++ vport->rxvlan_cfg.strip_tag1_en = enable;
++ vport->rxvlan_cfg.strip_tag2_en = true;
++ }
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
++ vport->rxvlan_cfg.rx_vlan_offload_en = enable;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+ }
diff --git a/patches.drivers/net-hns3-fix-data-race-between-ring-next_to_clean.patch b/patches.drivers/net-hns3-fix-data-race-between-ring-next_to_clean.patch
new file mode 100644
index 0000000000..41977bead0
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-data-race-between-ring-next_to_clean.patch
@@ -0,0 +1,72 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:45 +0800
+Subject: net: hns3: fix data race between ring->next_to_clean
+Patch-mainline: v5.2-rc1
+Git-commit: 26cda2f1613878d9bde11325559f4fca92fff395
+References: bsc#1104353 FATE#326415 bsc#1134975 bsc#1134945
+
+hns3_clean_tx_ring calls hns3_nic_reclaim_one_desc to clean
+buffers and set ring->next_to_clean, then hns3_nic_net_xmit
+reuses the cleaned buffers. But there are no memory barriers
+when the buffers get recycled, so the recycled buffers can be
+corrupted.
+
+This patch uses smp_store_release to update ring->next_to_clean
+and smp_load_acquire to read ring->next_to_clean to properly
+hand off buffers from hns3_clean_tx_ring to hns3_nic_net_xmit.
+
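+The same hand-off can be modelled with C11 atomics, where
+memory_order_release/memory_order_acquire play the role of
+smp_store_release()/smp_load_acquire() (a minimal standalone
+analogue, not driver code):
+
+	#include <stdatomic.h>
+	#include <stdio.h>
+
+	static int slot;			/* the "buffer" */
+	static atomic_int next_to_clean;	/* published index */
+
+	static void clean(void)
+	{
+		slot = 0;	/* finish using the buffer ... */
+		/* ... before publishing it as reclaimed */
+		atomic_store_explicit(&next_to_clean, 1,
+				      memory_order_release);
+	}
+
+	static void xmit(void)
+	{
+		/* only reuse the slot once the acquire-load sees it */
+		if (atomic_load_explicit(&next_to_clean,
+					 memory_order_acquire) == 1)
+			slot = 42;	/* safe to overwrite */
+	}
+
+	int main(void)
+	{
+		clean();	/* in the driver these run on two CPUs */
+		xmit();
+		printf("%d\n", slot);
+		return 0;
+	}
+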
+Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC")
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 14 +++++++++++---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 7 +++++--
+ 2 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2215,14 +2215,22 @@ static void hns3_reuse_buffer(struct hns
+ static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
+ int *pkts)
+ {
+- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
++ int ntc = ring->next_to_clean;
++ struct hns3_desc_cb *desc_cb;
+
++ desc_cb = &ring->desc_cb[ntc];
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+ /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
+- hns3_free_buffer_detach(ring, ring->next_to_clean);
++ hns3_free_buffer_detach(ring, ntc);
+
+- ring_ptr_move_fw(ring, next_to_clean);
++ if (++ntc == ring->desc_num)
++ ntc = 0;
++
++ /* This smp_store_release() pairs with smp_load_acquire() in
++ * ring_space called by hns3_nic_net_xmit.
++ */
++ smp_store_release(&ring->next_to_clean, ntc);
+ }
+
+ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -581,8 +581,11 @@ union l4_hdr_info {
+
+ static inline int ring_space(struct hns3_enet_ring *ring)
+ {
+- int begin = ring->next_to_clean;
+- int end = ring->next_to_use;
++ /* This smp_load_acquire() pairs with smp_store_release() in
++ * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
++ */
++ int begin = smp_load_acquire(&ring->next_to_clean);
++ int end = READ_ONCE(ring->next_to_use);
+
+ return ((end >= begin) ? (ring->desc_num - end + begin) :
+ (begin - end)) - 1;
diff --git a/patches.drivers/net-hns3-fix-error-handling-for-desc-filling.patch b/patches.drivers/net-hns3-fix-error-handling-for-desc-filling.patch
new file mode 100644
index 0000000000..28a7e7fc28
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-error-handling-for-desc-filling.patch
@@ -0,0 +1,94 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:48 +0800
+Subject: net: hns3: fix error handling for desc filling
+Patch-mainline: v5.2-rc1
+Git-commit: aa9d22dd456eb255db2a4a5b214ec2e243dda4c8
+References: bsc#1104353 FATE#326415
+
+When desc filling fails in hns3_nic_net_xmit, it calls
+hns3_clear_desc to undo the dma mappings. But currently
+ring->next_to_use points to the desc where the desc filling or
+dma mapping returned an error, which means the desc that
+ring->next_to_use points to has not been dma mapped; the descs
+that need unmapping are before ring->next_to_use.
+
+This patch fixes it by calling ring_ptr_move_bw(next_to_use)
+before doing the unmapping operation, and sets desc_cb->dma to
+zero to avoid freeing it again when unloading.
+
+Also, when filling the skb head or a frag fails, both need to
+unmap all the way back to next_to_use_head, so remove one of the
+desc filling error handling labels.
+
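+A sketch of the corrected unwind order (unmap_one() is a placeholder
+for the dma_unmap_single()/dma_unmap_page() calls in the diff below):
+
+	while (ring->next_to_use != next_to_use_orig) {
+		/* step back first: next_to_use points at the slot that
+		 * failed, which was never mapped
+		 */
+		ring_ptr_move_bw(ring, next_to_use);
+
+		unmap_one(ring, ring->next_to_use);
+		ring->desc_cb[ring->next_to_use].dma = 0; /* no double free */
+	}
+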
+Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC")
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 18 +++++++-----------
+ 1 file changed, 7 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1224,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_
+ if (ring->next_to_use == next_to_use_orig)
+ break;
+
++ /* rollback one */
++ ring_ptr_move_bw(ring, next_to_use);
++
+ /* unmap the descriptor dma address */
+ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
+ dma_unmap_single(dev,
+@@ -1237,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_
+ DMA_TO_DEVICE);
+
+ ring->desc_cb[ring->next_to_use].length = 0;
+-
+- /* rollback one */
+- ring_ptr_move_bw(ring, next_to_use);
++ ring->desc_cb[ring->next_to_use].dma = 0;
+ }
+ }
+
+@@ -1252,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_
+ struct netdev_queue *dev_queue;
+ struct skb_frag_struct *frag;
+ int next_to_use_head;
+- int next_to_use_frag;
+ int buf_num;
+ int seg_num;
+ int size;
+@@ -1291,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_
+ ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
+ DESC_TYPE_SKB);
+ if (unlikely(ret))
+- goto head_fill_err;
++ goto fill_err;
+
+- next_to_use_frag = ring->next_to_use;
+ /* Fill the fragments */
+ for (i = 1; i < seg_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+@@ -1304,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_
+ DESC_TYPE_PAGE);
+
+ if (unlikely(ret))
+- goto frag_fill_err;
++ goto fill_err;
+ }
+
+ /* Complete translate all packets */
+@@ -1317,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_
+
+ return NETDEV_TX_OK;
+
+-frag_fill_err:
+- hns3_clear_desc(ring, next_to_use_frag);
+-
+-head_fill_err:
++fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
+
+ out_err_tx_ok:
diff --git a/patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch b/patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch
new file mode 100644
index 0000000000..3ac384017d
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch
@@ -0,0 +1,32 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Tue, 28 May 2019 17:02:52 +0800
+Subject: net: hns3: fix for HNS3_RXD_GRO_SIZE_M macro
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+Git-commit: eff858c178fcc513e620bb803b4e3bfb9727856c
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+According to the hardware user manual, the GRO_SIZE field is 14
+bits wide, but HNS3_RXD_GRO_SIZE_M is only 10 bits wide now, which
+may cause errors in packets received via hardware GRO.
+
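+A standalone illustration of the truncation (the 9000-byte GRO size
+is an invented example):
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	#define GRO_SIZE_S	16
+	#define OLD_M		(0x3ffu << GRO_SIZE_S)	/* 10 bits */
+	#define NEW_M		(0x3fffu << GRO_SIZE_S)	/* 14 bits */
+
+	int main(void)
+	{
+		uint32_t desc = 9000u << GRO_SIZE_S;
+
+		/* 9000 needs 14 bits: the old mask yields only 808 */
+		printf("old: %u\n", (desc & OLD_M) >> GRO_SIZE_S);
+		printf("new: %u\n", (desc & NEW_M) >> GRO_SIZE_S);
+		return 0;
+	}
+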
+Fixes: a6d53b97a2e7 ("net: hns3: Adds GRO params to SKB for the stack")
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -145,7 +145,7 @@ enum hns3_nic_state {
+ #define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
+ #define HNS3_RXD_LKBK_B 15
+ #define HNS3_RXD_GRO_SIZE_S 16
+-#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
++#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
+
+ #define HNS3_TXD_L3T_S 0
+ #define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
diff --git a/patches.drivers/net-hns3-fix-for-TX-clean-num-when-cleaning-TX-BD.patch b/patches.drivers/net-hns3-fix-for-TX-clean-num-when-cleaning-TX-BD.patch
new file mode 100644
index 0000000000..d3324b31c6
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-for-TX-clean-num-when-cleaning-TX-BD.patch
@@ -0,0 +1,49 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:46 +0800
+Subject: net: hns3: fix for TX clean num when cleaning TX BD
+Patch-mainline: v5.2-rc1
+Git-commit: 63380a1ae4ced8aef67659ff9547c69ef8b9613a
+References: bsc#1104353 FATE#326415
+
+hns3_desc_unused() returns how many BDs have been cleaned but not
+yet had a new buffer attached. The HNS3_RING_RX_RING_FBDNUM_REG
+register returns how many BDs need a new buffer allocated or still
+need to be cleaned. So the remaining BDs that need to be cleaned
+are HNS3_RING_RX_RING_FBDNUM_REG - hns3_desc_unused().
+
+Also, a new buffer cannot be attached to a pending BD while the
+last BD is not handled, because memcpy has not been done on the
+first pending BD.
+
+This patch fixes this by subtracting the pending BD num from
+unused_count after 'HNS3_RING_RX_RING_FBDNUM_REG - unused_count'
+has been used to calculate the BD num that needs to be cleaned.
+
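+A worked fragment with invented numbers (not compilable on its own):
+
+	int fbdnum = 32;	/* from HNS3_RING_RX_RING_FBDNUM_REG */
+	int unused = 8;		/* hns3_desc_unused(ring) */
+	int pending = 3;	/* ring->pending_buf */
+
+	int num = fbdnum - unused;	/* 24 BDs left to clean */
+	unused -= pending;	/* only 5 slots may get new buffers yet */
+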
+Fixes: e55970950556 ("net: hns3: Add handling of GRO Pkts not fully RX'ed in NAPI poll")
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2875,7 +2875,7 @@ int hns3_clean_rx_ring(
+ {
+ #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ int recv_pkts, recv_bds, clean_count, err;
+- int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
++ int unused_count = hns3_desc_unused(ring);
+ struct sk_buff *skb = ring->skb;
+ int num;
+
+@@ -2884,6 +2884,7 @@ int hns3_clean_rx_ring(
+
+ recv_pkts = 0, recv_bds = 0, clean_count = 0;
+ num -= unused_count;
++ unused_count -= ring->pending_buf;
+
+ while (recv_pkts < budget && recv_bds < num) {
+ /* Reuse or realloc buffers */
diff --git a/patches.drivers/net-hns3-fix-for-tunnel-type-handling-in-hns3_rx_che.patch b/patches.drivers/net-hns3-fix-for-tunnel-type-handling-in-hns3_rx_che.patch
new file mode 100644
index 0000000000..b5d7048bff
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-for-tunnel-type-handling-in-hns3_rx_che.patch
@@ -0,0 +1,89 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:45 +0800
+Subject: net: hns3: fix for tunnel type handling in hns3_rx_checksum
+Patch-mainline: v5.2-rc1
+Git-commit: 39c38824c2a0b16bdc6450727847fd5c3da7e8b0
+References: bsc#1104353 FATE#326415 bsc#1134946
+
+According to the hardware user manual, the tunnel packet type is
+available in the rx.ol_info field of struct hns3_desc. Currently
+the tunnel packet type is decided by rx.l234_info, which may cause
+RX checksum handling errors.
+
+This patch fixes it by using the correct field in struct hns3_desc
+to decide the tunnel packet type.
+
+Fixes: 76ad4f0ee747 ("net: hns3: Add support of HNS3 Ethernet Driver for hip08 SoC")
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2464,7 +2464,7 @@ static int hns3_gro_complete(struct sk_b
+ }
+
+ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+- u32 l234info, u32 bd_base_info)
++ u32 l234info, u32 bd_base_info, u32 ol_info)
+ {
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ int l3_type, l4_type;
+@@ -2491,7 +2491,7 @@ static void hns3_rx_checksum(struct hns3
+ return;
+ }
+
+- ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M,
++ ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
+ switch (ol4_type) {
+ case HNS3_OL4_TYPE_MAC_IN_UDP:
+@@ -2695,7 +2695,7 @@ static int hns3_add_frag(struct hns3_ene
+
+ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info,
+- u32 bd_base_info)
++ u32 bd_base_info, u32 ol_info)
+ {
+ u16 gro_count;
+ u32 l3_type;
+@@ -2704,7 +2704,7 @@ static int hns3_set_gro_and_checksum(str
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!gro_count) {
+- hns3_rx_checksum(ring, skb, l234info, bd_base_info);
++ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
+ return 0;
+ }
+
+@@ -2744,7 +2744,7 @@ static int hns3_handle_bdinfo(struct hns
+ {
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ enum hns3_pkt_l2t_type l2_frame_type;
+- u32 bd_base_info, l234info;
++ u32 bd_base_info, l234info, ol_info;
+ struct hns3_desc *desc;
+ unsigned int len;
+ int pre_ntc, ret;
+@@ -2758,6 +2758,7 @@ static int hns3_handle_bdinfo(struct hns
+ desc = &ring->desc[pre_ntc];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ l234info = le32_to_cpu(desc->rx.l234_info);
++ ol_info = le32_to_cpu(desc->rx.ol_info);
+
+ /* Based on hw strategy, the tag offloaded will be stored at
+ * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+@@ -2797,7 +2798,8 @@ static int hns3_handle_bdinfo(struct hns
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* This is needed in order to enable forwarding support */
+- ret = hns3_set_gro_and_checksum(ring, skb, l234info, bd_base_info);
++ ret = hns3_set_gro_and_checksum(ring, skb, l234info,
++ bd_base_info, ol_info);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_err_cnt++;
diff --git a/patches.drivers/net-hns3-fix-for-vport-bw_limit-overflow-problem.patch b/patches.drivers/net-hns3-fix-for-vport-bw_limit-overflow-problem.patch
new file mode 100644
index 0000000000..685a2c6314
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-for-vport-bw_limit-overflow-problem.patch
@@ -0,0 +1,34 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 15 Apr 2019 21:48:39 +0800
+Subject: net: hns3: fix for vport->bw_limit overflow problem
+Patch-mainline: v5.2-rc1
+Git-commit: 2566f10676ba996b745e138f54f3e2f974311692
+References: bsc#1104353 FATE#326415 bsc#1134998
+
+When setting vport->bw_limit to hdev->tm_info.pg_info[0].bw_limit
+in hclge_tm_vport_tc_info_update, vport->bw_limit can be as big as
+HCLGE_ETHER_MAX_RATE (100000), which cannot fit into a u16
+(maximum 65535).
+
+So this patch fixes it by using u32 for vport->bw_limit.
+
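+The truncation is easy to demonstrate standalone:
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		uint16_t old_limit = 100000;	/* HCLGE_ETHER_MAX_RATE */
+		uint32_t new_limit = 100000;
+
+		/* the u16 silently wraps: 100000 mod 65536 == 34464 */
+		printf("u16: %u, u32: %u\n",
+		       (unsigned)old_limit, (unsigned)new_limit);
+		return 0;
+	}
+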
+Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver")
+Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -854,7 +854,7 @@ struct hclge_vport {
+ u16 alloc_rss_size;
+
+ u16 qs_offset;
+- u16 bw_limit; /* VSI BW Limit (0 = disabled) */
++ u32 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 dwrr;
+
+ struct hclge_port_base_vlan_config port_base_vlan_cfg;
diff --git a/patches.drivers/net-hns3-fix-keep_alive_timer-not-stop-problem.patch b/patches.drivers/net-hns3-fix-keep_alive_timer-not-stop-problem.patch
new file mode 100644
index 0000000000..5c00df9957
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-keep_alive_timer-not-stop-problem.patch
@@ -0,0 +1,51 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:36 +0800
+Subject: net: hns3: fix keep_alive_timer not stop problem
+Patch-mainline: v5.2-rc1
+Git-commit: e233516e6a92baeec20aa40fa5b63be6b94f1627
+References: bsc#1104353 FATE#326415 bsc#1135055
+
+When hclgevf_client_start() fails or the VF driver is unloaded,
+nothing disables the keep_alive_timer.
+
+So this patch fixes both cases.
+
+Fixes: a6d818e31d08 ("net: hns3: Add vport alive state checking support")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2031,9 +2031,15 @@ static int hclgevf_set_alive(struct hnae
+ static int hclgevf_client_start(struct hnae3_handle *handle)
+ {
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
++ int ret;
++
++ ret = hclgevf_set_alive(handle, true);
++ if (ret)
++ return ret;
+
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+- return hclgevf_set_alive(handle, true);
++
++ return 0;
+ }
+
+ static void hclgevf_client_stop(struct hnae3_handle *handle)
+@@ -2076,6 +2082,10 @@ static void hclgevf_state_uninit(struct
+ {
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
++ if (hdev->keep_alive_timer.function)
++ del_timer_sync(&hdev->keep_alive_timer);
++ if (hdev->keep_alive_task.func)
++ cancel_work_sync(&hdev->keep_alive_task);
+ if (hdev->service_timer.data)
+ del_timer_sync(&hdev->service_timer);
+ if (hdev->service_task.func)
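A userspace model of the guarded teardown added above: a timer or work item is torn down only if its callback pointer was ever set, standing in for the driver's del_timer_sync()/cancel_work_sync() calls. The types and helpers here are illustrative, not kernel API:

	#include <stddef.h>
	#include <stdio.h>

	struct timer { void (*function)(void); };
	struct work  { void (*func)(void); };

	static void stop_timer(struct timer *t)  { (void)t; puts("timer stopped"); }
	static void cancel_work(struct work *w)  { (void)w; puts("work cancelled"); }

	int main(void)
	{
		struct timer keep_alive_timer = { NULL };	/* never armed */
		struct work  keep_alive_task  = { NULL };	/* never queued */

		if (keep_alive_timer.function)	/* both checks skip: nothing to stop */
			stop_timer(&keep_alive_timer);
		if (keep_alive_task.func)
			cancel_work(&keep_alive_task);
		return 0;
	}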
diff --git a/patches.drivers/net-hns3-fix-loop-condition-of-hns3_get_tx_timeo_que.patch b/patches.drivers/net-hns3-fix-loop-condition-of-hns3_get_tx_timeo_que.patch
new file mode 100644
index 0000000000..6df42298d4
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-loop-condition-of-hns3_get_tx_timeo_que.patch
@@ -0,0 +1,32 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:41 +0800
+Subject: net: hns3: fix loop condition of hns3_get_tx_timeo_queue_info()
+Patch-mainline: v5.2-rc1
+Git-commit: fa6c4084b98b82c98cada0f0d5c9f8577579f962
+References: bsc#1104353 FATE#326415 bsc#1134990
+
+In function hns3_get_tx_timeo_queue_info(), it should use
+netdev->num_tx_queues, instead of netdev->real_num_tx_queues,
+as the loop limit.
+
+Fixes: 424eb834a9be ("net: hns3: Unified HNS3 {VF|PF} Ethernet Driver for hip08 SoC")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1634,7 +1634,7 @@ static bool hns3_get_tx_timeo_queue_info
+ int i;
+
+ /* Find the stopped queue the same way the stack does */
+- for (i = 0; i < ndev->real_num_tx_queues; i++) {
++ for (i = 0; i < ndev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
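For context, real_num_tx_queues can be lowered at runtime while a stopped queue may sit above that range, so scanning only the active queues can miss it. A sketch with hypothetical queue counts:

	#include <stdio.h>

	int main(void)
	{
		unsigned int num_tx_queues = 16;	/* allocated queues */
		unsigned int real_num_tx_queues = 8;	/* currently active */
		unsigned int stopped = 12;		/* hypothetical timed-out queue */
		unsigned int i;

		/* The fixed loop walks all allocated queues, as the stack does;
		 * iterating only up to real_num_tx_queues would miss queue 12.
		 */
		for (i = 0; i < num_tx_queues; i++)
			if (i == stopped)
				printf("found stopped queue %u (active range ends at %u)\n",
				       i, real_num_tx_queues);
		return 0;
	}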
diff --git a/patches.drivers/net-hns3-fix-pause-configure-fail-problem.patch b/patches.drivers/net-hns3-fix-pause-configure-fail-problem.patch
new file mode 100644
index 0000000000..dc3c1f08ab
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-pause-configure-fail-problem.patch
@@ -0,0 +1,36 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:52 +0800
+Subject: net: hns3: fix pause configure fail problem
+Patch-mainline: v5.2-rc1
+Git-commit: fba2efdae8b4f998f66a2ff4c9f0575e1c4bbc40
+References: bsc#1104353 FATE#326415 bsc#1134951
+
+When configuring pause, the current implementation returns directly
+after setting up PFC without setting up BP, which is not sufficient.
+
+So this patch fixes it by returning early only when setting up PFC fails.
+
+Fixes: 44e59e375bf7 ("net: hns3: do not return GE PFC setting err when initializing")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+@@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_de
+ ret = hclge_pfc_setup_hw(hdev);
+ if (init && ret == -EOPNOTSUPP)
+ dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
+- else
++ else if (ret) {
++ dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
++ ret);
+ return ret;
++ }
+
+ return hclge_tm_bp_setup(hdev);
+ }
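A compilable model of the corrected flow: -EOPNOTSUPP during init is only warned about, any other PFC error aborts, and BP setup now runs on the success paths. setup_pfc() and setup_bp() are stand-ins, not driver functions:

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static int setup_pfc(void) { return 0; }	/* stand-in for hclge_pfc_setup_hw() */
	static int setup_bp(void)  { return 0; }	/* stand-in for hclge_tm_bp_setup() */

	static int pause_setup(bool init)
	{
		int ret = setup_pfc();

		if (init && ret == -EOPNOTSUPP)
			puts("warn: GE MAC does not support pfc");	/* tolerated at init */
		else if (ret)
			return ret;	/* real failure: abort before BP setup */

		return setup_bp();	/* before the fix, skipped even on PFC success */
	}

	int main(void) { return pause_setup(true); }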
diff --git a/patches.drivers/net-hns3-fix-set-port-based-VLAN-for-PF.patch b/patches.drivers/net-hns3-fix-set-port-based-VLAN-for-PF.patch
new file mode 100644
index 0000000000..72830411b2
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-set-port-based-VLAN-for-PF.patch
@@ -0,0 +1,321 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:37 +0800
+Subject: net: hns3: fix set port based VLAN for PF
+Patch-mainline: v5.2-rc1
+Git-commit: 21e043cd812492e0a02fbbd956fbe49e19daeb45
+References: bsc#1104353 FATE#326415 bsc#1135053
+
+In the original code, ndo_set_vf_vlan() in the hns3 driver was
+implemented incorrectly. It added or removed a VLAN in the VLAN filter
+for a VF, but the VF was unaware of it.
+
+Indeed, ndo_set_vf_vlan() is expected to enable or disable port based
+VLAN (the hardware inserts a specified VLAN tag into all TX packets for
+a specified VF). When port based VLAN is enabled, the port based VLAN
+id is used as the VLAN filter entry; when it is disabled, the VLAN id
+of the VLAN device is used.
+
+This patch fixes it for the PF by enabling/disabling port based VLAN
+when ndo_set_vf_vlan() is called.
+
+Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 229 ++++++++++++++--
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 5
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 3
+ 3 files changed, 203 insertions(+), 34 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -6585,30 +6585,6 @@ static int hclge_set_vlan_filter_hw(stru
+ return ret;
+ }
+
+-int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+- u16 vlan_id, bool is_kill)
+-{
+- struct hclge_vport *vport = hclge_get_vport(handle);
+- struct hclge_dev *hdev = vport->back;
+-
+- return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
+- 0, is_kill);
+-}
+-
+-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+- u16 vlan, u8 qos, __be16 proto)
+-{
+- struct hclge_vport *vport = hclge_get_vport(handle);
+- struct hclge_dev *hdev = vport->back;
+-
+- if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
+- return -EINVAL;
+- if (proto != htons(ETH_P_8021Q))
+- return -EPROTONOSUPPORT;
+-
+- return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
+-}
+-
+ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+ {
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+@@ -6833,7 +6809,8 @@ static int hclge_init_vlan_config(struct
+ return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
+ }
+
+-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
++static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
++ bool writen_to_tbl)
+ {
+ struct hclge_vport_vlan_cfg *vlan;
+
+@@ -6845,14 +6822,38 @@ void hclge_add_vport_vlan_table(struct h
+ if (!vlan)
+ return;
+
+- vlan->hd_tbl_status = true;
++ vlan->hd_tbl_status = writen_to_tbl;
+ vlan->vlan_id = vlan_id;
+
+ list_add_tail(&vlan->node, &vport->vlan_list);
+ }
+
+-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+- bool is_write_tbl)
++static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
++{
++ struct hclge_vport_vlan_cfg *vlan, *tmp;
++ struct hclge_dev *hdev = vport->back;
++ int ret;
++
++ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
++ if (!vlan->hd_tbl_status) {
++ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
++ vport->vport_id,
++ vlan->vlan_id, 0, false);
++ if (ret) {
++ dev_err(&hdev->pdev->dev,
++ "restore vport vlan list failed, ret=%d\n",
++ ret);
++ return ret;
++ }
++ }
++ vlan->hd_tbl_status = true;
++ }
++
++ return 0;
++}
++
++static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
++ bool is_write_tbl)
+ {
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+@@ -6929,6 +6930,178 @@ int hclge_en_hw_strip_rxvtag(struct hnae
+ return hclge_set_vlan_rx_offload_cfg(vport);
+ }
+
++static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
++ u16 port_base_vlan_state,
++ struct hclge_vlan_info *new_info,
++ struct hclge_vlan_info *old_info)
++{
++ struct hclge_dev *hdev = vport->back;
++ int ret;
++
++ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
++ hclge_rm_vport_all_vlan_table(vport, false);
++ return hclge_set_vlan_filter_hw(hdev,
++ htons(new_info->vlan_proto),
++ vport->vport_id,
++ new_info->vlan_tag,
++ new_info->qos, false);
++ }
++
++ ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
++ vport->vport_id, old_info->vlan_tag,
++ old_info->qos, true);
++ if (ret)
++ return ret;
++
++ return hclge_add_vport_all_vlan_table(vport);
++}
++
++int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
++ struct hclge_vlan_info *vlan_info)
++{
++ struct hnae3_handle *nic = &vport->nic;
++ struct hclge_vlan_info *old_vlan_info;
++ struct hclge_dev *hdev = vport->back;
++ int ret;
++
++ old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
++
++ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
++ if (ret)
++ return ret;
++
++ if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
++ /* add new VLAN tag */
++ ret = hclge_set_vlan_filter_hw(hdev, vlan_info->vlan_proto,
++ vport->vport_id,
++ vlan_info->vlan_tag,
++ vlan_info->qos, false);
++ if (ret)
++ return ret;
++
++ /* remove old VLAN tag */
++ ret = hclge_set_vlan_filter_hw(hdev, old_vlan_info->vlan_proto,
++ vport->vport_id,
++ old_vlan_info->vlan_tag,
++ old_vlan_info->qos, true);
++ if (ret)
++ return ret;
++
++ goto update;
++ }
++
++ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
++ old_vlan_info);
++ if (ret)
++ return ret;
++
++ /* update state only when disable/enable port based VLAN */
++ vport->port_base_vlan_cfg.state = state;
++ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
++ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
++ else
++ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
++
++update:
++ vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
++ vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
++ vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
++
++ return 0;
++}
++
++static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
++ enum hnae3_port_base_vlan_state state,
++ u16 vlan)
++{
++ if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
++ if (!vlan)
++ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
++ else
++ return HNAE3_PORT_BASE_VLAN_ENABLE;
++ } else {
++ if (!vlan)
++ return HNAE3_PORT_BASE_VLAN_DISABLE;
++ else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
++ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
++ else
++ return HNAE3_PORT_BASE_VLAN_MODIFY;
++ }
++}
++
++static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
++ u16 vlan, u8 qos, __be16 proto)
++{
++ struct hclge_vport *vport = hclge_get_vport(handle);
++ struct hclge_dev *hdev = vport->back;
++ struct hclge_vlan_info vlan_info;
++ u16 state;
++ int ret;
++
++ if (hdev->pdev->revision == 0x20)
++ return -EOPNOTSUPP;
++
++ /* qos is a 3 bits value, so can not be bigger than 7 */
++ if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
++ return -EINVAL;
++ if (proto != htons(ETH_P_8021Q))
++ return -EPROTONOSUPPORT;
++
++ vport = &hdev->vport[vfid];
++ state = hclge_get_port_base_vlan_state(vport,
++ vport->port_base_vlan_cfg.state,
++ vlan);
++ if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
++ return 0;
++
++ vlan_info.vlan_tag = vlan;
++ vlan_info.qos = qos;
++ vlan_info.vlan_proto = ntohs(proto);
++
++ /* update port based VLAN for PF */
++ if (!vfid) {
++ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
++ ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
++ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
++
++ return ret;
++ }
++
++ return -EOPNOTSUPP;
++}
++
++int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
++ u16 vlan_id, bool is_kill)
++{
++ struct hclge_vport *vport = hclge_get_vport(handle);
++ struct hclge_dev *hdev = vport->back;
++ bool writen_to_tbl = false;
++ int ret = 0;
++
++	/* When port based VLAN is enabled, we use it as the VLAN filter
++	 * entry. In this case, we don't update the VLAN filter table when
++	 * the user adds or removes a VLAN; we just update the vport VLAN
++	 * list. The VLAN ids in the list are not written to the VLAN
++	 * filter table until port based VLAN is disabled.
++	 */
++ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
++ ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
++ vlan_id, 0, is_kill);
++ writen_to_tbl = true;
++ }
++
++ if (ret)
++ return ret;
++
++ if (is_kill)
++ hclge_rm_vport_vlan_table(vport, vlan_id, false);
++ else
++ hclge_add_vport_vlan_table(vport, vlan_id,
++ writen_to_tbl);
++
++ return 0;
++}
++
+ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
+ {
+ struct hclge_config_max_frm_size_cmd *req;
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -937,9 +937,8 @@ void hclge_rm_vport_mac_table(struct hcl
+ void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type);
+ void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
+-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
+-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+- bool is_write_tbl);
+ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
+ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
++int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
++ struct hclge_vlan_info *vlan_info);
+ #endif
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -305,9 +305,6 @@ static int hclge_set_vf_vlan_cfg(struct
+ memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
+ status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
+ vlan, is_kill);
+- if (!status)
+- is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
+- : hclge_add_vport_vlan_table(vport, vlan);
+ } else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+ struct hnae3_handle *handle = &vport->nic;
+ bool en = mbx_req->msg[2] ? true : false;
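The decision table in hclge_get_port_base_vlan_state() above is compact; here is a standalone model of the same logic, with enum names shortened and vlan == 0 meaning "clear the port based VLAN":

	#include <stdio.h>

	enum pbv_state { PBV_DISABLE, PBV_ENABLE, PBV_MODIFY, PBV_NOCHANGE };

	static enum pbv_state next_state(enum pbv_state cur,
					 unsigned short cur_tag, unsigned short vlan)
	{
		if (cur == PBV_DISABLE)
			return vlan ? PBV_ENABLE : PBV_NOCHANGE;
		if (!vlan)
			return PBV_DISABLE;
		return vlan == cur_tag ? PBV_NOCHANGE : PBV_MODIFY;
	}

	int main(void)
	{
		printf("%d\n", next_state(PBV_DISABLE, 0, 100));	/* 1: enable */
		printf("%d\n", next_state(PBV_ENABLE, 100, 0));		/* 0: disable */
		printf("%d\n", next_state(PBV_ENABLE, 100, 200));	/* 2: modify */
		printf("%d\n", next_state(PBV_ENABLE, 100, 100));	/* 3: no change */
		return 0;
	}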
diff --git a/patches.drivers/net-hns3-fix-set-port-based-VLAN-issue-for-VF.patch b/patches.drivers/net-hns3-fix-set-port-based-VLAN-issue-for-VF.patch
new file mode 100644
index 0000000000..5168bb3745
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-set-port-based-VLAN-issue-for-VF.patch
@@ -0,0 +1,263 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:38 +0800
+Subject: net: hns3: fix set port based VLAN issue for VF
+Patch-mainline: v5.2-rc1
+Git-commit: 92f11ea177cd77ebc790916eb9d3331e1d676b62
+References: bsc#1104353 FATE#326415 bsc#1135053
+
+In the original code, ndo_set_vf_vlan() in the hns3 driver was
+implemented incorrectly. It added or removed a VLAN in the VLAN filter
+for a VF, but the VF was unaware of it.
+
+This patch fixes it. When the VF loads, it first queries the port
+based VLAN state from the PF. When the user changes the port based
+VLAN state via the PF, the PF first checks whether the VF is alive.
+If the VF is alive, the PF notifies the VF of the modification;
+otherwise the PF configures the port based VLAN state directly.
+
+Fixes: 46a3df9f9718 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 3
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 11 ++-
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 39 +++++++++-
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 51 ++++++++++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 11 ++-
+ 7 files changed, 111 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+@@ -43,6 +43,7 @@ enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
+ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
+ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
++ HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
+ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
+
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+@@ -63,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode {
+ HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
+ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
+ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
++ HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
++ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
+ };
+
+ #define HCLGE_MBX_MAX_MSG_SIZE 16
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -7067,7 +7067,16 @@ static int hclge_set_vf_vlan_filter(stru
+ return ret;
+ }
+
+- return -EOPNOTSUPP;
++ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
++ return hclge_update_port_base_vlan_cfg(vport, state,
++ &vlan_info);
++ } else {
++ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
++ (u8)vfid, state,
++ vlan, qos,
++ ntohs(proto));
++ return ret;
++ }
+ }
+
+ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -941,4 +941,7 @@ void hclge_rm_vport_all_vlan_table(struc
+ void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info);
++int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
++ u16 state, u16 vlan_tag, u16 qos,
++ u16 vlan_proto);
+ #endif
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -289,9 +289,25 @@ static int hclge_set_vf_mc_mac_addr(stru
+ return 0;
+ }
+
++int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
++ u16 state, u16 vlan_tag, u16 qos,
++ u16 vlan_proto)
++{
++#define MSG_DATA_SIZE 8
++
++ u8 msg_data[MSG_DATA_SIZE];
++
++ memcpy(&msg_data[0], &state, sizeof(u16));
++ memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
++ memcpy(&msg_data[4], &qos, sizeof(u16));
++ memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
++
++ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
++ HLCGE_MBX_PUSH_VLAN_INFO, vfid);
++}
++
+ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+- bool gen_resp)
++ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ {
+ int status = 0;
+
+@@ -310,11 +326,22 @@ static int hclge_set_vf_vlan_cfg(struct
+ bool en = mbx_req->msg[2] ? true : false;
+
+ status = hclge_en_hw_strip_rxvtag(handle, en);
++ } else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
++ struct hclge_vlan_info *vlan_info;
++ u16 *state;
++
++ state = (u16 *)&mbx_req->msg[2];
++ vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
++ status = hclge_update_port_base_vlan_cfg(vport, *state,
++ vlan_info);
++ } else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
++ u8 state;
++
++ state = vport->port_base_vlan_cfg.state;
++ status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
++ sizeof(u8));
+ }
+
+- if (gen_resp)
+- status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+-
+ return status;
+ }
+
+@@ -584,7 +611,7 @@ void hclge_mbx_handler(struct hclge_dev
+ ret);
+ break;
+ case HCLGE_MBX_SET_VLAN:
+- ret = hclge_set_vf_vlan_cfg(vport, req, false);
++ ret = hclge_set_vf_vlan_cfg(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hc
+ return 0;
+ }
+
++static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
++{
++ struct hnae3_handle *nic = &hdev->nic;
++ u8 resp_msg;
++ int ret;
++
++ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
++ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE,
++ NULL, 0, true, &resp_msg, sizeof(u8));
++ if (ret) {
++ dev_err(&hdev->pdev->dev,
++ "VF request to get port based vlan state failed %d",
++ ret);
++ return ret;
++ }
++
++ nic->port_base_vlan_state = resp_msg;
++
++ return 0;
++}
++
+ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
+ {
+ #define HCLGEVF_TQPS_RSS_INFO_LEN 6
+@@ -1834,6 +1855,11 @@ static int hclgevf_configure(struct hclg
+ {
+ int ret;
+
++ /* get current port based vlan state from PF */
++ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
++ if (ret)
++ return ret;
++
+ /* get queue configuration from PF */
+ ret = hclgevf_get_queue_info(hdev);
+ if (ret)
+@@ -2791,6 +2817,31 @@ static void hclgevf_get_regs(struct hnae
+ }
+ }
+
++void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
++ u8 *port_base_vlan_info, u8 data_size)
++{
++ struct hnae3_handle *nic = &hdev->nic;
++
++ rtnl_lock();
++ hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
++ rtnl_unlock();
++
++ /* send msg to PF and wait update port based vlan info */
++ hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN,
++ HCLGE_MBX_PORT_BASE_VLAN_CFG,
++ port_base_vlan_info, data_size,
++ false, NULL, 0);
++
++ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
++ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
++ else
++ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
++
++ rtnl_lock();
++ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
++ rtnl_unlock();
++}
++
+ static const struct hnae3_ae_ops hclgevf_ops = {
+ .init_ae_dev = hclgevf_init_ae_dev,
+ .uninit_ae_dev = hclgevf_uninit_ae_dev,
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+@@ -290,4 +290,6 @@ void hclgevf_update_speed_duplex(struct
+ u8 duplex);
+ void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
+ void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
++void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
++ u8 *port_base_vlan_info, u8 data_size);
+ #endif
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -198,6 +198,7 @@ void hclgevf_mbx_handler(struct hclgevf_
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ case HCLGE_MBX_ASSERTING_RESET:
+ case HCLGE_MBX_LINK_STAT_MODE:
++ case HLCGE_MBX_PUSH_VLAN_INFO:
+ /* set this mbx event as pending. This is required as we
+ * might loose interrupt event when mbx task is busy
+ * handling. This shall be cleared when mbx task just
+@@ -243,8 +244,8 @@ void hclgevf_mbx_handler(struct hclgevf_
+ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
+ {
+ enum hnae3_reset_type reset_type;
+- u16 link_status;
+- u16 *msg_q;
++ u16 link_status, state;
++ u16 *msg_q, *vlan_info;
+ u8 duplex;
+ u32 speed;
+ u32 tail;
+@@ -299,6 +300,12 @@ void hclgevf_mbx_async_handler(struct hc
+ hclgevf_reset_task_schedule(hdev);
+
+ break;
++ case HLCGE_MBX_PUSH_VLAN_INFO:
++ state = le16_to_cpu(msg_q[1]);
++ vlan_info = &msg_q[1];
++ hclgevf_update_port_base_vlan_info(hdev, state,
++ (u8 *)vlan_info, 8);
++ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fetched unsupported(%d) message from arq\n",
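The 8-byte HLCGE_MBX_PUSH_VLAN_INFO payload built by hclge_push_vf_port_base_vlan_info() packs four u16 fields back to back; a userspace sketch of that layout (values hypothetical; the copies are in host byte order, and the VF side decodes the fields as little-endian):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		uint16_t state = 1, proto = 0x8100, qos = 3, vlan_tag = 100;
		uint8_t msg_data[8];

		memcpy(&msg_data[0], &state, sizeof(uint16_t));
		memcpy(&msg_data[2], &proto, sizeof(uint16_t));
		memcpy(&msg_data[4], &qos, sizeof(uint16_t));
		memcpy(&msg_data[6], &vlan_tag, sizeof(uint16_t));

		for (int i = 0; i < 8; i++)
			printf("%02x ", msg_data[i]);
		printf("\n");	/* little-endian host: 01 00 00 81 03 00 64 00 */
		return 0;
	}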
diff --git a/patches.drivers/net-hns3-fix-sparse-warning-when-calling-hclge_set_v.patch b/patches.drivers/net-hns3-fix-sparse-warning-when-calling-hclge_set_v.patch
new file mode 100644
index 0000000000..5ba568f145
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-sparse-warning-when-calling-hclge_set_v.patch
@@ -0,0 +1,46 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Mon, 15 Apr 2019 21:48:38 +0800
+Subject: net: hns3: fix sparse: warning when calling
+ hclge_set_vlan_filter_hw()
+Patch-mainline: v5.2-rc1
+Git-commit: 8a9a654b5b5233e7459abcc5f65c53df14b33f67
+References: bsc#1104353 FATE#326415 bsc#1134999
+
+The input parameter "proto" of hclge_set_vlan_filter_hw() is declared
+as __be16, but a u16 is passed when it is called from
+hclge_update_port_base_vlan_cfg().
+
+This patch fixes it by converting the value with htons().
+
+Reported-by: kbuild test robot <lkp@intel.com>
+Fixes: 21e043cd8124 ("net: hns3: fix set port based VLAN for PF")
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -6964,7 +6964,8 @@ int hclge_update_port_base_vlan_cfg(stru
+
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
+ /* add new VLAN tag */
+- ret = hclge_set_vlan_filter_hw(hdev, vlan_info->vlan_proto,
++ ret = hclge_set_vlan_filter_hw(hdev,
++ htons(vlan_info->vlan_proto),
+ vport->vport_id,
+ vlan_info->vlan_tag,
+ vlan_info->qos, false);
+@@ -6972,7 +6973,8 @@ int hclge_update_port_base_vlan_cfg(stru
+ return ret;
+
+ /* remove old VLAN tag */
+- ret = hclge_set_vlan_filter_hw(hdev, old_vlan_info->vlan_proto,
++ ret = hclge_set_vlan_filter_hw(hdev,
++ htons(old_vlan_info->vlan_proto),
+ vport->vport_id,
+ old_vlan_info->vlan_tag,
+ old_vlan_info->qos, true);
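A tiny demonstration of the mismatch sparse complains about: vlan_proto is stored in host order, while the filter call expects a big-endian (__be16) value, hence the htons() at each call site. Values hypothetical:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t vlan_proto = 0x8100;		/* host order, as stored in hclge_vlan_info */
		uint16_t wire = htons(vlan_proto);	/* big-endian, as the hw call expects */

		printf("host: 0x%04x, be16: 0x%04x\n", vlan_proto, wire);
		return 0;
	}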
diff --git a/patches.drivers/net-hns3-free-the-pending-skb-when-clean-RX-ring.patch b/patches.drivers/net-hns3-free-the-pending-skb-when-clean-RX-ring.patch
new file mode 100644
index 0000000000..bc1da42fc5
--- /dev/null
+++ b/patches.drivers/net-hns3-free-the-pending-skb-when-clean-RX-ring.patch
@@ -0,0 +1,38 @@
+From: Peng Li <lipeng321@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:45 +0800
+Subject: net: hns3: free the pending skb when clean RX ring
+Patch-mainline: v5.2-rc1
+Git-commit: cc5ff6e90f808f9a4c8229bf2f1de0dfe5d7931c
+References: bsc#1104353 FATE#326415 bsc#1135044
+
+If there is a pending skb in the RX flow when the port is closed, and
+the pending buffer is not cleaned, a new packet will be appended to
+the pending skb when the port opens again, so the first new packet
+contains erroneous data.
+
+This patch frees the pending skb when cleaning the RX ring.
+
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3959,6 +3959,13 @@ static int hns3_clear_rx_ring(struct hns
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
++ /* Free the pending skb in rx ring */
++ if (ring->skb) {
++ dev_kfree_skb_any(ring->skb);
++ ring->skb = NULL;
++ ring->pending_buf = 0;
++ }
++
+ return 0;
+ }
+
diff --git a/patches.drivers/net-hns3-handle-pending-reset-while-reset-fail.patch b/patches.drivers/net-hns3-handle-pending-reset-while-reset-fail.patch
new file mode 100644
index 0000000000..56b75dfe49
--- /dev/null
+++ b/patches.drivers/net-hns3-handle-pending-reset-while-reset-fail.patch
@@ -0,0 +1,30 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:32 +0800
+Subject: net: hns3: handle pending reset while reset fail
+Patch-mainline: v5.2-rc1
+Git-commit: cf1f212916d9d59977aadd558a54aef6109bc2d1
+References: bsc#1104353 FATE#326415 bsc#1135058
+
+An ongoing lower-level reset will fail when a higher-level reset
+occurs, so the error handler should deal with this situation.
+
+Fixes: 6a5f6fa382f3 ("net: hns3: add error handler for hclgevf_reset()")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1474,6 +1474,8 @@ err_reset:
+ */
+ hclgevf_cmd_init(hdev);
+ dev_err(&hdev->pdev->dev, "failed to reset VF\n");
++ if (hclgevf_is_reset_pending(hdev))
++ hclgevf_reset_task_schedule(hdev);
+
+ return ret;
+ }
diff --git a/patches.drivers/net-hns3-handle-the-BD-info-on-the-last-BD-of-the-pa.patch b/patches.drivers/net-hns3-handle-the-BD-info-on-the-last-BD-of-the-pa.patch
new file mode 100644
index 0000000000..146f754ed2
--- /dev/null
+++ b/patches.drivers/net-hns3-handle-the-BD-info-on-the-last-BD-of-the-pa.patch
@@ -0,0 +1,111 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:47 +0800
+Subject: net: hns3: handle the BD info on the last BD of the packet
+Patch-mainline: v5.2-rc1
+Git-commit: ea4858670717fd948dab0113a5ee65486494a607
+References: bsc#1104353 FATE#326415 bsc#1134974
+
+The BD info handled in hns3_handle_bdinfo() is only valid on the
+last BD of the current packet; currently it may be handled based on
+the first BD if the packet consists of more than two BDs, which may
+cause an RX error.
+
+This patch fixes it by using the last BD of the current packet in
+hns3_handle_bdinfo().
+
+Also, hns3_set_rx_skb_rss_type() already uses the RSS hash value from
+the last BD of the current packet, so remove the duplicate last-BD
+calculation there and call it from hns3_handle_bdinfo().
+
+Fixes: e55970950556 ("net: hns3: Add handling of GRO Pkts not fully RX'ed in NAPI poll")
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 38 ++++++++++++------------
+ 1 file changed, 20 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2698,36 +2698,37 @@ static int hns3_set_gro_and_checksum(str
+ }
+
+ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
+- struct sk_buff *skb)
++ struct sk_buff *skb, u32 rss_hash)
+ {
+ struct hnae3_handle *handle = ring->tqp->handle;
+ enum pkt_hash_types rss_type;
+- struct hns3_desc *desc;
+- int last_bd;
+-
+- /* When driver handle the rss type, ring->next_to_clean indicates the
+- * first descriptor of next packet, need -1 here.
+- */
+- last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num;
+- desc = &ring->desc[last_bd];
+
+- if (le32_to_cpu(desc->rx.rss_hash))
++ if (rss_hash)
+ rss_type = handle->kinfo.rss_type;
+ else
+ rss_type = PKT_HASH_TYPE_NONE;
+
+- skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type);
++ skb_set_hash(skb, rss_hash, rss_type);
+ }
+
+-static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb,
+- struct hns3_desc *desc)
++static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
+ {
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+- u32 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+- u32 l234info = le32_to_cpu(desc->rx.l234_info);
+ enum hns3_pkt_l2t_type l2_frame_type;
++ u32 bd_base_info, l234info;
++ struct hns3_desc *desc;
+ unsigned int len;
+- int ret;
++ int pre_ntc, ret;
++
++ /* bdinfo handled below is only valid on the last BD of the
++ * current packet, and ring->next_to_clean indicates the first
++ * descriptor of next packet, so need - 1 below.
++ */
++ pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
++ (ring->desc_num - 1);
++ desc = &ring->desc[pre_ntc];
++ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
++ l234info = le32_to_cpu(desc->rx.l234_info);
+
+ /* Based on hw strategy, the tag offloaded will be stored at
+ * ot_vlan_tag in two layer tag case, and stored at vlan_tag
+@@ -2788,6 +2789,8 @@ static int hns3_handle_bdinfo(struct hns
+ u64_stats_update_end(&ring->syncp);
+
+ ring->tqp_vector->rx_group.total_bytes += len;
++
++ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ return 0;
+ }
+
+@@ -2857,14 +2860,13 @@ static int hns3_handle_rx_bd(struct hns3
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
+
+- ret = hns3_handle_bdinfo(ring, skb, desc);
++ ret = hns3_handle_bdinfo(ring, skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ *out_skb = skb;
+- hns3_set_rx_skb_rss_type(ring, skb);
+
+ return 0;
+ }
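The pre_ntc computation above replaces a modulo with a branch; a quick standalone check that both forms yield the index of the last BD of the current packet (ring size hypothetical):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		const int desc_num = 8;	/* hypothetical ring size */

		/* next_to_clean points at the first BD of the *next* packet,
		 * so the last BD of the current one is one slot back, with wrap.
		 */
		for (int ntc = 0; ntc < desc_num; ntc++) {
			int pre_ntc = ntc ? ntc - 1 : desc_num - 1;

			assert(pre_ntc == (ntc - 1 + desc_num) % desc_num);
			printf("ntc=%d -> last BD at %d\n", ntc, pre_ntc);
		}
		return 0;
	}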
diff --git a/patches.drivers/net-hns3-ignore-lower-level-new-coming-reset.patch b/patches.drivers/net-hns3-ignore-lower-level-new-coming-reset.patch
new file mode 100644
index 0000000000..68a193790e
--- /dev/null
+++ b/patches.drivers/net-hns3-ignore-lower-level-new-coming-reset.patch
@@ -0,0 +1,31 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:30 +0800
+Subject: net: hns3: ignore lower-level new coming reset
+Patch-mainline: v5.2-rc1
+Git-commit: 0fdf4d304c24eb2fb99d1f81db5bc46c85f24009
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+It is unnecessary to deal with a newly arriving reset if
+its level is lower than that of the ongoing one.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2795,6 +2795,10 @@ static enum hnae3_reset_type hclge_get_r
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
+
++ if (hdev->reset_type != HNAE3_NONE_RESET &&
++ rst_level < hdev->reset_type)
++ return HNAE3_NONE_RESET;
++
+ return rst_level;
+ }
+
diff --git a/patches.drivers/net-hns3-minor-optimization-for-datapath.patch b/patches.drivers/net-hns3-minor-optimization-for-datapath.patch
new file mode 100644
index 0000000000..b69a69e37a
--- /dev/null
+++ b/patches.drivers/net-hns3-minor-optimization-for-datapath.patch
@@ -0,0 +1,81 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:53 +0800
+Subject: net: hns3: minor optimization for datapath
+Patch-mainline: v5.2-rc1
+Git-commit: ceca4a5e3223dbb99c062990a64f7a8c32906674
+References: bsc#1104353 FATE#326415
+
+This patch adds a likely case to hns3_fill_desc(),
+limits the local variables' scope as much as possible,
+and avoids a division when tqp_vector->num_tqps
+is one.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 24 +++++++++++++++++++++---
+ 1 file changed, 21 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1012,7 +1012,6 @@ static int hns3_fill_desc(struct hns3_en
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ struct device *dev = ring_to_dev(ring);
+- u16 bdtp_fe_sc_vld_ra_ri = 0;
+ struct skb_frag_struct *frag;
+ unsigned int frag_buf_num;
+ int k, sizeoflast;
+@@ -1080,12 +1079,30 @@ static int hns3_fill_desc(struct hns3_en
+
+ desc_cb->length = size;
+
++ if (likely(size <= HNS3_MAX_BD_SIZE)) {
++ u16 bdtp_fe_sc_vld_ra_ri = 0;
++
++ desc_cb->priv = priv;
++ desc_cb->dma = dma;
++ desc_cb->type = type;
++ desc->addr = cpu_to_le64(dma);
++ desc->tx.send_size = cpu_to_le16(size);
++ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
++ desc->tx.bdtp_fe_sc_vld_ra_ri =
++ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
++
++ ring_ptr_move_fw(ring, next_to_use);
++ return 0;
++ }
++
+ frag_buf_num = hns3_tx_bd_count(size);
+ sizeoflast = size & HNS3_TX_LAST_SIZE_M;
+ sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
+
+ /* When frag size is bigger than hardware limit, split this frag */
+ for (k = 0; k < frag_buf_num; k++) {
++ u16 bdtp_fe_sc_vld_ra_ri = 0;
++
+ /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
+ desc_cb->priv = priv;
+ desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
+@@ -2892,7 +2909,7 @@ static int hns3_nic_common_poll(struct n
+ struct hns3_enet_tqp_vector *tqp_vector =
+ container_of(napi, struct hns3_enet_tqp_vector, napi);
+ bool clean_complete = true;
+- int rx_budget;
++ int rx_budget = budget;
+
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+@@ -2906,7 +2923,8 @@ static int hns3_nic_common_poll(struct n
+ hns3_clean_tx_ring(ring);
+
+ /* make sure rx ring budget not smaller than 1 */
+- rx_budget = max(budget / tqp_vector->num_tqps, 1);
++ if (tqp_vector->num_tqps > 1)
++ rx_budget = max(budget / tqp_vector->num_tqps, 1);
+
+ hns3_for_each_ring(ring, tqp_vector->rx_group) {
+ int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
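The rx_budget change above only special-cases the common single-ring vector; a model of the per-ring budget split, with the guard keeping at least a budget of 1 (numbers hypothetical):

	#include <stdio.h>

	static int rx_budget_for(int budget, int num_tqps)
	{
		if (num_tqps > 1) {	/* several rings share this vector */
			int per_ring = budget / num_tqps;

			return per_ring > 1 ? per_ring : 1;	/* never 0 */
		}
		return budget;	/* single ring: skip the division entirely */
	}

	int main(void)
	{
		printf("%d %d %d\n", rx_budget_for(64, 1),
		       rx_budget_for(64, 4), rx_budget_for(64, 128));	/* 64 16 1 */
		return 0;
	}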
diff --git a/patches.drivers/net-hns3-minor-optimization-for-ring_space.patch b/patches.drivers/net-hns3-minor-optimization-for-ring_space.patch
new file mode 100644
index 0000000000..f4b69d88c4
--- /dev/null
+++ b/patches.drivers/net-hns3-minor-optimization-for-ring_space.patch
@@ -0,0 +1,48 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:52 +0800
+Subject: net: hns3: minor optimization for ring_space
+Patch-mainline: v5.2-rc1
+Git-commit: 0aa3d88a9197fd7176dbaf5db769837be6afdf46
+References: bsc#1104353 FATE#326415
+
+This patch optimizes ring_space() by calculating the
+ring space without calling ring_dist().
+
+Also, ring_dist() is only used by ring_space(), so this patch
+removes it once it is no longer used.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 15 +++++----------
+ 1 file changed, 5 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -577,18 +577,13 @@ union l4_hdr_info {
+ unsigned char *hdr;
+ };
+
+-/* the distance between [begin, end) in a ring buffer
+- * note: there is a unuse slot between the begin and the end
+- */
+-static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
+-{
+- return (end - begin + ring->desc_num) % ring->desc_num;
+-}
+-
+ static inline int ring_space(struct hns3_enet_ring *ring)
+ {
+- return ring->desc_num -
+- ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
++ int begin = ring->next_to_clean;
++ int end = ring->next_to_use;
++
++ return ((end >= begin) ? (ring->desc_num - end + begin) :
++ (begin - end)) - 1;
+ }
+
+ static inline int is_ring_empty(struct hns3_enet_ring *ring)
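As a sanity check on the rewrite, a small program comparing the old ring_dist()-based formula with the new branchy one over every index pair (ring size hypothetical); they agree everywhere:

	#include <assert.h>
	#include <stdio.h>

	static int ring_dist(int desc_num, int begin, int end)
	{
		return (end - begin + desc_num) % desc_num;	/* the removed helper */
	}

	int main(void)
	{
		const int desc_num = 8;	/* hypothetical ring size */

		for (int clean = 0; clean < desc_num; clean++)
			for (int use = 0; use < desc_num; use++) {
				int old_space = desc_num -
					ring_dist(desc_num, clean, use) - 1;
				int new_space = ((use >= clean) ?
					(desc_num - use + clean) : (clean - use)) - 1;

				assert(old_space == new_space);
			}
		printf("formulas agree for all %dx%d index pairs\n",
		       desc_num, desc_num);
		return 0;
	}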
diff --git a/patches.drivers/net-hns3-minor-refactor-for-hns3_rx_checksum.patch b/patches.drivers/net-hns3-minor-refactor-for-hns3_rx_checksum.patch
new file mode 100644
index 0000000000..4f197460dc
--- /dev/null
+++ b/patches.drivers/net-hns3-minor-refactor-for-hns3_rx_checksum.patch
@@ -0,0 +1,49 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:39 +0800
+Subject: net: hns3: minor refactor for hns3_rx_checksum
+Patch-mainline: v5.2-rc1
+Git-commit: a4d2cdcbb878d4d5828cd4124104c330d2817211
+References: bsc#1104353 FATE#326415 bsc#1135052
+
+Change the parameters of hns3_rx_checksum() to be more specific to
+what is used internally, rather than passing in a pointer to the
+whole hns3_desc. This reduces duplicate code and brings the function
+in line with the approach used in hns3_set_gro_param().
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2333,16 +2333,11 @@ static void hns3_nic_reuse_page(struct s
+ }
+
+ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+- struct hns3_desc *desc)
++ u32 l234info, u32 bd_base_info)
+ {
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ int l3_type, l4_type;
+- u32 bd_base_info;
+ int ol4_type;
+- u32 l234info;
+-
+- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+- l234info = le32_to_cpu(desc->rx.l234_info);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+@@ -2752,7 +2747,7 @@ static int hns3_handle_rx_bd(struct hns3
+ /* This is needed in order to enable forwarding support */
+ hns3_set_gro_param(skb, l234info, bd_base_info);
+
+- hns3_rx_checksum(ring, skb, desc);
++ hns3_rx_checksum(ring, skb, l234info, bd_base_info);
+ *out_skb = skb;
+ hns3_set_rx_skb_rss_type(ring, skb);
+
diff --git a/patches.drivers/net-hns3-modify-HNS3_NIC_STATE_INITED-flag-in-hns3_r.patch b/patches.drivers/net-hns3-modify-HNS3_NIC_STATE_INITED-flag-in-hns3_r.patch
new file mode 100644
index 0000000000..b11d3ceb49
--- /dev/null
+++ b/patches.drivers/net-hns3-modify-HNS3_NIC_STATE_INITED-flag-in-hns3_r.patch
@@ -0,0 +1,39 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:58 +0800
+Subject: net: hns3: modify HNS3_NIC_STATE_INITED flag in
+ hns3_reset_notify_uninit_enet
+Patch-mainline: v5.2-rc1
+Git-commit: 1eeb3367897a3f9f852e186695e28bb623b09f92
+References: bsc#1104353 FATE#326415
+
+In hns3_reset_notify_uninit_enet(), the HNS3_NIC_STATE_INITED flag
+should be checked and cleared first.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -4127,7 +4127,7 @@ static int hns3_reset_notify_uninit_enet
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
++ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+@@ -4149,8 +4149,6 @@ static int hns3_reset_notify_uninit_enet
+ hns3_put_ring_config(priv);
+ priv->ring_data = NULL;
+
+- clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
+-
+ return ret;
+ }
+
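The point of switching to test_and_clear_bit() is that the read and the clear become one atomic step, so two racing uninit paths cannot both see the INITED bit set. A userspace model using C11 atomics (illustrative, not the kernel implementation):

	#include <stdatomic.h>
	#include <stdio.h>

	#define NIC_STATE_INITED 0	/* hypothetical bit number */

	static int test_and_clear_bit_model(int nr, atomic_ulong *addr)
	{
		unsigned long mask = 1UL << nr;

		return (atomic_fetch_and(addr, ~mask) & mask) != 0;
	}

	int main(void)
	{
		atomic_ulong state = 1UL << NIC_STATE_INITED;

		/* only the first caller observes the bit set */
		printf("first:  %d\n", test_and_clear_bit_model(NIC_STATE_INITED, &state));
		printf("second: %d\n", test_and_clear_bit_model(NIC_STATE_INITED, &state));
		return 0;
	}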
diff --git a/patches.drivers/net-hns3-modify-VLAN-initialization-to-be-compatible.patch b/patches.drivers/net-hns3-modify-VLAN-initialization-to-be-compatible.patch
new file mode 100644
index 0000000000..89539f5588
--- /dev/null
+++ b/patches.drivers/net-hns3-modify-VLAN-initialization-to-be-compatible.patch
@@ -0,0 +1,195 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Sun, 14 Apr 2019 09:47:35 +0800
+Subject: net: hns3: modify VLAN initialization to be compatible with port
+ based VLAN
+Patch-mainline: v5.2-rc1
+Git-commit: 741fca1667ea90f6c9a1393d3c1a3e4f9eae3fc7
+References: bsc#1104353 FATE#326415 bsc#1135053
+
+Our hardware supports inserting a specified VLAN header for each
+function when sending packets. The user can enable it with the command
+"ip link set <devname> vf <vfid> vlan <vlan id>".
+Because this VLAN header is inserted by the hardware, not by the
+stack, the hardware also needs to strip it from received packets
+before passing them to the stack. In this case, the driver needs to
+tell the hardware which VLAN to insert or strip.
+
+The current VLAN initialization doesn't allow the hardware to insert
+the VLAN header; this patch modifies it to be compatible with
+port based VLAN insertion.
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 7 +
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 80 ++++++++++------
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 21 +++-
+ 3 files changed, 78 insertions(+), 30 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -147,6 +147,13 @@ enum hnae3_flr_state {
+ HNAE3_FLR_DONE,
+ };
+
++enum hnae3_port_base_vlan_state {
++ HNAE3_PORT_BASE_VLAN_DISABLE,
++ HNAE3_PORT_BASE_VLAN_ENABLE,
++ HNAE3_PORT_BASE_VLAN_MODIFY,
++ HNAE3_PORT_BASE_VLAN_NOCHANGE,
++};
++
+ struct hnae3_vector_info {
+ u8 __iomem *io_addr;
+ int vector;
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -1358,6 +1358,8 @@ static int hclge_alloc_vport(struct hclg
+ vport->back = hdev;
+ vport->vport_id = i;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
++ vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
++ vport->rxvlan_cfg.rx_vlan_offload_en = true;
+ INIT_LIST_HEAD(&vport->vlan_list);
+ INIT_LIST_HEAD(&vport->uc_mac_list);
+ INIT_LIST_HEAD(&vport->mc_mac_list);
+@@ -6680,6 +6682,52 @@ static int hclge_set_vlan_rx_offload_cfg
+ return status;
+ }
+
++static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
++ u16 port_base_vlan_state,
++ u16 vlan_tag)
++{
++ int ret;
++
++ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
++ vport->txvlan_cfg.accept_tag1 = true;
++ vport->txvlan_cfg.insert_tag1_en = false;
++ vport->txvlan_cfg.default_tag1 = 0;
++ } else {
++ vport->txvlan_cfg.accept_tag1 = false;
++ vport->txvlan_cfg.insert_tag1_en = true;
++ vport->txvlan_cfg.default_tag1 = vlan_tag;
++ }
++
++ vport->txvlan_cfg.accept_untag1 = true;
++
++	/* accept_tag2 and accept_untag2 are not supported on
++	 * pdev revision(0x20); newer revisions support them, but
++	 * these two fields cannot be configured by the user.
++	 */
++ vport->txvlan_cfg.accept_tag2 = true;
++ vport->txvlan_cfg.accept_untag2 = true;
++ vport->txvlan_cfg.insert_tag2_en = false;
++ vport->txvlan_cfg.default_tag2 = 0;
++
++ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
++ vport->rxvlan_cfg.strip_tag1_en = false;
++ vport->rxvlan_cfg.strip_tag2_en =
++ vport->rxvlan_cfg.rx_vlan_offload_en;
++ } else {
++ vport->rxvlan_cfg.strip_tag1_en =
++ vport->rxvlan_cfg.rx_vlan_offload_en;
++ vport->rxvlan_cfg.strip_tag2_en = true;
++ }
++ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
++ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
++
++ ret = hclge_set_vlan_tx_offload_cfg(vport);
++ if (ret)
++ return ret;
++
++ return hclge_set_vlan_rx_offload_cfg(vport);
++}
++
+ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+ {
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+@@ -6770,34 +6818,14 @@ static int hclge_init_vlan_config(struct
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+- vport = &hdev->vport[i];
+- vport->txvlan_cfg.accept_tag1 = true;
+- vport->txvlan_cfg.accept_untag1 = true;
+-
+- /* accept_tag2 and accept_untag2 are not supported on
+- * pdev revision(0x20), new revision support them. The
+- * value of this two fields will not return error when driver
+- * send command to fireware in revision(0x20).
+- * This two fields can not configured by user.
+- */
+- vport->txvlan_cfg.accept_tag2 = true;
+- vport->txvlan_cfg.accept_untag2 = true;
++ u16 vlan_tag;
+
+- vport->txvlan_cfg.insert_tag1_en = false;
+- vport->txvlan_cfg.insert_tag2_en = false;
+- vport->txvlan_cfg.default_tag1 = 0;
+- vport->txvlan_cfg.default_tag2 = 0;
+-
+- ret = hclge_set_vlan_tx_offload_cfg(vport);
+- if (ret)
+- return ret;
+-
+- vport->rxvlan_cfg.strip_tag1_en = false;
+- vport->rxvlan_cfg.strip_tag2_en = true;
+- vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+- vport->rxvlan_cfg.vlan2_vlan_prionly = false;
++ vport = &hdev->vport[i];
++ vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+
+- ret = hclge_set_vlan_rx_offload_cfg(vport);
++ ret = hclge_vlan_offload_cfg(vport,
++ vport->port_base_vlan_cfg.state,
++ vlan_tag);
+ if (ret)
+ return ret;
+ }
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -807,10 +807,11 @@ struct hclge_tx_vtag_cfg {
+
+ /* VPort level vlan tag configuration for RX direction */
+ struct hclge_rx_vtag_cfg {
+- bool strip_tag1_en; /* Whether strip inner vlan tag */
+- bool strip_tag2_en; /* Whether strip outer vlan tag */
+- bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
+- bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
++ u8 rx_vlan_offload_en; /* Whether enable rx vlan offload */
++ u8 strip_tag1_en; /* Whether strip inner vlan tag */
++ u8 strip_tag2_en; /* Whether strip outer vlan tag */
++ u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
++ u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
+ };
+
+ struct hclge_rss_tuple_cfg {
+@@ -829,6 +830,17 @@ enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_MAX
+ };
+
++struct hclge_vlan_info {
++ u16 vlan_proto; /* so far support 802.1Q only */
++ u16 qos;
++ u16 vlan_tag;
++};
++
++struct hclge_port_base_vlan_config {
++ u16 state;
++ struct hclge_vlan_info vlan_info;
++};
++
+ struct hclge_vport {
+ u16 alloc_tqps; /* Allocated Tx/Rx queues */
+
+@@ -845,6 +857,7 @@ struct hclge_vport {
+ u16 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 dwrr;
+
++ struct hclge_port_base_vlan_config port_base_vlan_cfg;
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+ struct hclge_rx_vtag_cfg rxvlan_cfg;
+
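The branches in hclge_vlan_offload_cfg() above reduce to a small truth table; a standalone model of how the TX insert and RX strip switches follow the port based VLAN state and the rx offload setting (field names mirror the driver, the helper itself is illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	struct vtag_cfg {
		bool insert_tag1_en, strip_tag1_en, strip_tag2_en;
		unsigned short default_tag1;
	};

	static struct vtag_cfg cfg_for(bool pbv_enabled, bool rx_offload_en,
				       unsigned short vlan_tag)
	{
		struct vtag_cfg c = { 0 };

		c.insert_tag1_en = pbv_enabled;			/* hw inserts the port VLAN */
		c.default_tag1 = pbv_enabled ? vlan_tag : 0;
		c.strip_tag1_en = pbv_enabled && rx_offload_en;	/* strip what hw inserted */
		c.strip_tag2_en = pbv_enabled || rx_offload_en;
		return c;
	}

	int main(void)
	{
		struct vtag_cfg c = cfg_for(true, true, 100);

		printf("insert=%d strip1=%d strip2=%d tag=%u\n",
		       c.insert_tag1_en, c.strip_tag1_en, c.strip_tag2_en,
		       c.default_tag1);
		return 0;
	}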
diff --git a/patches.drivers/net-hns3-modify-the-VF-network-port-media-type-acqui.patch b/patches.drivers/net-hns3-modify-the-VF-network-port-media-type-acqui.patch
new file mode 100644
index 0000000000..7d8cafd8a5
--- /dev/null
+++ b/patches.drivers/net-hns3-modify-the-VF-network-port-media-type-acqui.patch
@@ -0,0 +1,136 @@
+From: liuzhongzhu <liuzhongzhu@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:50 +0800
+Subject: net: hns3: modify the VF network port media type acquisition method
+Patch-mainline: v5.2-rc1
+Git-commit: 9c3e713020fc8e08e02d6756b401125ab5cb702c
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+The media type of the VF network port used to be obtained periodically,
+but the periodic task does not run until the network port is UP, so the
+media type cannot be obtained while the port is DOWN. This patch obtains
+the media type once when initializing the VF network port instead.
+
+Signed-off-by: liuzhongzhu <liuzhongzhu@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 1
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 23 +++++++++++---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 23 ++++++++++++++
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 1
+ 4 files changed, 43 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+@@ -43,6 +43,7 @@ enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
+ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
+ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
++ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
+
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
+ };
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -385,24 +385,32 @@ static int hclge_get_vf_queue_depth(stru
+ HCLGE_TQPS_DEPTH_INFO_LEN);
+ }
+
++static int hclge_get_vf_media_type(struct hclge_vport *vport,
++ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
++{
++ struct hclge_dev *hdev = vport->back;
++ u8 resp_data;
++
++ resp_data = hdev->hw.mac.media_type;
++ return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
++ sizeof(resp_data));
++}
++
+ static int hclge_get_link_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ {
+ struct hclge_dev *hdev = vport->back;
+ u16 link_status;
+- u8 msg_data[10];
+- u16 media_type;
++ u8 msg_data[8];
+ u8 dest_vfid;
+ u16 duplex;
+
+ /* mac.link can only be 0 or 1 */
+ link_status = (u16)hdev->hw.mac.link;
+ duplex = hdev->hw.mac.duplex;
+- media_type = hdev->hw.mac.media_type;
+ memcpy(&msg_data[0], &link_status, sizeof(u16));
+ memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
+ memcpy(&msg_data[6], &duplex, sizeof(u16));
+- memcpy(&msg_data[8], &media_type, sizeof(u16));
+ dest_vfid = mbx_req->mbx_src_vfid;
+
+ /* send this requested info to VF */
+@@ -662,6 +670,13 @@ void hclge_mbx_handler(struct hclge_dev
+ hclge_rm_vport_all_vlan_table(vport, true);
+ mutex_unlock(&hdev->vport_cfg_mutex);
+ break;
++ case HCLGE_MBX_GET_MEDIA_TYPE:
++ ret = hclge_get_vf_media_type(vport, req);
++ if (ret)
++ dev_err(&hdev->pdev->dev,
++ "PF fail(%d) to media type for VF\n",
++ ret);
++ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %d\n",
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -307,6 +307,25 @@ static u16 hclgevf_get_qid_global(struct
+ return qid_in_pf;
+ }
+
++static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
++{
++ u8 resp_msg;
++ int ret;
++
++ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
++ true, &resp_msg, sizeof(resp_msg));
++ if (ret) {
++ dev_err(&hdev->pdev->dev,
++ "VF request to get the pf port media type failed %d",
++ ret);
++ return ret;
++ }
++
++ hdev->hw.mac.media_type = resp_msg;
++
++ return 0;
++}
++
+ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+ {
+ struct hclgevf_tqp *tqp;
+@@ -1824,6 +1843,10 @@ static int hclgevf_configure(struct hclg
+ if (ret)
+ return ret;
+
++ ret = hclgevf_get_pf_media_type(hdev);
++ if (ret)
++ return ret;
++
+ /* get tc configuration from PF */
+ return hclgevf_get_tc_info(hdev);
+ }
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -272,7 +272,6 @@ void hclgevf_mbx_async_handler(struct hc
+ link_status = le16_to_cpu(msg_q[1]);
+ memcpy(&speed, &msg_q[2], sizeof(speed));
+ duplex = (u8)le16_to_cpu(msg_q[4]);
+- hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
+
+ /* update upper layer with new link link status */
+ hclgevf_update_link_status(hdev, link_status);
diff --git a/patches.drivers/net-hns3-not-reset-TQP-in-the-DOWN-while-VF-resettin.patch b/patches.drivers/net-hns3-not-reset-TQP-in-the-DOWN-while-VF-resettin.patch
new file mode 100644
index 0000000000..54528ef170
--- /dev/null
+++ b/patches.drivers/net-hns3-not-reset-TQP-in-the-DOWN-while-VF-resettin.patch
@@ -0,0 +1,35 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:51 +0800
+Subject: net: hns3: not reset TQP in the DOWN while VF resetting
+Patch-mainline: v5.2-rc1
+Git-commit: 146e92c13fdedf43a1ae211e85acde4631bb3c71
+References: bsc#1104353 FATE#326415 bsc#1134952
+
+Since the hardware does not handle mailboxes during reset and the
+hardware reset includes a TQP reset, it is unnecessary to reset the
+TQPs in hclgevf_ae_stop() while doing a VF reset. It is also
+unnecessary to reset the remaining TQPs when one TQP reset fails.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -2050,8 +2050,10 @@ static void hclgevf_ae_stop(struct hnae3
+
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+- for (i = 0; i < handle->kinfo.num_tqps; i++)
+- hclgevf_reset_tqp(handle, i);
++ if (hdev->reset_type != HNAE3_VF_RESET)
++ for (i = 0; i < handle->kinfo.num_tqps; i++)
++ if (hclgevf_reset_tqp(handle, i))
++ break;
+
+ /* reset tqp stats */
+ hclgevf_reset_tqp_stats(handle);
diff --git a/patches.drivers/net-hns3-not-reset-vport-who-not-alive-when-PF-reset.patch b/patches.drivers/net-hns3-not-reset-vport-who-not-alive-when-PF-reset.patch
new file mode 100644
index 0000000000..98c30e8410
--- /dev/null
+++ b/patches.drivers/net-hns3-not-reset-vport-who-not-alive-when-PF-reset.patch
@@ -0,0 +1,31 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:27 +0800
+Subject: net: hns3: not reset vport who not alive when PF reset
+Patch-mainline: v5.2-rc1
+Git-commit: cc645dfa89a747382beaf62d69daafe60fd1cd94
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+If a vport is not alive, there is no need to notify it of the reset
+before the PF asserts one. So before informing a vport of the reset,
+check its alive state first.
+
+Fixes: aa5c4f175be6 ("net: hns3: add reset handling for VF when doing PF reset")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2677,7 +2677,7 @@ static int hclge_set_all_vf_rst(struct h
+ return ret;
+ }
+
+- if (!reset)
++ if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ continue;
+
+ /* Inform VF to process the reset.
diff --git a/patches.drivers/net-hns3-optimize-the-barrier-using-when-cleaning-TX.patch b/patches.drivers/net-hns3-optimize-the-barrier-using-when-cleaning-TX.patch
new file mode 100644
index 0000000000..6fe14d7579
--- /dev/null
+++ b/patches.drivers/net-hns3-optimize-the-barrier-using-when-cleaning-TX.patch
@@ -0,0 +1,71 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:49 +0800
+Subject: net: hns3: optimize the barrier using when cleaning TX BD
+Patch-mainline: v5.2-rc1
+Git-commit: ce74370c2ce9a90c16167131f837e14b5e3c57ed
+References: bsc#1104353 FATE#326415 bsc#1134945
+
+Currently a barrier is used when cleaning each TX BD, which may
+cause performance degradation.
+
+This patch optimizes this to use a single barrier per cleaning
+round instead.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 29 ++++++++++++------------
+ 1 file changed, 15 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2187,20 +2187,25 @@ static void hns3_reuse_buffer(struct hns
+ ring->desc[i].rx.bd_base_info = 0;
+ }
+
+-static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
+- int *pkts)
++static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
++ int *bytes, int *pkts)
+ {
+ int ntc = ring->next_to_clean;
+ struct hns3_desc_cb *desc_cb;
+
+- desc_cb = &ring->desc_cb[ntc];
+- (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+- (*bytes) += desc_cb->length;
+- /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/
+- hns3_free_buffer_detach(ring, ntc);
++ while (head != ntc) {
++ desc_cb = &ring->desc_cb[ntc];
++ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
++ (*bytes) += desc_cb->length;
++ /* desc_cb will be cleaned, after hnae3_free_buffer_detach */
++ hns3_free_buffer_detach(ring, ntc);
+
+- if (++ntc == ring->desc_num)
+- ntc = 0;
++ if (++ntc == ring->desc_num)
++ ntc = 0;
++
++ /* Issue prefetch for next Tx descriptor */
++ prefetch(&ring->desc_cb[ntc]);
++ }
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * ring_space called by hns3_nic_net_xmit.
+@@ -2245,11 +2250,7 @@ void hns3_clean_tx_ring(struct hns3_enet
+
+ bytes = 0;
+ pkts = 0;
+- while (head != ring->next_to_clean) {
+- hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
+- /* Issue prefetch for next Tx descriptor */
+- prefetch(&ring->desc_cb[ring->next_to_clean]);
+- }
++ hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
+
+ ring->tqp_vector->tx_group.total_bytes += bytes;
+ ring->tqp_vector->tx_group.total_packets += pkts;
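The one-barrier-per-round scheme works because the clean path publishes its
progress with smp_store_release() and the transmit path reads it with
smp_load_acquire(): every plain write done before the release (freeing the
batch of descriptors) is guaranteed visible to a reader that observes the
new next_to_clean. A self-contained sketch of that pairing with generic
names (not the driver's actual structures):

    struct ring {
            int next_to_clean;      /* written by the clean path */
            int next_to_use;        /* written by the xmit path  */
            int desc_num;           /* ring size                 */
    };

    extern void free_desc(struct ring *r, int idx);   /* assumed helper */

    /* Clean path: free a whole batch, then publish progress once. */
    static void reclaim_batch(struct ring *r, int head)
    {
            int ntc = r->next_to_clean;

            while (head != ntc) {
                    free_desc(r, ntc);              /* plain writes */
                    if (++ntc == r->desc_num)
                            ntc = 0;
            }
            /* One release fence covers the whole batch. */
            smp_store_release(&r->next_to_clean, ntc);
    }

    /* Xmit path: the acquire pairs with the release above. */
    static int ring_space(struct ring *r)
    {
            int ntc = smp_load_acquire(&r->next_to_clean);

            return (ntc - r->next_to_use - 1 + r->desc_num) % r->desc_num;
    }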
diff --git a/patches.drivers/net-hns3-prevent-change-MTU-when-resetting.patch b/patches.drivers/net-hns3-prevent-change-MTU-when-resetting.patch
new file mode 100644
index 0000000000..b64e4f74b4
--- /dev/null
+++ b/patches.drivers/net-hns3-prevent-change-MTU-when-resetting.patch
@@ -0,0 +1,30 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:57 +0800
+Subject: net: hns3: prevent change MTU when resetting
+Patch-mainline: v5.2-rc1
+Git-commit: 6ff7ed8049ebf932643eee8680bb1d75691fa801
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+Changing the MTU is not allowed while the device is resetting, so
+this patch adds a reset-status check to hns3_nic_change_mtu().
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1591,6 +1591,9 @@ static int hns3_nic_change_mtu(struct ne
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret;
+
++ if (hns3_nic_resetting(netdev))
++ return -EBUSY;
++
+ if (!h->ae_algo->ops->set_mtu)
+ return -EOPNOTSUPP;
+
diff --git a/patches.drivers/net-hns3-prevent-double-free-in-hns3_put_ring_config.patch b/patches.drivers/net-hns3-prevent-double-free-in-hns3_put_ring_config.patch
new file mode 100644
index 0000000000..ffd97905eb
--- /dev/null
+++ b/patches.drivers/net-hns3-prevent-double-free-in-hns3_put_ring_config.patch
@@ -0,0 +1,76 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:54 +0800
+Subject: net: hns3: prevent double free in hns3_put_ring_config()
+Patch-mainline: v5.2-rc1
+Git-commit: 7b8f622e537aa87b52def78c37a8645d979fb7cc
+References: bsc#1104353 FATE#326415 bsc#1134950
+
+This patch adds a check to hns3_put_ring_config() to prevent a
+double free and, for better readability, moves the NULL assignment
+of priv->ring_data into hns3_put_ring_config().
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3486,6 +3486,7 @@ err:
+ }
+
+ devm_kfree(&pdev->dev, priv->ring_data);
++ priv->ring_data = NULL;
+ return ret;
+ }
+
+@@ -3494,12 +3495,16 @@ static void hns3_put_ring_config(struct
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
++ if (!priv->ring_data)
++ return;
++
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ devm_kfree(priv->dev, priv->ring_data[i].ring);
+ devm_kfree(priv->dev,
+ priv->ring_data[i + h->kinfo.num_tqps].ring);
+ }
+ devm_kfree(priv->dev, priv->ring_data);
++ priv->ring_data = NULL;
+ }
+
+ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
+@@ -3919,8 +3924,6 @@ static void hns3_client_uninit(struct hn
+
+ hns3_dbg_uninit(handle);
+
+- priv->ring_data = NULL;
+-
+ out_netdev_free:
+ free_netdev(netdev);
+ }
+@@ -4267,12 +4270,10 @@ err_uninit_ring:
+ hns3_uninit_all_ring(priv);
+ err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+- priv->ring_data = NULL;
+ err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
+ err_put_ring:
+ hns3_put_ring_config(priv);
+- priv->ring_data = NULL;
+
+ return ret;
+ }
+@@ -4334,7 +4335,6 @@ static int hns3_reset_notify_uninit_enet
+ netdev_err(netdev, "uninit ring error\n");
+
+ hns3_put_ring_config(priv);
+- priv->ring_data = NULL;
+
+ return ret;
+ }
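The combination used here -- bail out early when the pointer is already
NULL, and clear it again after freeing -- makes the teardown function
idempotent, so every error and uninit path can call it unconditionally.
A generic sketch of the pattern (hypothetical names, not the driver's
code):

    static void put_ring_config(struct device *dev, void **ring_data)
    {
            if (!*ring_data)        /* a second call becomes a no-op */
                    return;

            devm_kfree(dev, *ring_data);
            *ring_data = NULL;      /* re-arm the guard for the next caller */
    }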
diff --git a/patches.drivers/net-hns3-reduce-resources-use-in-kdump-kernel.patch b/patches.drivers/net-hns3-reduce-resources-use-in-kdump-kernel.patch
new file mode 100644
index 0000000000..3331718edc
--- /dev/null
+++ b/patches.drivers/net-hns3-reduce-resources-use-in-kdump-kernel.patch
@@ -0,0 +1,81 @@
+From: Yonglong Liu <liuyonglong@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:49 +0800
+Subject: net: hns3: reduce resources use in kdump kernel
+Patch-mainline: v5.2-rc1
+Git-commit: 962e31bdfce9dfe50af66ffe3b9014f227ffcf3b
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+When the kdump kernel starts, the HNS3 driver fails to register:
+[14.753340] hns3 0000:7d:00.0: Alloc umv space failed, want 512, get 0
+[14.795034] hns3 0000:7d:00.0: add uc mac address fail, ret =-22.
+
+By default the HNS3 driver uses about 512M of memory, while the
+memory reserved for the kdump kernel is usually only 576M, so the
+driver fails to register. This patch reduces the driver's memory
+use in the kdump kernel to about 16M.
+
+In addition, when the kdump kernel starts, the old unicast MAC
+address must be removed first, otherwise adding it again fails.
+
+Signed-off-by: Yonglong Liu <liuyonglong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 23 +++++++++++++++-
+ 1 file changed, 22 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -12,6 +12,7 @@
+ #include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <linux/if_vlan.h>
++#include <linux/crash_dump.h>
+ #include <net/rtnetlink.h>
+ #include "hclge_cmd.h"
+ #include "hclge_dcb.h"
+@@ -1015,6 +1016,23 @@ static int hclge_get_cap(struct hclge_de
+ return ret;
+ }
+
++static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
++{
++#define HCLGE_MIN_TX_DESC 64
++#define HCLGE_MIN_RX_DESC 64
++
++ if (!is_kdump_kernel())
++ return;
++
++ dev_info(&hdev->pdev->dev,
++ "Running kdump kernel. Using minimal resources\n");
++
++ /* minimal queue pairs equals to the number of vports */
++ hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
++ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
++ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
++}
++
+ static int hclge_configure(struct hclge_dev *hdev)
+ {
+ struct hclge_cfg cfg;
+@@ -1074,6 +1092,8 @@ static int hclge_configure(struct hclge_
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+
++ hclge_init_kdump_kernel_config(hdev);
++
+ return ret;
+ }
+
+@@ -6293,7 +6313,8 @@ static int hclge_set_mac_addr(struct hna
+ return -EINVAL;
+ }
+
+- if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
++ if ((!is_first || is_kdump_kernel()) &&
++ hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
+ dev_warn(&hdev->pdev->dev,
+ "remove old uc mac address fail.\n");
+
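The same gate can be applied in any driver that over-allocates for the
crash kernel: check is_kdump_kernel() once during configuration and clamp
every sizable resource before it is allocated. A minimal sketch under
assumed names (only is_kdump_kernel() and the two HCLGE_MIN_*_DESC values
come from the patch; the device struct is illustrative):

    #include <linux/crash_dump.h>

    struct my_dev {
            u16 num_tqps, num_vmdq_vport, num_req_vfs;
            u16 num_tx_desc, num_rx_desc;
    };

    static void clamp_for_kdump(struct my_dev *dev)
    {
            if (!is_kdump_kernel())
                    return;         /* normal kernel: keep full resources */

            /* one queue pair per vport: VMDq vports + VFs + the PF itself */
            dev->num_tqps = dev->num_vmdq_vport + dev->num_req_vfs + 1;
            dev->num_tx_desc = 64;  /* HCLGE_MIN_TX_DESC */
            dev->num_rx_desc = 64;  /* HCLGE_MIN_RX_DESC */
    }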
diff --git a/patches.drivers/net-hns3-refactor-BD-filling-for-l2l3l4-info.patch b/patches.drivers/net-hns3-refactor-BD-filling-for-l2l3l4-info.patch
new file mode 100644
index 0000000000..c0e31bc66f
--- /dev/null
+++ b/patches.drivers/net-hns3-refactor-BD-filling-for-l2l3l4-info.patch
@@ -0,0 +1,111 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:46 +0800
+Subject: net: hns3: refactor BD filling for l2l3l4 info
+Patch-mainline: v5.2-rc1
+Git-commit: 07918fcde144628f12048d5f95f28c40b073fba8
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+This patch separates the inner and outer l2l3l4 length handling in
+hns3_set_l2l3l4_len() as a preparation for combining the l2l3l4
+length and checksum handling for the inner and outer headers.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 64 +++++++++---------------
+ 1 file changed, 24 insertions(+), 40 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -741,65 +741,49 @@ static void hns3_set_l2l3l4_len(struct s
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+ {
++ unsigned char *l2_hdr = skb->data;
++ u8 l4_proto = ol4_proto;
+ union l3_hdr_info l3;
+ union l4_hdr_info l4;
+- unsigned char *l2_hdr;
+- u8 l4_proto = ol4_proto;
+- u32 ol2_len;
+- u32 ol3_len;
+- u32 ol4_len;
+ u32 l2_len;
+ u32 l3_len;
++ u32 l4_len;
+
+ l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+- /* compute L2 header size for normal packet, defined in 2 Bytes */
+- l2_len = l3.hdr - skb->data;
+- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+-
+- /* tunnel packet*/
++ /* tunnel packet */
+ if (skb->encapsulation) {
++ /* not MAC in UDP, MAC in GRE (0x6558) */
++ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE))
++ return;
++
+ /* compute OL2 header size, defined in 2 Bytes */
+- ol2_len = l2_len;
++ l2_len = l3.hdr - skb->data;
+ hns3_set_field(*ol_type_vlan_len_msec,
+- HNS3_TXD_L2LEN_S, ol2_len >> 1);
++ HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute OL3 header size, defined in 4 Bytes */
+- ol3_len = l4.hdr - l3.hdr;
++ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S,
+- ol3_len >> 2);
++ l3_len >> 2);
+
+- /* MAC in UDP, MAC in GRE (0x6558)*/
+- if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
+- /* switch MAC header ptr from outer to inner header.*/
+- l2_hdr = skb_inner_mac_header(skb);
+-
+- /* compute OL4 header size, defined in 4 Bytes. */
+- ol4_len = l2_hdr - l4.hdr;
+- hns3_set_field(*ol_type_vlan_len_msec,
+- HNS3_TXD_L4LEN_S, ol4_len >> 2);
+-
+- /* switch IP header ptr from outer to inner header */
+- l3.hdr = skb_inner_network_header(skb);
+-
+- /* compute inner l2 header size, defined in 2 Bytes. */
+- l2_len = l3.hdr - l2_hdr;
+- hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S,
+- l2_len >> 1);
+- } else {
+- /* skb packet types not supported by hardware,
+- * txbd len fild doesn't be filled.
+- */
+- return;
+- }
+-
+- /* switch L4 header pointer from outer to inner */
++ l2_hdr = skb_inner_mac_header(skb);
++ /* compute OL4 header size, defined in 4 Bytes. */
++ l4_len = l2_hdr - l4.hdr;
++ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S,
++ l4_len >> 2);
++
++ /* switch to inner header */
++ l2_hdr = skb_inner_mac_header(skb);
++ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+-
+ l4_proto = il4_proto;
+ }
+
++ l2_len = l3.hdr - l2_hdr;
++ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
++
+ /* compute inner(/normal) L3 header size, defined in 4 Bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
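The shifts in the hunk above come from the descriptor format: the BD
stores the L2 length in 2-byte units and the L3/L4 lengths in 4-byte
units, so the byte counts obtained by subtracting header pointers are
shifted right by 1 or 2. A small worked sketch in plain C with
illustrative header sizes:

    #include <stdio.h>

    int main(void)
    {
            /* typical untagged IPv4/TCP headers, in bytes */
            unsigned int l2_len = 14;  /* Ethernet */
            unsigned int l3_len = 20;  /* IPv4     */
            unsigned int l4_len = 20;  /* TCP      */

            /* field values as hns3_set_l2l3l4_len() would compute them */
            printf("L2 field: %u (2-byte units)\n", l2_len >> 1); /* 7 */
            printf("L3 field: %u (4-byte units)\n", l3_len >> 2); /* 5 */
            printf("L4 field: %u (4-byte units)\n", l4_len >> 2); /* 5 */
            return 0;
    }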
diff --git a/patches.drivers/net-hns3-refine-tx-timeout-count-handle.patch b/patches.drivers/net-hns3-refine-tx-timeout-count-handle.patch
new file mode 100644
index 0000000000..d6f75441af
--- /dev/null
+++ b/patches.drivers/net-hns3-refine-tx-timeout-count-handle.patch
@@ -0,0 +1,44 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:40 +0800
+Subject: net: hns3: refine tx timeout count handle
+Patch-mainline: v5.2-rc1
+Git-commit: beab694aa32afb7c785c55f075628d9a21ed8011
+References: bsc#1104353 FATE#326415 bsc#1134990
+
+In the current code, tx_timeout_count is used before it is
+increased, so the printed value is still 0 when a TX timeout
+happens, e.g.
+"hns3 0000:7d:00.3 eth3: tx_timeout count: 0, queue id: 0, SW_NTU:
+ 0xa6, SW_NTC: 0xa4, HW_HEAD: 0xa4, HW_TAIL: 0xa6, INT: 0x1"
+
+tx_timeout_count should be updated before it is used.
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1655,6 +1655,8 @@ static bool hns3_get_tx_timeo_queue_info
+ return false;
+ }
+
++ priv->tx_timeout_count++;
++
+ tx_ring = priv->ring_data[timeout_queue].ring;
+
+ hw_head = readl_relaxed(tx_ring->tqp->io_base +
+@@ -1682,8 +1684,6 @@ static void hns3_nic_net_timeout(struct
+ if (!hns3_get_tx_timeo_queue_info(ndev))
+ return;
+
+- priv->tx_timeout_count++;
+-
+ /* request the reset, and let the hclge to determine
+ * which reset level should be done
+ */
diff --git a/patches.drivers/net-hns3-remove-redundant-assignment-of-l2_hdr-to-it.patch b/patches.drivers/net-hns3-remove-redundant-assignment-of-l2_hdr-to-it.patch
new file mode 100644
index 0000000000..e967376a04
--- /dev/null
+++ b/patches.drivers/net-hns3-remove-redundant-assignment-of-l2_hdr-to-it.patch
@@ -0,0 +1,30 @@
+From: Colin Ian King <colin.king@canonical.com>
+Date: Wed, 8 May 2019 11:51:35 +0100
+Subject: net: hns3: remove redundant assignment of l2_hdr to itself
+Patch-mainline: v5.2-rc1
+Git-commit: c264ed44d857c50f43be08572668afa374bf6a48
+References: bsc#1104353 FATE#326415
+
+The pointer l2_hdr is being assigned to itself; this is redundant
+and can be removed.
+
+Addresses-Coverity: ("Evaluation order violation")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Reviewed-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -810,7 +810,7 @@ static int hns3_set_l2l3l4(struct sk_buf
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+ {
+- unsigned char *l2_hdr = l2_hdr = skb->data;
++ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
diff --git a/patches.drivers/net-hns3-remove-reset-after-command-send-failed.patch b/patches.drivers/net-hns3-remove-reset-after-command-send-failed.patch
new file mode 100644
index 0000000000..a9bb3964d8
--- /dev/null
+++ b/patches.drivers/net-hns3-remove-reset-after-command-send-failed.patch
@@ -0,0 +1,68 @@
+From: Weihang Li <liweihang@hisilicon.com>
+Date: Thu, 25 Apr 2019 20:42:55 +0800
+Subject: net: hns3: remove reset after command send failed
+Patch-mainline: v5.2-rc1
+Git-commit: 96490a1c09ce8af322f646b20d1dd6255e2e9ae2
+References: bsc#1104353 FATE#326415 bsc#1134949
+
+It is meaningless to trigger a reset when sending a command to the
+IMP fails, because the failure is usually caused by a lack of
+authority, an illegal command, and so on. When that happens, we
+just need to return the status code for further debugging.
+
+Signed-off-by: Weihang Li <liweihang@hisilicon.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -1653,8 +1653,6 @@ int hclge_handle_hw_msix_error(struct hc
+ if (ret) {
+ dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ ret);
+- /* reset everything for now */
+- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ return ret;
+ }
+
+@@ -1675,8 +1673,6 @@ int hclge_handle_hw_msix_error(struct hc
+ if (ret) {
+ dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
+ ret);
+- /* reset everything for now */
+- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+@@ -1710,8 +1706,6 @@ int hclge_handle_hw_msix_error(struct hc
+ if (ret) {
+ dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
+ ret);
+- /* reset everything for now */
+- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+@@ -1725,8 +1719,6 @@ int hclge_handle_hw_msix_error(struct hc
+ if (ret) {
+ dev_err(dev, "query all pf msix int cmd failed (%d)\n",
+ ret);
+- /* reset everything for now */
+- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ goto msi_error;
+ }
+
+@@ -1767,8 +1759,6 @@ int hclge_handle_hw_msix_error(struct hc
+ if (ret) {
+ dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
+ ret);
+- /* reset everything for now */
+- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+ }
+
+ /* query and clear mac tnl interruptions */
diff --git a/patches.drivers/net-hns3-return-0-and-print-warning-when-hit-duplica.patch b/patches.drivers/net-hns3-return-0-and-print-warning-when-hit-duplica.patch
new file mode 100644
index 0000000000..c90a54988b
--- /dev/null
+++ b/patches.drivers/net-hns3-return-0-and-print-warning-when-hit-duplica.patch
@@ -0,0 +1,39 @@
+From: Peng Li <lipeng321@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:51 +0800
+Subject: net: hns3: return 0 and print warning when hit duplicate MAC
+Patch-mainline: v5.2-rc1
+Git-commit: 72110b567479f0282489a9b3747e76d8c67d75f5
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+When the same MAC address is set on two different functions of one
+port, the IMP returns an error because the later setting could
+modify the original one. This causes bonding to fail for two VFs
+of one port.
+
+With this patch the driver just prints a warning and returns 0, so
+setting a duplicate MAC address succeeds without actually
+configuring the hardware.
+
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -5962,8 +5962,11 @@ int hclge_add_uc_addr_common(struct hclg
+ }
+
+ /* check if we just hit the duplicate */
+- if (!ret)
+- ret = -EINVAL;
++ if (!ret) {
++ dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
++ vport->vport_id, addr);
++ return 0;
++ }
+
+ dev_err(&hdev->pdev->dev,
+ "PF failed to add unicast entry(%pM) in the MAC table\n",
diff --git a/patches.drivers/net-hns3-set-dividual-reset-level-for-all-RAS-and-MS.patch b/patches.drivers/net-hns3-set-dividual-reset-level-for-all-RAS-and-MS.patch
new file mode 100644
index 0000000000..3e59db2b52
--- /dev/null
+++ b/patches.drivers/net-hns3-set-dividual-reset-level-for-all-RAS-and-MS.patch
@@ -0,0 +1,1271 @@
+From: Weihang Li <liweihang@hisilicon.com>
+Date: Sun, 14 Apr 2019 09:47:43 +0800
+Subject: net: hns3: set dividual reset level for all RAS and MSI-X errors
+Patch-mainline: v5.2-rc1
+Git-commit: c41e672d1e6a51b2b21a23ade4048b414ec76624
+References: bsc#1104353 FATE#326415 bsc#1135046
+
+According to the hardware description, the reset levels that should
+be triggered are not consistent within a module. For example, for
+the SSU common errors, the first two bits need no reset, but the
+other bits need a global reset.
+
+This patch sets a separate reset level for each RAS and MSI-X
+interrupt by adding a reset_level field to struct hclge_hw_error,
+and fixes some incorrect reset levels.
+
+Signed-off-by: Weihang Li <liweihang@hisilicon.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 956 ++++++++++-------
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 1
+ 2 files changed, 618 insertions(+), 339 deletions(-)
+
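The mechanism is a per-entry reset level: every row of an error table now
carries its own HNAE3_*_RESET value, and the handler walking the table
requests that row's level instead of one hard-coded level per module. A
minimal sketch of the dispatch (the struct layout follows the patch; the
walk loop is illustrative, not the driver's exact code):

    struct hclge_hw_error {
            u32 int_msk;                       /* bit in the status word */
            const char *msg;                   /* log text               */
            enum hnae3_reset_type reset_level; /* new in this patch      */
    };

    static void log_and_request(struct device *dev,
                                const struct hclge_hw_error *err,
                                u32 status, unsigned long *reset_requests)
    {
            for (; err->msg; err++) {          /* stop at the sentinel   */
                    if (!(err->int_msk & status))
                            continue;
                    dev_warn(dev, "%s found\n", err->msg);
                    if (err->reset_level != HNAE3_NONE_RESET)
                            set_bit(err->reset_level, reset_requests);
            }
    }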
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+@@ -4,287 +4,468 @@
+ #include "hclge_err.h"
+
+ static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
+- { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" },
+- { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" },
+- { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" },
+- { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" },
+- { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" },
+- { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" },
+- { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" },
+- { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" },
+- { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" },
++ { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
+- { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" },
+- { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" },
+- { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" },
+- { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" },
+- { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" },
+- { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" },
+- { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" },
+- { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" },
+- { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" },
+- { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" },
+- { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" },
+- { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" },
+- { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" },
+- { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" },
+- { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" },
+- { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" },
++ { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
+- { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" },
+- { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" },
+- { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" },
+- { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" },
+- { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" },
+- { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" },
++ { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
+- { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" },
+- { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" },
++ { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_igu_int[] = {
+- { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" },
+- { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" },
++ { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
++ .reset_level = HNAE3_CORE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
+- { .int_msk = BIT(0), .msg = "rx_buf_overflow" },
+- { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" },
+- { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" },
+- { .int_msk = BIT(3), .msg = "tx_buf_overflow" },
+- { .int_msk = BIT(4), .msg = "tx_buf_underrun" },
+- { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" },
++ { .int_msk = BIT(0), .msg = "rx_buf_overflow",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(3), .msg = "tx_buf_overflow",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(4), .msg = "tx_buf_underrun",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
++ .reset_level = HNAE3_CORE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ncsi_err_int[] = {
+- { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" },
++ { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
++ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
+- { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" },
+- { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" },
+- { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" },
+- { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" },
+- { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" },
+- { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" },
+- { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" },
+- { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" },
+- { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" },
+- { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" },
+- { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" },
+- { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" },
+- { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" },
+- { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" },
+- { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" },
+- { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" },
+- { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" },
+- { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" },
+- { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" },
+- { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" },
+- { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" },
+- { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" },
+- { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" },
+- { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" },
+- { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" },
+- { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" },
+- { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" },
+- { .int_msk = BIT(27),
+- .msg = "flow_director_ad_mem0_ecc_mbit_err" },
+- { .int_msk = BIT(28),
+- .msg = "flow_director_ad_mem1_ecc_mbit_err" },
+- { .int_msk = BIT(29),
+- .msg = "rx_vlan_tag_memory_ecc_mbit_err" },
+- { .int_msk = BIT(30),
+- .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" },
++ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
+- { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" },
+- { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" },
++ { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
++ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
+- { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" },
+- { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" },
+- { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" },
+- { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" },
+- { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" },
+- { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" },
++ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_tm_sch_rint[] = {
+- { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" },
+- { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" },
+- { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" },
+- { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" },
+- { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" },
+- { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" },
+- { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" },
+- { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" },
+- { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" },
+- { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" },
+- { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" },
+- { .int_msk = BIT(12),
+- .msg = "tm_sch_port_shap_offset_fifo_wr_err" },
+- { .int_msk = BIT(13),
+- .msg = "tm_sch_port_shap_offset_fifo_rd_err" },
+- { .int_msk = BIT(14),
+- .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" },
+- { .int_msk = BIT(15),
+- .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" },
+- { .int_msk = BIT(16),
+- .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" },
+- { .int_msk = BIT(17),
+- .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" },
+- { .int_msk = BIT(18),
+- .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" },
+- { .int_msk = BIT(19),
+- .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" },
+- { .int_msk = BIT(20),
+- .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" },
+- { .int_msk = BIT(21),
+- .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" },
+- { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" },
+- { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" },
+- { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" },
+- { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" },
+- { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" },
+- { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" },
+- { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" },
+- { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" },
+- { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" },
+- { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" },
++ { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
+- { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" },
+- { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" },
+- { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" },
+- { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" },
+- { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" },
+- { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" },
+- { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" },
+- { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" },
+- { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" },
+- { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" },
+- { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" },
+- { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" },
+- { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" },
+- { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" },
+- { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" },
+- { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" },
+- { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" },
+- { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" },
++ { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
+- { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
+- { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
+- { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
+- { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
+- { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
+- { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
+- { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
+- { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
+- { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
+- { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
+- { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
++ { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
+- { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
+- { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
+- { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
+- { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
+- { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
+- { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
+- { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
+- { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
+- { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
+- { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
+- { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
+- { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
+- { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
+- { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
++ { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
+- { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
+- { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
+- { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
+- { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
+- { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
+- { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
+- { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
+- { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
+- { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
+- { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
+- { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
+- { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
+- { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
+- { .int_msk = BIT(26), .msg = "rd_bus_err" },
+- { .int_msk = BIT(27), .msg = "wr_bus_err" },
+- { .int_msk = BIT(28), .msg = "reg_search_miss" },
+- { .int_msk = BIT(29), .msg = "rx_q_search_miss" },
+- { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
+- { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
++ { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(26), .msg = "rd_bus_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(27), .msg = "wr_bus_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(28), .msg = "reg_search_miss",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(29), .msg = "rx_q_search_miss",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
+- { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
+- { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
+- { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
+- { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
++ { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
++ .reset_level = HNAE3_CORE_RESET },
++ { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
++ .reset_level = HNAE3_CORE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
+- { .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
+- { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
+- { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
+- { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
+- { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
+- { .int_msk = BIT(5), .msg = "buf_wait_timeout" },
++ { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
++ .reset_level = HNAE3_FUNC_RESET },
++ { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
++ .reset_level = HNAE3_FUNC_RESET },
++ { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
++ .reset_level = HNAE3_FUNC_RESET },
++ { .int_msk = BIT(5), .msg = "buf_wait_timeout",
++ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
+- { .int_msk = BIT(0), .msg = "buf_sum_err" },
+- { .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
+- { .int_msk = BIT(2), .msg = "ppp_mbid_err" },
+- { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
+- { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
+- { .int_msk = BIT(5), .msg = "cks_edit_position_err" },
+- { .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
+- { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
+- { .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
+- { .int_msk = BIT(9), .msg = "vlan_num_in_err" },
++ { .int_msk = BIT(0), .msg = "buf_sum_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(2), .msg = "ppp_mbid_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "cks_edit_position_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "vlan_num_in_err",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ #define HCLGE_SSU_MEM_ECC_ERR(x) \
+- { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
++ { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
++ .reset_level = HNAE3_GLOBAL_RESET }
+
+ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
+ HCLGE_SSU_MEM_ECC_ERR(0),
+@@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge
+ };
+
+ static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
+- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
+- { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
+- { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
+- { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
+- { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
+- { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
+- { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
+- { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
+- { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
+- { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
+- { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
+- { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
+- { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
++ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
+- { .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
+- { .int_msk = BIT(1), .msg = "ig_host_inf_int" },
+- { .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
+- { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
+- { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
+- { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
+- { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
+- { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
+- { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
+- { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
+- { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
+- { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
+- { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
+- { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
+- { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
+- { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
+- { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
+- { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
+- { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
+- { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
+- { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
+- { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
+- { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
+- { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
++ { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(1), .msg = "ig_host_inf_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
+- { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
+- { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
+- { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
+- { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
++ { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+ static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
+- { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
+- { .int_msk = BIT(9), .msg = "low_water_line_err_port" },
+- { .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
++ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
++ { .int_msk = BIT(9), .msg = "low_water_line_err_port",
++ .reset_level = HNAE3_NONE_RESET },
++ { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
++ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+ };
+
+@@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge
+ { /* sentinel */ }
+ };
+
+-static void hclge_log_error(struct device *dev, char *reg,
+- const struct hclge_hw_error *err,
+- u32 err_sts)
++static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
++ const struct hclge_hw_error *err,
++ u32 err_sts)
+ {
++ enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
++ bool need_reset = false;
++
+ while (err->msg) {
+- if (err->int_msk & err_sts)
++ if (err->int_msk & err_sts) {
+ dev_warn(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
++ if (err->reset_level != HNAE3_NONE_RESET &&
++ err->reset_level >= reset_level) {
++ reset_level = err->reset_level;
++ need_reset = true;
++ }
++ }
+ err++;
+ }
++ if (need_reset)
++ return reset_level;
++ else
++ return HNAE3_NONE_RESET;
+ }
+
+ /* hclge_cmd_query_error: read the error information
+@@ -826,6 +1064,7 @@ static int hclge_handle_mpf_ras_error(st
+ int num)
+ {
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
++ enum hnae3_reset_type reset_level;
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+@@ -845,78 +1084,94 @@ static int hclge_handle_mpf_ras_error(st
+ /* log HNS common errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status) {
+- hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+- &hclge_imp_tcm_ecc_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
++ reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
++ &hclge_imp_tcm_ecc_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status) {
+- hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+- &hclge_cmdq_nic_mem_ecc_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
++ reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
++ &hclge_cmdq_nic_mem_ecc_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
+ }
+
+ status = le32_to_cpu(desc[0].data[3]);
+ if (status) {
+- hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+- &hclge_tqp_int_ecc_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
++ &hclge_tqp_int_ecc_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ status = le32_to_cpu(desc[0].data[4]);
+ if (status) {
+- hclge_log_error(dev, "MSIX_ECC_INT_STS",
+- &hclge_msix_sram_ecc_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
++ &hclge_msix_sram_ecc_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ /* log SSU(Storage Switch Unit) errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status) {
+- hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+- &hclge_ssu_mem_ecc_err_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
++ &hclge_ssu_mem_ecc_err_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
+ if (status) {
+ dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
+ if (status) {
+- hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+- &hclge_ssu_com_err_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
++ reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
++ &hclge_ssu_com_err_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ /* log IGU(Ingress Unit) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
+- if (status)
+- hclge_log_error(dev, "IGU_INT_STS",
+- &hclge_igu_int[0], status);
++ if (status) {
++ reset_level = hclge_log_error(dev, "IGU_INT_STS",
++ &hclge_igu_int[0], status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
++ }
+
+ /* log PPP(Programmable Packet Process) errors */
+ desc_data = (__le32 *)&desc[4];
+ status = le32_to_cpu(*(desc_data + 1));
+- if (status)
+- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+- &hclge_ppp_mpf_abnormal_int_st1[0], status);
++ if (status) {
++ reset_level =
++ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
++ &hclge_ppp_mpf_abnormal_int_st1[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
++ }
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
+- if (status)
+- hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+- &hclge_ppp_mpf_abnormal_int_st3[0], status);
++ if (status) {
++ reset_level =
++ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
++ &hclge_ppp_mpf_abnormal_int_st3[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
++ }
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[5];
+@@ -924,55 +1179,60 @@ static int hclge_handle_mpf_ras_error(st
+ if (status) {
+ dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
+ "rpu_rx_pkt_ecc_mbit_err");
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+ }
+
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status) {
+- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+- &hclge_ppu_mpf_abnormal_int_st2[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level =
++ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
++ &hclge_ppu_mpf_abnormal_int_st2[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
+ if (status) {
+- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+- &hclge_ppu_mpf_abnormal_int_st3[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level =
++ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
++ &hclge_ppu_mpf_abnormal_int_st3[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ /* log TM(Traffic Manager) errors */
+ desc_data = (__le32 *)&desc[6];
+ status = le32_to_cpu(*desc_data);
+ if (status) {
+- hclge_log_error(dev, "TM_SCH_RINT",
+- &hclge_tm_sch_rint[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level = hclge_log_error(dev, "TM_SCH_RINT",
++ &hclge_tm_sch_rint[0], status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ /* log QCN(Quantized Congestion Control) errors */
+ desc_data = (__le32 *)&desc[7];
+ status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
+ if (status) {
+- hclge_log_error(dev, "QCN_FIFO_RINT",
+- &hclge_qcn_fifo_rint[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
++ &hclge_qcn_fifo_rint[0], status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
+ if (status) {
+- hclge_log_error(dev, "QCN_ECC_RINT",
+- &hclge_qcn_ecc_rint[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
++ &hclge_qcn_ecc_rint[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ /* log NCSI errors */
+ desc_data = (__le32 *)&desc[9];
+ status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
+ if (status) {
+- hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+- &hclge_ncsi_err_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
++ reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
++ &hclge_ncsi_err_int[0], status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ /* clear all main PF RAS errors */
+@@ -1000,6 +1260,7 @@ static int hclge_handle_pf_ras_error(str
+ {
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct device *dev = &hdev->pdev->dev;
++ enum hnae3_reset_type reset_level;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+@@ -1018,38 +1279,47 @@ static int hclge_handle_pf_ras_error(str
+ /* log SSU(Storage Switch Unit) errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status) {
+- hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+- &hclge_ssu_port_based_err_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
++ reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
++ &hclge_ssu_port_based_err_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status) {
+- hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+- &hclge_ssu_fifo_overflow_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
++ reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
++ &hclge_ssu_fifo_overflow_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ status = le32_to_cpu(desc[0].data[2]);
+ if (status) {
+- hclge_log_error(dev, "SSU_ETS_TCG_INT",
+- &hclge_ssu_ets_tcg_int[0], status);
+- HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
++ reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
++ &hclge_ssu_ets_tcg_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+ }
+
+ /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
+- if (status)
+- hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+- &hclge_igu_egu_tnl_int[0], status);
++ if (status) {
++ reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
++ &hclge_igu_egu_tnl_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
++ }
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
+- if (status)
+- hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+- &hclge_ppu_pf_abnormal_int[0], status);
++ if (status) {
++ reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
++ &hclge_ppu_pf_abnormal_int[0],
++ status);
++ HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
++ }
+
+ /* clear all PF RAS errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+@@ -1343,14 +1613,12 @@ int hclge_handle_hw_msix_error(struct hc
+ {
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
++ enum hnae3_reset_type reset_level;
+ struct hclge_desc desc_bd;
+ struct hclge_desc *desc;
+ __le32 *desc_data;
+- int ret = 0;
+ u32 status;
+-
+- /* set default handling */
+- set_bit(HNAE3_FUNC_RESET, reset_requests);
++ int ret;
+
+ /* query the number of bds for the MSIx int status */
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
+@@ -1390,9 +1658,10 @@ int hclge_handle_hw_msix_error(struct hc
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data);
+ if (status) {
+- hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+- &hclge_mac_afifo_tnl_int[0], status);
+- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
++ reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
++ &hclge_mac_afifo_tnl_int[0],
++ status);
++ set_bit(reset_level, reset_requests);
+ }
+
+ /* log PPU(RCB) MPF errors */
+@@ -1400,9 +1669,11 @@ int hclge_handle_hw_msix_error(struct hc
+ status = le32_to_cpu(*(desc_data + 2)) &
+ HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
+ if (status) {
+- hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+- &hclge_ppu_mpf_abnormal_int_st2[0], status);
+- set_bit(HNAE3_CORE_RESET, reset_requests);
++ reset_level =
++ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
++ &hclge_ppu_mpf_abnormal_int_st2[0],
++ status);
++ set_bit(reset_level, reset_requests);
+ }
+
+ /* clear all main PF MSIx errors */
+@@ -1436,24 +1707,31 @@ int hclge_handle_hw_msix_error(struct hc
+ /* log SSU PF errors */
+ status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
+ if (status) {
+- hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+- &hclge_ssu_port_based_pf_int[0], status);
+- set_bit(HNAE3_GLOBAL_RESET, reset_requests);
++ reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
++ &hclge_ssu_port_based_pf_int[0],
++ status);
++ set_bit(reset_level, reset_requests);
+ }
+
+ /* read and log PPP PF errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*desc_data);
+- if (status)
+- hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+- &hclge_ppp_pf_abnormal_int[0], status);
++ if (status) {
++ reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
++ &hclge_ppp_pf_abnormal_int[0],
++ status);
++ set_bit(reset_level, reset_requests);
++ }
+
+ /* log PPU(RCB) PF errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
+- if (status)
+- hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+- &hclge_ppu_pf_abnormal_int[0], status);
++ if (status) {
++ reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
++ &hclge_ppu_pf_abnormal_int[0],
++ status);
++ set_bit(reset_level, reset_requests);
++ }
+
+ /* clear all PF MSIx errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+@@ -112,6 +112,7 @@ struct hclge_hw_blk {
+ struct hclge_hw_error {
+ u32 int_msk;
+ const char *msg;
++ enum hnae3_reset_type reset_level;
+ };
+
+ int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
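
For illustration, here is a minimal, self-contained sketch of the reset-level
aggregation that the hclge_log_error() rewrite above introduces: every asserted
error bit is logged, and the most severe reset level requested by any of them
is returned. The enum values, table, and names below are simplified stand-ins
for the driver's own definitions (the real function also starts from
HNAE3_FUNC_RESET and tracks a separate need_reset flag).

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

/* Simplified stand-in for enum hnae3_reset_type: a larger value
 * means a wider-scope, more severe reset.
 */
enum reset_type { NONE_RESET, FUNC_RESET, CORE_RESET, GLOBAL_RESET };

struct hw_error {
	uint32_t int_msk;
	const char *msg;
	enum reset_type reset_level;
};

/* Log every asserted bit and return the most severe reset level any
 * of them requests (NONE_RESET if no bit asks for a reset).
 */
static enum reset_type log_error(const struct hw_error *err, uint32_t sts)
{
	enum reset_type level = NONE_RESET;

	for (; err->msg; err++) {
		if (!(err->int_msk & sts))
			continue;
		printf("%s found [error status=0x%x]\n", err->msg, sts);
		if (err->reset_level > level)
			level = err->reset_level;
	}
	return level;
}

int main(void)
{
	static const struct hw_error tbl[] = {
		{ .int_msk = BIT(0), .msg = "minor_err",
		  .reset_level = NONE_RESET },
		{ .int_msk = BIT(1), .msg = "fatal_err",
		  .reset_level = GLOBAL_RESET },
		{ /* sentinel */ }
	};

	/* both bits set: GLOBAL_RESET, the most severe, wins */
	printf("reset level = %d\n", log_error(tbl, BIT(0) | BIT(1)));
	return 0;
}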
diff --git a/patches.drivers/net-hns3-set-up-the-vport-alive-state-while-reinitia.patch b/patches.drivers/net-hns3-set-up-the-vport-alive-state-while-reinitia.patch
new file mode 100644
index 0000000000..b4f1928289
--- /dev/null
+++ b/patches.drivers/net-hns3-set-up-the-vport-alive-state-while-reinitia.patch
@@ -0,0 +1,38 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:26 +0800
+Subject: net: hns3: set up the vport alive state while reinitializing
+Patch-mainline: v5.2-rc1
+Git-commit: cd513a69750b4be20b8c077e05c5112f0fd014f2
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+When reinitializing, the vport alive state needs to be set up.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -4074,10 +4074,18 @@ static int hns3_reset_notify_init_enet(s
+ if (ret)
+ goto err_uninit_vector;
+
++ ret = hns3_client_start(handle);
++ if (ret) {
++ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
++ goto err_uninit_ring;
++ }
++
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
++err_uninit_ring:
++ hns3_uninit_all_ring(priv);
+ err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ priv->ring_data = NULL;
diff --git a/patches.drivers/net-hns3-set-vport-alive-state-to-default-while-rese.patch b/patches.drivers/net-hns3-set-vport-alive-state-to-default-while-rese.patch
new file mode 100644
index 0000000000..45c89bb53c
--- /dev/null
+++ b/patches.drivers/net-hns3-set-vport-alive-state-to-default-while-rese.patch
@@ -0,0 +1,31 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:25 +0800
+Subject: net: hns3: set vport alive state to default while resetting
+Patch-mainline: v5.2-rc1
+Git-commit: 0f14c5b1a9c9d082d9b567e3775e299ec075721d
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+When resetting, the vport alive state should be set to its default;
+otherwise the alive state of a vport whose driver is not running is
+wrong until the timer gets around to checking it.
+
+Fixes: a6d818e31d08 ("net: hns3: Add vport alive state checking support")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -7742,7 +7742,7 @@ static void hclge_reset_vport_state(stru
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+- hclge_vport_start(vport);
++ hclge_vport_stop(vport);
+ vport++;
+ }
+ }
diff --git a/patches.drivers/net-hns3-simplify-hclgevf_cmd_csq_clean.patch b/patches.drivers/net-hns3-simplify-hclgevf_cmd_csq_clean.patch
new file mode 100644
index 0000000000..1f2177a70a
--- /dev/null
+++ b/patches.drivers/net-hns3-simplify-hclgevf_cmd_csq_clean.patch
@@ -0,0 +1,72 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:54 +0800
+Subject: net: hns3: simplify hclgevf_cmd_csq_clean
+Patch-mainline: v5.2-rc1
+Git-commit: ffd0a922cdea3f37438aeb76a154da1775e82626
+References: bsc#1104353 FATE#326415
+
+The csq is used as a ring buffer, so the value of a desc will be
+overwritten on its next use. This patch removes the unnecessary memset
+and just updates next_to_clean (see the standalone sketch of the
+wrap-around arithmetic after this patch).
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 35 ++++++++++-----
+ 1 file changed, 24 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+@@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hcl
+ return ring->desc_num - used - 1;
+ }
+
++static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
++ int head)
++{
++ int ntu = ring->next_to_use;
++ int ntc = ring->next_to_clean;
++
++ if (ntu > ntc)
++ return head >= ntc && head <= ntu;
++
++ return head >= ntc || head <= ntu;
++}
++
+ static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+ {
++ struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+- u16 ntc = csq->next_to_clean;
+- struct hclgevf_desc *desc;
+ int clean = 0;
+ u32 head;
+
+- desc = &csq->desc[ntc];
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+- while (head != ntc) {
+- memset(desc, 0, sizeof(*desc));
+- ntc++;
+- if (ntc == csq->desc_num)
+- ntc = 0;
+- desc = &csq->desc[ntc];
+- clean++;
++ rmb(); /* Make sure head is ready before touch any data */
++
++ if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
++ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
++ csq->next_to_use, csq->next_to_clean);
++ dev_warn(&hdev->pdev->dev,
++ "Disabling any further commands to IMP firmware\n");
++ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
++ return -EIO;
+ }
+- csq->next_to_clean = ntc;
+
++ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
++ csq->next_to_clean = head;
+ return clean;
+ }
+
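
For illustration, a standalone sketch of the wrap-around arithmetic used by
the rewritten hclgevf_cmd_csq_clean() above: the hardware-reported head
pointer is first validated against the busy region of the ring, and the
number of cleaned descriptors then falls out of a single modular
subtraction. The ring size and struct below are hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define DESC_NUM 32	/* hypothetical ring size */

struct ring {
	int next_to_use;
	int next_to_clean;
};

static bool head_is_valid(const struct ring *r, int head)
{
	/* head must lie in the busy region [next_to_clean, next_to_use],
	 * which may wrap around the end of the ring
	 */
	if (r->next_to_use > r->next_to_clean)
		return head >= r->next_to_clean && head <= r->next_to_use;

	return head >= r->next_to_clean || head <= r->next_to_use;
}

static int csq_clean(struct ring *r, int head)
{
	int clean;

	if (!head_is_valid(r, head))
		return -1;	/* the driver disables further commands here */

	/* modular distance from next_to_clean to head */
	clean = (head - r->next_to_clean + DESC_NUM) % DESC_NUM;
	r->next_to_clean = head;
	return clean;
}

int main(void)
{
	struct ring r = { .next_to_use = 5, .next_to_clean = 30 };

	/* head wrapped past the end: (2 - 30 + 32) % 32 = 4 cleaned */
	printf("cleaned %d descriptors\n", csq_clean(&r, 2));
	return 0;
}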
diff --git a/patches.drivers/net-hns3-some-cleanup-for-struct-hns3_enet_ring.patch b/patches.drivers/net-hns3-some-cleanup-for-struct-hns3_enet_ring.patch
new file mode 100644
index 0000000000..89200950d1
--- /dev/null
+++ b/patches.drivers/net-hns3-some-cleanup-for-struct-hns3_enet_ring.patch
@@ -0,0 +1,70 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:51 +0800
+Subject: net: hns3: some cleanup for struct hns3_enet_ring
+Patch-mainline: v5.2-rc1
+Git-commit: 845e0d1d5290c3b242aa76f37b4a5cae287b6f75
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+This patch removes some unused fields in struct hns3_enet_ring, makes
+the ring_to_dev macro use ring->dev, and uses dev consistently in
+hns3_fill_desc.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +-
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 9 +--------
+ 2 files changed, 2 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1051,7 +1051,7 @@ static int hns3_fill_desc(struct hns3_en
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+- if (unlikely(dma_mapping_error(ring->dev, dma))) {
++ if (unlikely(dma_mapping_error(dev, dma))) {
+ ring->stats.sw_err_cnt++;
+ return -ENOMEM;
+ }
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -401,7 +401,6 @@ struct hns3_enet_ring {
+ struct hns3_enet_ring *next;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hnae3_queue *tqp;
+- char ring_name[HNS3_RING_NAME_LEN];
+ struct device *dev; /* will be used for DMA mapping of descriptors */
+
+ /* statistic */
+@@ -411,9 +410,6 @@ struct hns3_enet_ring {
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+- u16 max_desc_num_per_pkt;
+- u16 max_raw_data_sz_per_desc;
+- u16 max_pkt_size;
+ int next_to_use; /* idx of next spare desc */
+
+ /* idx of lastest sent desc, the ring is empty when equal to
+@@ -427,9 +423,6 @@ struct hns3_enet_ring {
+
+ u32 flag; /* ring attribute */
+
+- int numa_node;
+- cpumask_t affinity_mask;
+-
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
+@@ -629,7 +622,7 @@ static inline bool hns3_nic_resetting(st
+ #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
+ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
+
+-#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev)
++#define ring_to_dev(ring) ((ring)->dev)
+
+ #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
diff --git a/patches.drivers/net-hns3-split-function-hnae3_match_n_instantiate.patch b/patches.drivers/net-hns3-split-function-hnae3_match_n_instantiate.patch
new file mode 100644
index 0000000000..9567972fe2
--- /dev/null
+++ b/patches.drivers/net-hns3-split-function-hnae3_match_n_instantiate.patch
@@ -0,0 +1,128 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Thu, 4 Apr 2019 16:17:59 +0800
+Subject: net: hns3: split function hnae3_match_n_instantiate()
+Patch-mainline: v5.2-rc1
+Git-commit: d223dfa40a8f3811a56b3682a9377d7ada73f507
+References: bsc#1104353 FATE#326415
+
+The function hnae3_match_n_instantiate() was called for both
+initializing and uninitializing a client. For uninitializing, the
+return value was never used.
+
+To make this clearer, this patch splits it into two functions,
+hnae3_init_client_instance() and hnae3_uninit_client_instance()
+(a sketch of this refactoring pattern follows the patch).
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.c | 40 +++++++++++++++-------------
+ 1 file changed, 22 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+@@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(st
+ return inited;
+ }
+
+-static int hnae3_match_n_instantiate(struct hnae3_client *client,
+- struct hnae3_ae_dev *ae_dev, bool is_reg)
++static int hnae3_init_client_instance(struct hnae3_client *client,
++ struct hnae3_ae_dev *ae_dev)
+ {
+ int ret;
+
+@@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(str
+ return 0;
+ }
+
+- /* now, (un-)instantiate client by calling lower layer */
+- if (is_reg) {
+- ret = ae_dev->ops->init_client_instance(client, ae_dev);
+- if (ret)
+- dev_err(&ae_dev->pdev->dev,
+- "fail to instantiate client, ret = %d\n", ret);
++ ret = ae_dev->ops->init_client_instance(client, ae_dev);
++ if (ret)
++ dev_err(&ae_dev->pdev->dev,
++ "fail to instantiate client, ret = %d\n", ret);
+
+- return ret;
+- }
++ return ret;
++}
++
++static void hnae3_uninit_client_instance(struct hnae3_client *client,
++ struct hnae3_ae_dev *ae_dev)
++{
++ /* check if this client matches the type of ae_dev */
++ if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
++ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
++ return;
+
+ if (hnae3_get_client_init_flag(client, ae_dev)) {
+ ae_dev->ops->uninit_client_instance(client, ae_dev);
+
+ hnae3_set_client_init_flag(client, ae_dev, 0);
+ }
+-
+- return 0;
+ }
+
+ int hnae3_register_client(struct hnae3_client *client)
+@@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_c
+ /* if the client could not be initialized on current port, for
+ * any error reasons, move on to next available port
+ */
+- ret = hnae3_match_n_instantiate(client, ae_dev, true);
++ ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed for port, ret = %d\n",
+@@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae
+ mutex_lock(&hnae3_common_lock);
+ /* un-initialize the client on every matched port */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+- hnae3_match_n_instantiate(client, ae_dev, false);
++ hnae3_uninit_client_instance(client, ae_dev);
+ }
+
+ list_del(&client->node);
+@@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3
+ * initialize the figure out client instance
+ */
+ list_for_each_entry(client, &hnae3_client_list, node) {
+- ret = hnae3_match_n_instantiate(client, ae_dev, true);
++ ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed, ret = %d\n",
+@@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hna
+ * un-initialize the figure out client instance
+ */
+ list_for_each_entry(client, &hnae3_client_list, node)
+- hnae3_match_n_instantiate(client, ae_dev, false);
++ hnae3_uninit_client_instance(client, ae_dev);
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+@@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_a
+ * initialize the figure out client instance
+ */
+ list_for_each_entry(client, &hnae3_client_list, node) {
+- ret = hnae3_match_n_instantiate(client, ae_dev, true);
++ ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed, ret = %d\n",
+@@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae
+ continue;
+
+ list_for_each_entry(client, &hnae3_client_list, node)
+- hnae3_match_n_instantiate(client, ae_dev, false);
++ hnae3_uninit_client_instance(client, ae_dev);
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
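
For illustration, a tiny sketch of the refactoring pattern applied here: a
function whose behaviour is switched by a boolean parameter is split into two
single-purpose functions, so the init path keeps a meaningful return value
and the uninit path can honestly return void. All names below are made up.

#include <stdio.h>

/* After the split: two single-purpose functions instead of one
 * match_n_instantiate(client, dev, bool is_reg).
 */
static int init_client_instance(void)
{
	/* initialization can fail, so it returns int */
	return 0;
}

static void uninit_client_instance(void)
{
	/* uninitialization cannot fail; callers ignored the old
	 * function's return value on this path anyway, so return void
	 */
}

int main(void)
{
	if (init_client_instance())
		fprintf(stderr, "init failed\n");
	uninit_client_instance();
	return 0;
}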
diff --git a/patches.drivers/net-hns3-stop-mailbox-handling-when-command-queue-ne.patch b/patches.drivers/net-hns3-stop-mailbox-handling-when-command-queue-ne.patch
new file mode 100644
index 0000000000..0c27b85be6
--- /dev/null
+++ b/patches.drivers/net-hns3-stop-mailbox-handling-when-command-queue-ne.patch
@@ -0,0 +1,31 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Sat, 6 Apr 2019 15:43:33 +0800
+Subject: net: hns3: stop mailbox handling when command queue need re-init
+Patch-mainline: v5.2-rc1
+Git-commit: 18e2488881c61957b8fc1a25c0bb8419e25ddb6f
+References: bsc#1104353 FATE#326415 bsc#1135058
+
+If the command queue needs re-initialization, the mailbox handling
+task should do nothing; otherwise the task would only produce error
+messages (see the sketch of the scheduling guard after this patch).
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -2164,7 +2164,8 @@ static int hclge_mac_init(struct hclge_d
+
+ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+ {
+- if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
++ if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
++ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ schedule_work(&hdev->mbx_service_task);
+ }
+
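
For illustration, a small userspace analogue of the guard added above, using
C11 atomics in place of the kernel's test_bit()/test_and_set_bit():
scheduling is skipped entirely while the command queue is disabled, and the
SCHED bit ensures the task is queued at most once. The bit numbers and names
are made up.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define STATE_CMD_DISABLE 0
#define STATE_MBX_SCHED   1

static atomic_uint state;

static bool test_bit(int nr, atomic_uint *addr)
{
	return atomic_load(addr) & (1U << nr);
}

static bool test_and_set_bit(int nr, atomic_uint *addr)
{
	return atomic_fetch_or(addr, 1U << nr) & (1U << nr);
}

static void mbx_task_schedule(void)
{
	/* do nothing while the command queue is being re-initialized,
	 * and never queue the task twice
	 */
	if (!test_bit(STATE_CMD_DISABLE, &state) &&
	    !test_and_set_bit(STATE_MBX_SCHED, &state))
		printf("mailbox task scheduled\n");
}

int main(void)
{
	mbx_task_schedule();		/* schedules */
	mbx_task_schedule();		/* already pending: no-op */
	atomic_fetch_or(&state, 1U << STATE_CMD_DISABLE);
	atomic_fetch_and(&state, ~(1U << STATE_MBX_SCHED));
	mbx_task_schedule();		/* disabled: no-op */
	return 0;
}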
diff --git a/patches.drivers/net-hns3-stop-sending-keep-alive-msg-when-VF-command.patch b/patches.drivers/net-hns3-stop-sending-keep-alive-msg-when-VF-command.patch
new file mode 100644
index 0000000000..91aa1ae2c1
--- /dev/null
+++ b/patches.drivers/net-hns3-stop-sending-keep-alive-msg-when-VF-command.patch
@@ -0,0 +1,33 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:48 +0800
+Subject: net: hns3: stop sending keep alive msg when VF command queue needs
+ reinit
+Patch-mainline: v5.2-rc1
+Git-commit: 1416d333a4ec9ab05c37b94628cb476b32326858
+References: bsc#1104353 FATE#326415 bsc#1134972
+
+HCLGEVF_STATE_CMD_DISABLE is more suitable than
+HCLGEVF_STATE_RST_HANDLING for stopping the keep-alive messages,
+since HCLGEVF_STATE_RST_HANDLING is only set while the reset task
+is running.
+
+Fixes: c59a85c07e77 ("net: hns3: stop sending keep alive msg to PF when VF is resetting")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1758,7 +1758,7 @@ static void hclgevf_keep_alive_task(stru
+
+ hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+
+- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
++ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
diff --git a/patches.drivers/net-hns3-unify-maybe_stop_tx-for-TSO-and-non-TSO-cas.patch b/patches.drivers/net-hns3-unify-maybe_stop_tx-for-TSO-and-non-TSO-cas.patch
new file mode 100644
index 0000000000..3ae07cbbb0
--- /dev/null
+++ b/patches.drivers/net-hns3-unify-maybe_stop_tx-for-TSO-and-non-TSO-cas.patch
@@ -0,0 +1,259 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:41 +0800
+Subject: net: hns3: unify maybe_stop_tx for TSO and non-TSO case
+Patch-mainline: v5.2-rc1
+Git-commit: 3d5f3741895291d3317e33718d96eb78294c8941
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+Currently, the maybe_stop_tx ops for the TSO and non-TSO cases share
+some BD calculation code, so this patch unifies them by removing the
+maybe_stop_tx ops. skb_is_gso() can be used to distinguish the TSO
+case from the non-TSO case if the TSO case ever needs special
+handling. A sketch of the unified BD counting follows this patch.
+
+This patch also adds a tx_copy field to "ethtool --statistics" to
+help debug performance issues caused by calling skb_copy.
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 128 +++++++--------------
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 7 -
+ drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 1
+ 3 files changed, 50 insertions(+), 86 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1154,64 +1154,48 @@ static int hns3_fill_desc(struct hns3_en
+ return 0;
+ }
+
+-static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
+- struct hns3_enet_ring *ring)
++static int hns3_nic_bd_num(struct sk_buff *skb)
+ {
+- struct sk_buff *skb = *out_skb;
+- struct sk_buff *new_skb = NULL;
+- struct skb_frag_struct *frag;
+- int bdnum_for_frag;
+- int frag_num;
+- int buf_num;
+- int size;
+- int i;
++ int size = skb_headlen(skb);
++ int i, bd_num;
+
+- size = skb_headlen(skb);
+- buf_num = hns3_tx_bd_count(size);
++ /* if the total len is within the max bd limit */
++ if (likely(skb->len <= HNS3_MAX_BD_SIZE))
++ return skb_shinfo(skb)->nr_frags + 1;
+
+- frag_num = skb_shinfo(skb)->nr_frags;
+- for (i = 0; i < frag_num; i++) {
+- frag = &skb_shinfo(skb)->frags[i];
+- size = skb_frag_size(frag);
+- bdnum_for_frag = hns3_tx_bd_count(size);
+- if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
+- return -ENOMEM;
++ bd_num = hns3_tx_bd_count(size);
+
+- buf_num += bdnum_for_frag;
+- }
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
++ int frag_bd_num;
+
+- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
+- buf_num = hns3_tx_bd_count(skb->len);
+- if (ring_space(ring) < buf_num)
+- return -EBUSY;
+- /* manual split the send packet */
+- new_skb = skb_copy(skb, GFP_ATOMIC);
+- if (!new_skb)
++ size = skb_frag_size(frag);
++ frag_bd_num = hns3_tx_bd_count(size);
++
++ if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
+ return -ENOMEM;
+- dev_kfree_skb_any(skb);
+- *out_skb = new_skb;
+- }
+
+- if (unlikely(ring_space(ring) < buf_num))
+- return -EBUSY;
++ bd_num += frag_bd_num;
++ }
+
+- *bnum = buf_num;
+- return 0;
++ return bd_num;
+ }
+
+-static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
+- struct hns3_enet_ring *ring)
++static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
++ struct sk_buff **out_skb)
+ {
+ struct sk_buff *skb = *out_skb;
+- struct sk_buff *new_skb = NULL;
+- int buf_num;
++ int bd_num;
+
+- /* No. of segments (plus a header) */
+- buf_num = skb_shinfo(skb)->nr_frags + 1;
++ bd_num = hns3_nic_bd_num(skb);
++ if (bd_num < 0)
++ return bd_num;
++
++ if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
++ struct sk_buff *new_skb;
+
+- if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
+- buf_num = hns3_tx_bd_count(skb->len);
+- if (ring_space(ring) < buf_num)
++ bd_num = hns3_tx_bd_count(skb->len);
++ if (unlikely(ring_space(ring) < bd_num))
+ return -EBUSY;
+ /* manual split the send packet */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+@@ -1219,14 +1203,16 @@ static int hns3_nic_maybe_stop_tx(struct
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
++
++ u64_stats_update_begin(&ring->syncp);
++ ring->stats.tx_copy++;
++ u64_stats_update_end(&ring->syncp);
+ }
+
+- if (unlikely(ring_space(ring) < buf_num))
++ if (unlikely(ring_space(ring) < bd_num))
+ return -EBUSY;
+
+- *bnum = buf_num;
+-
+- return 0;
++ return bd_num;
+ }
+
+ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
+@@ -1277,22 +1263,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_
+ /* Prefetch the data used later */
+ prefetch(skb->data);
+
+- switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
+- case -EBUSY:
+- u64_stats_update_begin(&ring->syncp);
+- ring->stats.tx_busy++;
+- u64_stats_update_end(&ring->syncp);
++ buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
++ if (unlikely(buf_num <= 0)) {
++ if (buf_num == -EBUSY) {
++ u64_stats_update_begin(&ring->syncp);
++ ring->stats.tx_busy++;
++ u64_stats_update_end(&ring->syncp);
++ goto out_net_tx_busy;
++ } else if (buf_num == -ENOMEM) {
++ u64_stats_update_begin(&ring->syncp);
++ ring->stats.sw_err_cnt++;
++ u64_stats_update_end(&ring->syncp);
++ }
+
+- goto out_net_tx_busy;
+- case -ENOMEM:
+- u64_stats_update_begin(&ring->syncp);
+- ring->stats.sw_err_cnt++;
+- u64_stats_update_end(&ring->syncp);
+- netdev_err(netdev, "no memory to xmit!\n");
++ if (net_ratelimit())
++ netdev_err(netdev, "xmit error: %d!\n", buf_num);
+
+ goto out_err_tx_ok;
+- default:
+- break;
+ }
+
+ /* No. of segments (plus a header) */
+@@ -1397,13 +1384,6 @@ static int hns3_nic_set_features(struct
+ bool enable;
+ int ret;
+
+- if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+- if (features & (NETIF_F_TSO | NETIF_F_TSO6))
+- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+- else
+- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+- }
+-
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ enable = !!(features & NETIF_F_GRO_HW);
+ ret = h->ae_algo->ops->set_gro_en(h, enable);
+@@ -3732,17 +3712,6 @@ static void hns3_del_all_fd_rules(struct
+ h->ae_algo->ops->del_all_fd_entries(h, clear_list);
+ }
+
+-static void hns3_nic_set_priv_ops(struct net_device *netdev)
+-{
+- struct hns3_nic_priv *priv = netdev_priv(netdev);
+-
+- if ((netdev->features & NETIF_F_TSO) ||
+- (netdev->features & NETIF_F_TSO6))
+- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+- else
+- priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+-}
+-
+ static int hns3_client_start(struct hnae3_handle *handle)
+ {
+ if (!handle->ae_algo->ops->client_start)
+@@ -3809,7 +3778,6 @@ static int hns3_client_init(struct hnae3
+ netdev->netdev_ops = &hns3_nic_netdev_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ hns3_ethtool_set_ops(netdev);
+- hns3_nic_set_priv_ops(netdev);
+
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -376,6 +376,7 @@ struct ring_stats {
+ u64 tx_err_cnt;
+ u64 restart_queue;
+ u64 tx_busy;
++ u64 tx_copy;
+ };
+ struct {
+ u64 rx_pkts;
+@@ -444,11 +445,6 @@ struct hns3_nic_ring_data {
+ void (*fini_process)(struct hns3_nic_ring_data *);
+ };
+
+-struct hns3_nic_ops {
+- int (*maybe_stop_tx)(struct sk_buff **out_skb,
+- int *bnum, struct hns3_enet_ring *ring);
+-};
+-
+ enum hns3_flow_level_range {
+ HNS3_FLOW_LOW = 0,
+ HNS3_FLOW_MID = 1,
+@@ -538,7 +534,6 @@ struct hns3_nic_priv {
+ u32 port_id;
+ struct net_device *netdev;
+ struct device *dev;
+- struct hns3_nic_ops ops;
+
+ /**
+ * the cb for nic to manage the ring buffer, the first half of the
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+@@ -29,6 +29,7 @@ static const struct hns3_stats hns3_txq_
+ HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("wake", restart_queue),
+ HNS3_TQP_STAT("busy", tx_busy),
++ HNS3_TQP_STAT("copy", tx_copy),
+ };
+
+ #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
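
For illustration, a self-contained sketch of the unified logic above: count
the buffer descriptors needed for the linear head plus each fragment
(mirroring hns3_tx_bd_count()/hns3_nic_bd_num() in the diff), and fall back
to a linearizing copy, counted in a tx_copy-style stat, only when the total
exceeds the per-packet BD limit. The sizes and limits are made up.

#include <stdio.h>

#define MAX_BD_SIZE	65535	/* hypothetical per-BD byte limit */
#define MAX_BD_PER_PKT	8	/* hypothetical per-packet BD limit */

/* BDs needed for a buffer of 'size' bytes (ceiling division) */
static int tx_bd_count(int size)
{
	return (size + MAX_BD_SIZE - 1) / MAX_BD_SIZE;
}

/* Total BDs for a packet: linear head plus each fragment */
static int nic_bd_num(int headlen, const int *frag_sizes, int nr_frags)
{
	int bd_num = tx_bd_count(headlen);
	int i;

	for (i = 0; i < nr_frags; i++)
		bd_num += tx_bd_count(frag_sizes[i]);
	return bd_num;
}

int main(void)
{
	static const int frags[] = { 70000, 70000, 70000, 70000 };
	static int tx_copy;	/* stands in for ring->stats.tx_copy */
	int bd_num = nic_bd_num(1000, frags, 4);

	if (bd_num > MAX_BD_PER_PKT) {
		/* the driver calls skb_copy() here to linearize */
		tx_copy++;
		printf("linearized, tx_copy=%d\n", tx_copy);
	} else {
		printf("sent with %d BDs\n", bd_num);
	}
	return 0;
}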
diff --git a/patches.drivers/net-hns3-unify-the-page-reusing-for-page-size-4K-and.patch b/patches.drivers/net-hns3-unify-the-page-reusing-for-page-size-4K-and.patch
new file mode 100644
index 0000000000..9cf73d3b1f
--- /dev/null
+++ b/patches.drivers/net-hns3-unify-the-page-reusing-for-page-size-4K-and.patch
@@ -0,0 +1,90 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:50 +0800
+Subject: net: hns3: unify the page reusing for page size 4K and 64K
+Patch-mainline: v5.2-rc1
+Git-commit: 389ca14615e5ea4f9a56d765ac1e0da22d8105c3
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+When the page size is 64K, the RX buffer is currently not reused once
+page_offset has moved to the last buffer. This patch adds a check to
+decide whether the buffer page can be reused when page_offset moves
+beyond the last offset.
+
+If the driver is the only user of the page when page_offset moves
+beyond the last offset, the buffer can be reused and page_offset is
+reset to zero (a sketch of this decision follows the patch).
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 45 ++++++------------------
+ 1 file changed, 13 insertions(+), 32 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -2329,50 +2329,31 @@ static void hns3_nic_reuse_page(struct s
+ struct hns3_enet_ring *ring, int pull_len,
+ struct hns3_desc_cb *desc_cb)
+ {
+- struct hns3_desc *desc;
+- u32 truesize;
+- int size;
+- int last_offset;
+- bool twobufs;
+-
+- twobufs = ((PAGE_SIZE < 8192) &&
+- hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
+-
+- desc = &ring->desc[ring->next_to_clean];
+- size = le16_to_cpu(desc->rx.size);
+-
+- truesize = hnae3_buf_size(ring);
+-
+- if (!twobufs)
+- last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring);
++ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
++ int size = le16_to_cpu(desc->rx.size);
++ u32 truesize = hnae3_buf_size(ring);
+
+ skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
+ size - pull_len, truesize);
+
+- /* Avoid re-using remote pages,flag default unreuse */
+- if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
+- return;
+-
+- if (twobufs) {
+- /* If we are only owner of page we can reuse it */
+- if (likely(page_count(desc_cb->priv) == 1)) {
+- /* Flip page offset to other buffer */
+- desc_cb->page_offset ^= truesize;
+-
+- desc_cb->reuse_flag = 1;
+- /* bump ref count on page before it is given*/
+- get_page(desc_cb->priv);
+- }
++ /* Avoid re-using remote pages, or the stack is still using the page
++ * when page_offset rollback to zero, flag default unreuse
++ */
++ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) ||
++ (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
+ return;
+- }
+
+ /* Move offset up to the next cache line */
+ desc_cb->page_offset += truesize;
+
+- if (desc_cb->page_offset <= last_offset) {
++ if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) {
+ desc_cb->reuse_flag = 1;
+ /* Bump ref count on page before it is given*/
+ get_page(desc_cb->priv);
++ } else if (page_count(desc_cb->priv) == 1) {
++ desc_cb->reuse_flag = 1;
++ desc_cb->page_offset = 0;
++ get_page(desc_cb->priv);
+ }
+ }
+
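
For illustration, a minimal sketch of the unified reuse decision above, with
page_count() replaced by a plain refcount argument and the NUMA-node
early-exit check omitted: the buffer is reused if the next offset still fits
in the page; once the offset would run past the end, it is reused only when
the driver holds the sole reference, in which case the offset wraps to zero.
The page and buffer sizes are hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ	65536	/* hypothetical 64K page */
#define BUF_SZ	4096	/* hypothetical RX buffer size */

struct desc_cb {
	int page_offset;
	bool reuse;
};

/* refcount stands in for page_count(desc_cb->priv) */
static void reuse_page(struct desc_cb *cb, int refcount)
{
	cb->reuse = false;
	cb->page_offset += BUF_SZ;	/* move to the next buffer */

	if (cb->page_offset + BUF_SZ <= PAGE_SZ) {
		cb->reuse = true;	/* next buffer still fits */
	} else if (refcount == 1) {
		cb->reuse = true;	/* sole owner: wrap around */
		cb->page_offset = 0;
	}
}

int main(void)
{
	struct desc_cb cb = { .page_offset = PAGE_SZ - BUF_SZ };

	reuse_page(&cb, 1);	/* past the end, sole owner: wraps */
	printf("offset=%d reuse=%d\n", cb.page_offset, cb.reuse);
	return 0;
}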
diff --git a/patches.drivers/net-hns3-use-a-reserved-byte-to-identify-need_resp-f.patch b/patches.drivers/net-hns3-use-a-reserved-byte-to-identify-need_resp-f.patch
new file mode 100644
index 0000000000..5d713f3cca
--- /dev/null
+++ b/patches.drivers/net-hns3-use-a-reserved-byte-to-identify-need_resp-f.patch
@@ -0,0 +1,84 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:50 +0800
+Subject: net: hns3: use a reserved byte to identify need_resp flag
+Patch-mainline: v5.2-rc1
+Git-commit: b7048d324b5ebcb99022e2e7296f03918e5f38c4
+References: bsc#1104353 FATE#326415
+
+This patch uses a reserved byte in hclge_mbx_vf_to_pf_cmd to carry
+the need_resp flag, so that when the PF receives the mailbox message
+it can decide whether to send a response to the VF.
+
+For hclge_set_vf_uc_mac_addr(), the mbx_need_resp flag should be used
+to decide whether to send a response to the VF.
+
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 5 ++++-
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7 +++----
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 2 ++
+ 3 files changed, 9 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+@@ -84,12 +84,15 @@ struct hclgevf_mbx_resp_status {
+ struct hclge_mbx_vf_to_pf_cmd {
+ u8 rsv;
+ u8 mbx_src_vfid; /* Auto filled by IMP */
+- u8 rsv1[2];
++ u8 mbx_need_resp;
++ u8 rsv1[1];
+ u8 msg_len;
+ u8 rsv2[3];
+ u8 msg[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+
++#define HCLGE_MBX_NEED_RESP_BIT BIT(0)
++
+ struct hclge_mbx_pf_to_vf_cmd {
+ u8 dest_vfid;
+ u8 rsv[3];
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -212,8 +212,7 @@ static int hclge_set_vf_promisc_mode(str
+ }
+
+ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+- struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+- bool gen_resp)
++ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+ {
+ const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
+ struct hclge_dev *hdev = vport->back;
+@@ -249,7 +248,7 @@ static int hclge_set_vf_uc_mac_addr(stru
+ return -EIO;
+ }
+
+- if (gen_resp)
++ if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
+ hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
+
+ return 0;
+@@ -597,7 +596,7 @@ void hclge_mbx_handler(struct hclge_dev
+ ret);
+ break;
+ case HCLGE_MBX_SET_UNICAST:
+- ret = hclge_set_vf_uc_mac_addr(vport, req, true);
++ ret = hclge_set_vf_uc_mac_addr(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -98,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
++ req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT :
++ ~HCLGE_MBX_NEED_RESP_BIT;
+ req->msg[0] = code;
+ req->msg[1] = subcode;
+ memcpy(&req->msg[2], msg_data, msg_len);
diff --git a/patches.drivers/net-hns3-use-atomic_t-replace-u32-for-arq-s-count.patch b/patches.drivers/net-hns3-use-atomic_t-replace-u32-for-arq-s-count.patch
new file mode 100644
index 0000000000..95aea03dea
--- /dev/null
+++ b/patches.drivers/net-hns3-use-atomic_t-replace-u32-for-arq-s-count.patch
@@ -0,0 +1,74 @@
+From: Huazhong Tan <tanhuazhong@huawei.com>
+Date: Thu, 25 Apr 2019 20:42:49 +0800
+Subject: net: hns3: use atomic_t replace u32 for arq's count
+Patch-mainline: v5.2-rc1
+Git-commit: 30780a8b1677e7409b32ae52a9a84f7d41ae6b43
+References: bsc#1104353 FATE#326415 bsc#1134953
+
+Since the irq handler and the mailbox task both update the arq's
+count, it should use atomic_t instead of u32; otherwise its value may
+eventually become wrong (see the race illustration after this patch).
+
+Fixes: 07a0556a3a73 ("net: hns3: Changes to support ARQ(Asynchronous Receive Queue)")
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 2 +-
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 2 +-
+ drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 7 ++++---
+ 3 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+@@ -111,7 +111,7 @@ struct hclgevf_mbx_arq_ring {
+ struct hclgevf_dev *hdev;
+ u32 head;
+ u32 tail;
+- u32 count;
++ atomic_t count;
+ u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+ };
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+@@ -341,7 +341,7 @@ int hclgevf_cmd_init(struct hclgevf_dev
+ hdev->arq.hdev = hdev;
+ hdev->arq.head = 0;
+ hdev->arq.tail = 0;
+- hdev->arq.count = 0;
++ atomic_set(&hdev->arq.count, 0);
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+@@ -212,7 +212,8 @@ void hclgevf_mbx_handler(struct hclgevf_
+ /* we will drop the async msg if we find ARQ as full
+ * and continue with next message
+ */
+- if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
++ if (atomic_read(&hdev->arq.count) >=
++ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ dev_warn(&hdev->pdev->dev,
+ "Async Q full, dropping msg(%d)\n",
+ req->msg[1]);
+@@ -224,7 +225,7 @@ void hclgevf_mbx_handler(struct hclgevf_
+ memcpy(&msg_q[0], req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
+ hclge_mbx_tail_ptr_move_arq(hdev->arq);
+- hdev->arq.count++;
++ atomic_inc(&hdev->arq.count);
+
+ hclgevf_mbx_task_schedule(hdev);
+
+@@ -317,7 +318,7 @@ void hclgevf_mbx_async_handler(struct hc
+ }
+
+ hclge_mbx_head_ptr_move_arq(hdev->arq);
+- hdev->arq.count--;
++ atomic_dec(&hdev->arq.count);
+ msg_q = hdev->arq.msg_q[hdev->arq.head];
+ }
+ }
diff --git a/patches.drivers/net-hns3-use-devm_kcalloc-when-allocating-desc_cb.patch b/patches.drivers/net-hns3-use-devm_kcalloc-when-allocating-desc_cb.patch
new file mode 100644
index 0000000000..a6bdfacaa3
--- /dev/null
+++ b/patches.drivers/net-hns3-use-devm_kcalloc-when-allocating-desc_cb.patch
@@ -0,0 +1,51 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:52 +0800
+Subject: net: hns3: use devm_kcalloc when allocating desc_cb
+Patch-mainline: v5.2-rc1
+Git-commit: 77296bf6a7b806b00a62b53436b1e8429becd244
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+This patch uses devm_kcalloc instead of kcalloc when allocating
+ring->desc_cb, because devm_kcalloc not only ensures the memory is
+freed when the device is released, but also allocates the memory
+from the device's memory node.
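+
+A minimal sketch of the managed-allocation pattern (dev, n and buf are
+illustrative names):
+
+    buf = devm_kcalloc(dev, n, sizeof(*buf), GFP_KERNEL);
+    if (!buf)
+            return -ENOMEM;
+    /* no kfree() on teardown: the buffer is released automatically
+     * when dev is detached; devm_kfree(dev, buf) is only needed on
+     * early error paths
+     */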
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -3477,8 +3477,8 @@ static int hns3_alloc_ring_memory(struct
+ if (ring->desc_num <= 0 || ring->buf_size <= 0)
+ return -EINVAL;
+
+- ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
+- GFP_KERNEL);
++ ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
++ sizeof(ring->desc_cb[0]), GFP_KERNEL);
+ if (!ring->desc_cb) {
+ ret = -ENOMEM;
+ goto out;
+@@ -3499,7 +3499,7 @@ static int hns3_alloc_ring_memory(struct
+ out_with_desc:
+ hns3_free_desc(ring);
+ out_with_desc_cb:
+- kfree(ring->desc_cb);
++ devm_kfree(ring_to_dev(ring), ring->desc_cb);
+ ring->desc_cb = NULL;
+ out:
+ return ret;
+@@ -3508,7 +3508,7 @@ out:
+ static void hns3_fini_ring(struct hns3_enet_ring *ring)
+ {
+ hns3_free_desc(ring);
+- kfree(ring->desc_cb);
++ devm_kfree(ring_to_dev(ring), ring->desc_cb);
+ ring->desc_cb = NULL;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
diff --git a/patches.drivers/net-hns3-use-napi_schedule_irqoff-in-hard-interrupts.patch b/patches.drivers/net-hns3-use-napi_schedule_irqoff-in-hard-interrupts.patch
new file mode 100644
index 0000000000..0c12c305c8
--- /dev/null
+++ b/patches.drivers/net-hns3-use-napi_schedule_irqoff-in-hard-interrupts.patch
@@ -0,0 +1,33 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Mon, 6 May 2019 10:48:42 +0800
+Subject: net: hns3: use napi_schedule_irqoff in hard interrupts handlers
+Patch-mainline: v5.2-rc1
+Git-commit: fb00331bb8db4a631b00d6582f94806eba7a4c7f
+References: bsc#1104353 FATE#326415 bsc#1134947
+
+napi_schedule_irqoff is intended to be used from hard interrupt
+handlers or when irqs are already masked, see:
+
+https://lists.openwall.net/netdev/2014/10/29/2
+
+So this patch replaces napi_schedule with napi_schedule_irqoff.
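+
+Condensed from the core implementation (simplified here for
+illustration), the difference is the irq save/restore that is
+redundant inside a hard interrupt handler:
+
+    void __napi_schedule(struct napi_struct *n)
+    {
+            unsigned long flags;
+
+            local_irq_save(flags);  /* wasted work in hard irq context */
+            ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+            local_irq_restore(flags);
+    }
+
+    void __napi_schedule_irqoff(struct napi_struct *n)
+    {
+            ____napi_schedule(this_cpu_ptr(&softnet_data), n);
+    }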
+
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -74,7 +74,7 @@ static irqreturn_t hns3_irq_handle(int i
+ {
+ struct hns3_enet_tqp_vector *tqp_vector = vector;
+
+- napi_schedule(&tqp_vector->napi);
++ napi_schedule_irqoff(&tqp_vector->napi);
+
+ return IRQ_HANDLED;
+ }
diff --git a/patches.drivers/net-phy-marvell-Enable-interrupt-function-on-LED2-pi.patch b/patches.drivers/net-phy-marvell-Enable-interrupt-function-on-LED2-pi.patch
new file mode 100644
index 0000000000..5c681c513b
--- /dev/null
+++ b/patches.drivers/net-phy-marvell-Enable-interrupt-function-on-LED2-pi.patch
@@ -0,0 +1,63 @@
+From: Esben Haabendal <eha@deif.com>
+Date: Thu, 5 Apr 2018 22:40:29 +0200
+Subject: net: phy: marvell: Enable interrupt function on LED2 pin
+Patch-mainline: v4.17-rc1
+Git-commit: dd9a122ae99ae471beed4d4f8073d71e8d31ffa6
+References: bsc#1135018
+
+The LED[2]/INTn pin on the Marvell 88E1318S as well as 88E1510/12/14/18
+needs to be configured to be usable as an interrupt not only when WOL is
+enabled, but whenever we rely on interrupts from the PHY.
+
+Signed-off-by: Esben Haabendal <eha@deif.com>
+Cc: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/phy/marvell.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -820,6 +820,22 @@ static int m88e1121_config_init(struct p
+ return marvell_config_init(phydev);
+ }
+
++static int m88e1318_config_init(struct phy_device *phydev)
++{
++ if (phy_interrupt_is_valid(phydev)) {
++ int err = phy_modify_paged(
++ phydev, MII_MARVELL_LED_PAGE,
++ MII_88E1318S_PHY_LED_TCR,
++ MII_88E1318S_PHY_LED_TCR_FORCE_INT,
++ MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
++ MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
++ if (err < 0)
++ return err;
++ }
++
++ return m88e1121_config_init(phydev);
++}
++
+ static int m88e1510_config_init(struct phy_device *phydev)
+ {
+ int err;
+@@ -862,7 +878,7 @@ static int m88e1510_config_init(struct p
+ phydev->advertising &= ~pause;
+ }
+
+- return m88e1121_config_init(phydev);
++ return m88e1318_config_init(phydev);
+ }
+
+ static int m88e1118_config_aneg(struct phy_device *phydev)
+@@ -1940,7 +1956,7 @@ static struct phy_driver marvell_drivers
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
+- .config_init = &m88e1121_config_init,
++ .config_init = &m88e1318_config_init,
+ .config_aneg = &m88e1318_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
diff --git a/patches.drivers/net-phy-marvell-add-new-default-led-configure-for-m8.patch b/patches.drivers/net-phy-marvell-add-new-default-led-configure-for-m8.patch
new file mode 100644
index 0000000000..240a6d94b2
--- /dev/null
+++ b/patches.drivers/net-phy-marvell-add-new-default-led-configure-for-m8.patch
@@ -0,0 +1,75 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Mon, 22 Apr 2019 21:52:23 +0800
+Subject: net: phy: marvell: add new default led configure for m88e151x
+Patch-mainline: v5.2-rc1
+Git-commit: a93f7fe134543649cf2e2d8fc2c50a8f4d742915
+References: bsc#1135018
+
+The default m88e151x LED configuration is 0x1177, which uses LED[0]
+for the 1000M link, LED[1] for the 100M link, and LED[2] for activity.
+But some boards use LED[0] for link and LED[1] for activity, and
+prefer 0x1040. To support this case, this patch defines a new
+dev_flag and sets it before connecting the phy in the HNS3 driver.
+During phy initialization, the new LED configuration is used if
+this dev_flag is set.
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 3 +++
+ drivers/net/phy/marvell.c | 6 +++++-
+ include/linux/marvell_phy.h | 1 +
+ 3 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+@@ -3,6 +3,7 @@
+
+ #include <linux/etherdevice.h>
+ #include <linux/kernel.h>
++#include <linux/marvell_phy.h>
+
+ #include "hclge_cmd.h"
+ #include "hclge_main.h"
+@@ -216,6 +217,8 @@ int hclge_mac_connect_phy(struct hnae3_h
+
+ phydev->supported &= ~SUPPORTED_FIBRE;
+
++ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
++
+ ret = phy_connect_direct(netdev, phydev,
+ hclge_mac_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -122,6 +122,7 @@
+ #define MII_PHY_LED_CTRL 16
+ #define MII_88E1121_PHY_LED_DEF 0x0030
+ #define MII_88E1510_PHY_LED_DEF 0x1177
++#define MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE 0x1040
+
+ #define MII_M1011_PHY_STATUS 0x11
+ #define MII_M1011_PHY_STATUS_1000 0x8000
+@@ -640,7 +641,10 @@ static void marvell_config_led(struct ph
+ * LED[2] .. Blink, Activity
+ */
+ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
+- def_config = MII_88E1510_PHY_LED_DEF;
++ if (phydev->dev_flags & MARVELL_PHY_LED0_LINK_LED1_ACTIVE)
++ def_config = MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE;
++ else
++ def_config = MII_88E1510_PHY_LED_DEF;
+ break;
+ default:
+ return;
+--- a/include/linux/marvell_phy.h
++++ b/include/linux/marvell_phy.h
+@@ -31,5 +31,6 @@
+ /* struct phy_device dev_flags definitions */
+ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
+ #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
++#define MARVELL_PHY_LED0_LINK_LED1_ACTIVE 0x00000004
+
+ #endif /* _MARVELL_PHY_H */
diff --git a/patches.drivers/net-phy-marvell-change-default-m88e1510-LED-configur.patch b/patches.drivers/net-phy-marvell-change-default-m88e1510-LED-configur.patch
new file mode 100644
index 0000000000..bf507b44c9
--- /dev/null
+++ b/patches.drivers/net-phy-marvell-change-default-m88e1510-LED-configur.patch
@@ -0,0 +1,123 @@
+From: Wang Dongsheng <dongsheng.wang@hxt-semitech.com>
+Date: Sun, 1 Jul 2018 23:15:46 -0700
+Subject: net: phy: marvell: change default m88e1510 LED configuration
+Patch-mainline: v4.18-rc6
+Git-commit: 077772468ec141b22e1e7c0c58bc09e2f9dc8762
+References: bsc#1135018
+
+The m88e1121 default LED configuration does not apply to m88e151x,
+so add a function to replace the m88e1121 LED configuration.
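+
+The MARVELL_PHY_FAMILY_ID() helper added below strips the 4-bit
+revision nibble, so one switch case covers every revision of a part;
+for example (the newer revision value is illustrative):
+
+    /* 88E1510, id 0x01410dd0; a newer revision 0x01410dd1
+     * still selects the same family case
+     */
+    MARVELL_PHY_FAMILY_ID(0x01410dd0) == MARVELL_PHY_FAMILY_ID(0x01410dd1)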
+
+Signed-off-by: Wang Dongsheng <dongsheng.wang@hxt-semitech.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/phy/marvell.c | 54 +++++++++++++++++++++++++++++---------------
+ include/linux/marvell_phy.h | 2 +
+ 2 files changed, 38 insertions(+), 18 deletions(-)
+
+--- a/drivers/net/phy/marvell.c
++++ b/drivers/net/phy/marvell.c
+@@ -119,8 +119,9 @@
+ #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
+ #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
+
+-#define MII_88E1121_PHY_LED_CTRL 16
++#define MII_PHY_LED_CTRL 16
+ #define MII_88E1121_PHY_LED_DEF 0x0030
++#define MII_88E1510_PHY_LED_DEF 0x1177
+
+ #define MII_M1011_PHY_STATUS 0x11
+ #define MII_M1011_PHY_STATUS_1000 0x8000
+@@ -622,8 +623,40 @@ error:
+ return err;
+ }
+
++static void marvell_config_led(struct phy_device *phydev)
++{
++ u16 def_config;
++ int err;
++
++ switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
++ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
++ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
++ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
++ def_config = MII_88E1121_PHY_LED_DEF;
++ break;
++ /* Default PHY LED config:
++ * LED[0] .. 1000Mbps Link
++ * LED[1] .. 100Mbps Link
++ * LED[2] .. Blink, Activity
++ */
++ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
++ def_config = MII_88E1510_PHY_LED_DEF;
++ break;
++ default:
++ return;
++ }
++
++ err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
++ def_config);
++ if (err < 0)
++ pr_warn("Fail to config marvell phy LED.\n");
++}
++
+ static int marvell_config_init(struct phy_device *phydev)
+ {
++ /* Set default LED */
++ marvell_config_led(phydev);
++
+ /* Set registers from marvell,reg-init DT property */
+ return marvell_of_reg_init(phydev);
+ }
+@@ -805,21 +838,6 @@ static int m88e1111_config_init(struct p
+ return genphy_soft_reset(phydev);
+ }
+
+-static int m88e1121_config_init(struct phy_device *phydev)
+-{
+- int err;
+-
+- /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+- err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
+- MII_88E1121_PHY_LED_CTRL,
+- MII_88E1121_PHY_LED_DEF);
+- if (err < 0)
+- return err;
+-
+- /* Set marvell,reg-init configuration from device tree */
+- return marvell_config_init(phydev);
+-}
+-
+ static int m88e1318_config_init(struct phy_device *phydev)
+ {
+ if (phy_interrupt_is_valid(phydev)) {
+@@ -833,7 +851,7 @@ static int m88e1318_config_init(struct p
+ return err;
+ }
+
+- return m88e1121_config_init(phydev);
++ return marvell_config_init(phydev);
+ }
+
+ static int m88e1510_config_init(struct phy_device *phydev)
+@@ -1935,7 +1953,7 @@ static struct phy_driver marvell_drivers
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .probe = &m88e1121_probe,
+- .config_init = &m88e1121_config_init,
++ .config_init = &marvell_config_init,
+ .config_aneg = &m88e1121_config_aneg,
+ .read_status = &marvell_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+--- a/include/linux/marvell_phy.h
++++ b/include/linux/marvell_phy.h
+@@ -26,6 +26,8 @@
+ */
+ #define MARVELL_PHY_ID_88E6390 0x01410f90
+
++#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
++
+ /* struct phy_device dev_flags definitions */
+ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
+ #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/patches.drivers/scsi-qla2xxx-Add-28xx-flash-primary-secondary-status.patch b/patches.drivers/scsi-qla2xxx-Add-28xx-flash-primary-secondary-status.patch
new file mode 100644
index 0000000000..33c9247c46
--- /dev/null
+++ b/patches.drivers/scsi-qla2xxx-Add-28xx-flash-primary-secondary-status.patch
@@ -0,0 +1,873 @@
+From: Joe Carnuccio <joe.carnuccio@cavium.com>
+Date: Tue, 12 Mar 2019 11:08:21 -0700
+Subject: [PATCH] scsi: qla2xxx: Add 28xx flash primary/secondary status/image
+ mechanism
+Git-commit: 5fa8774c7f38c79f38b672c1a0db0c049da477d6
+Patch-mainline: v5.2-rc1
+References: bsc#1136215
+
+Includes the following:
+- correction to the 27xx image status struct;
+- factoring of the 27xx image status validation routines into common code;
+- image status generation compare that works across zero wrap (sketch below);
+- bsg interface to report the current active images (as loaded by the driver).
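+
+The zero-wrap generation compare reduces to 16-bit serial-number
+arithmetic; a minimal sketch (variable names are illustrative):
+
+    /* delta is evaluated as int16_t, so generation 0x0001 still
+     * compares as newer than 0xffff (delta == 2, not -65534)
+     */
+    int16_t delta = pri_gen - sec_gen;
+    active = (delta >= 0) ? pri : sec;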
+
+Signed-off-by: Joe Carnuccio <joe.carnuccio@cavium.com>
+Signed-off-by: Mike Hernandez <mhernandez@marvell.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/scsi/qla2xxx/qla_attr.c | 34 ++++-
+ drivers/scsi/qla2xxx/qla_bsg.c | 42 +++++
+ drivers/scsi/qla2xxx/qla_bsg.h | 11 ++
+ drivers/scsi/qla2xxx/qla_def.h | 63 +++++---
+ drivers/scsi/qla2xxx/qla_fw.h | 12 ++
+ drivers/scsi/qla2xxx/qla_gbl.h | 6 +-
+ drivers/scsi/qla2xxx/qla_init.c | 328 +++++++++++++++++++++++++++++++++-------
+ drivers/scsi/qla2xxx/qla_sup.c | 71 +++++++--
+ 8 files changed, 470 insertions(+), 97 deletions(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 35f7804682ec..8687090193dc 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -154,6 +154,8 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
+ struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
++ uint32_t faddr;
++ struct active_regions active_regions = { };
+
+ if (!capable(CAP_SYS_ADMIN))
+ return 0;
+@@ -164,11 +166,21 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
+ return -EAGAIN;
+ }
+
+- if (IS_NOCACHE_VPD_TYPE(ha))
+- ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
+- ha->nvram_size);
++ if (!IS_NOCACHE_VPD_TYPE(ha)) {
++ mutex_unlock(&ha->optrom_mutex);
++ goto skip;
++ }
++
++ faddr = ha->flt_region_nvram;
++ if (IS_QLA28XX(ha)) {
++ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
++ faddr = ha->flt_region_nvram_sec;
++ }
++ ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
++
+ mutex_unlock(&ha->optrom_mutex);
+
++skip:
+ return memory_read_from_buffer(buf, count, &off, ha->nvram,
+ ha->nvram_size);
+ }
+@@ -504,6 +516,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
+ struct device, kobj)));
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr;
++ struct active_regions active_regions = { };
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return -EAGAIN;
+@@ -516,9 +529,16 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
+
+ faddr = ha->flt_region_vpd << 2;
+
+- if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+- qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+- faddr = ha->flt_region_vpd_sec << 2;
++ if (IS_QLA28XX(ha)) {
++ qla28xx_get_aux_images(vha, &active_regions);
++ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
++ faddr = ha->flt_region_vpd_sec << 2;
++
++ ql_dbg(ql_dbg_init, vha, 0x7070,
++ "Loading %s nvram image.\n",
++ active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
++ "primary" : "secondary");
++ }
+
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 4c294bcd100a..95b0ec3c2d40 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -2388,6 +2388,45 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
+ return 0;
+ }
+
++static int
++qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
++{
++ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
++ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
++ struct qla_hw_data *ha = vha->hw;
++ struct qla_active_regions regions = { };
++ struct active_regions active_regions = { };
++
++ qla27xx_get_active_image(vha, &active_regions);
++ regions.global_image = active_regions.global;
++
++ if (IS_QLA28XX(ha)) {
++ qla28xx_get_aux_images(vha, &active_regions);
++ regions.board_config = active_regions.aux.board_config;
++ regions.vpd_nvram = active_regions.aux.vpd_nvram;
++ regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1;
++ regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3;
++ }
++
++ ql_dbg(ql_dbg_user, vha, 0x70e1,
++ "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n",
++ __func__, vha->host_no, regions.global_image,
++ regions.board_config, regions.vpd_nvram,
++ regions.npiv_config_0_1, regions.npiv_config_2_3);
++
++ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
++ bsg_job->reply_payload.sg_cnt, &regions, sizeof(regions));
++
++ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
++ bsg_reply->reply_payload_rcv_len = sizeof(regions);
++ bsg_reply->result = DID_OK << 16;
++ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
++ bsg_job_done(bsg_job, bsg_reply->result,
++ bsg_reply->reply_payload_rcv_len);
++
++ return 0;
++}
++
+ static int
+ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
+ {
+@@ -2461,6 +2500,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
+ case QL_VND_DPORT_DIAGNOSTICS:
+ return qla2x00_do_dport_diagnostics(bsg_job);
+
++ case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
++ return qla2x00_get_flash_image_status(bsg_job);
++
+ default:
+ return -ENOSYS;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
+index d97dfd521356..7594fad7b5b5 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.h
++++ b/drivers/scsi/qla2xxx/qla_bsg.h
+@@ -31,6 +31,7 @@
+ #define QL_VND_GET_PRIV_STATS 0x18
+ #define QL_VND_DPORT_DIAGNOSTICS 0x19
+ #define QL_VND_GET_PRIV_STATS_EX 0x1A
++#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E
+
+ /* BSG Vendor specific subcode returns */
+ #define EXT_STATUS_OK 0
+@@ -279,4 +280,14 @@ struct qla_dport_diag {
+ #define QLA_DPORT_RESULT 0x0
+ #define QLA_DPORT_START 0x2
+
++/* active images in flash */
++struct qla_active_regions {
++ uint8_t global_image;
++ uint8_t board_config;
++ uint8_t vpd_nvram;
++ uint8_t npiv_config_0_1;
++ uint8_t npiv_config_2_3;
++ uint8_t reserved[32];
++} __packed;
++
+ #endif
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index ac229cf7126b..574797ac7f92 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -1204,6 +1204,8 @@ struct mbx_cmd_32 {
+ #define QLA27XX_IMG_STATUS_VER_MINOR 0x00
+ #define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE
+ #define QLA28XX_IMG_STATUS_SIGN 0xFACEFADF
++#define QLA28XX_AUX_IMG_STATUS_SIGN 0xFACEFAED
++#define QLA27XX_DEFAULT_IMAGE 0
+ #define QLA27XX_PRIMARY_IMAGE 1
+ #define QLA27XX_SECONDARY_IMAGE 2
+
+@@ -4116,22 +4119,28 @@ struct qla_hw_data {
+ uint32_t fdt_protect_sec_cmd;
+ uint32_t fdt_wrt_sts_reg_cmd;
+
+- uint32_t flt_region_flt;
+- uint32_t flt_region_fdt;
+- uint32_t flt_region_boot;
+- uint32_t flt_region_boot_sec;
+- uint32_t flt_region_fw;
+- uint32_t flt_region_fw_sec;
+- uint32_t flt_region_vpd_nvram;
+- uint32_t flt_region_vpd;
+- uint32_t flt_region_vpd_sec;
+- uint32_t flt_region_nvram;
+- uint32_t flt_region_npiv_conf;
+- uint32_t flt_region_gold_fw;
+- uint32_t flt_region_fcp_prio;
+- uint32_t flt_region_bootload;
+- uint32_t flt_region_img_status_pri;
+- uint32_t flt_region_img_status_sec;
++ struct {
++ uint32_t flt_region_flt;
++ uint32_t flt_region_fdt;
++ uint32_t flt_region_boot;
++ uint32_t flt_region_boot_sec;
++ uint32_t flt_region_fw;
++ uint32_t flt_region_fw_sec;
++ uint32_t flt_region_vpd_nvram;
++ uint32_t flt_region_vpd_nvram_sec;
++ uint32_t flt_region_vpd;
++ uint32_t flt_region_vpd_sec;
++ uint32_t flt_region_nvram;
++ uint32_t flt_region_nvram_sec;
++ uint32_t flt_region_npiv_conf;
++ uint32_t flt_region_gold_fw;
++ uint32_t flt_region_fcp_prio;
++ uint32_t flt_region_bootload;
++ uint32_t flt_region_img_status_pri;
++ uint32_t flt_region_img_status_sec;
++ uint32_t flt_region_aux_img_status_pri;
++ uint32_t flt_region_aux_img_status_sec;
++ };
+ uint8_t active_image;
+
+ /* Needed for BEACON */
+@@ -4252,9 +4261,20 @@ struct qla_hw_data {
+
+ atomic_t zio_threshold;
+ uint16_t last_zio_threshold;
++
+ #define DEFAULT_ZIO_THRESHOLD 5
+ };
+
++struct active_regions {
++ uint8_t global;
++ struct {
++ uint8_t board_config;
++ uint8_t vpd_nvram;
++ uint8_t npiv_config_0_1;
++ uint8_t npiv_config_2_3;
++ } aux;
++};
++
+ #define FW_ABILITY_MAX_SPEED_MASK 0xFUL
+ #define FW_ABILITY_MAX_SPEED_16G 0x0
+ #define FW_ABILITY_MAX_SPEED_32G 0x1
+@@ -4469,13 +4489,20 @@ typedef struct scsi_qla_host {
+ struct qla27xx_image_status {
+ uint8_t image_status_mask;
+ uint16_t generation;
+- uint8_t reserved[3];
+- uint8_t ver_minor;
+ uint8_t ver_major;
++ uint8_t ver_minor;
++ uint8_t bitmap; /* 28xx only */
++ uint8_t reserved[2];
+ uint32_t checksum;
+ uint32_t signature;
+ } __packed;
+
++/* 28xx aux image status bimap values */
++#define QLA28XX_AUX_IMG_BOARD_CONFIG BIT_0
++#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1
++#define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2
++#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3
++
+ #define SET_VP_IDX 1
+ #define SET_AL_PA 2
+ #define RESET_VP_IDX 3
+diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
+index b9b1aaaff906..9dbd0dce5a29 100644
+--- a/drivers/scsi/qla2xxx/qla_fw.h
++++ b/drivers/scsi/qla2xxx/qla_fw.h
+@@ -1515,6 +1515,18 @@ struct qla_flt_header {
+ #define FLT_REG_VPD_SEC_27XX_2 0xD8
+ #define FLT_REG_VPD_SEC_27XX_3 0xDA
+
++/* 28xx */
++#define FLT_REG_AUX_IMG_PRI_28XX 0x125
++#define FLT_REG_AUX_IMG_SEC_28XX 0x126
++#define FLT_REG_VPD_SEC_28XX_0 0x10C
++#define FLT_REG_VPD_SEC_28XX_1 0x10E
++#define FLT_REG_VPD_SEC_28XX_2 0x110
++#define FLT_REG_VPD_SEC_28XX_3 0x112
++#define FLT_REG_NVRAM_SEC_28XX_0 0x10D
++#define FLT_REG_NVRAM_SEC_28XX_1 0x10F
++#define FLT_REG_NVRAM_SEC_28XX_2 0x111
++#define FLT_REG_NVRAM_SEC_28XX_3 0x113
++
+ struct qla_flt_region {
+ uint16_t code;
+ uint8_t attribute;
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index f00f1d213253..d5c27ffb5f41 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -93,7 +93,6 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+ extern int
+ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+ extern int qla2x00_init_rings(scsi_qla_host_t *);
+-extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *);
+ extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *,
+ int, int, bool);
+ extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *);
+@@ -108,6 +107,11 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
+ int qla24xx_detect_sfp(scsi_qla_host_t *vha);
+ int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+
++extern void qla28xx_get_aux_images(struct scsi_qla_host *,
++ struct active_regions *);
++extern void qla27xx_get_active_image(struct scsi_qla_host *,
++ struct active_regions *);
++
+ void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
+ uint16_t *);
+ extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 158ec5aa2837..7c5e530a90df 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -7236,95 +7236,281 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
+ return (rval);
+ }
+
+-uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
++static void
++qla27xx_print_image(struct scsi_qla_host *vha, char *name,
++ struct qla27xx_image_status *image_status)
++{
++ ql_dbg(ql_dbg_init, vha, 0x018b,
++ "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n",
++ name, "status",
++ image_status->image_status_mask,
++ le16_to_cpu(image_status->generation),
++ image_status->ver_major,
++ image_status->ver_minor,
++ image_status->bitmap,
++ le32_to_cpu(image_status->checksum),
++ le32_to_cpu(image_status->signature));
++}
++
++static bool
++qla28xx_check_aux_image_status_signature(
++ struct qla27xx_image_status *image_status)
++{
++ ulong signature = le32_to_cpu(image_status->signature);
++
++ return signature != QLA28XX_AUX_IMG_STATUS_SIGN;
++}
++
++static bool
++qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status)
++{
++ ulong signature = le32_to_cpu(image_status->signature);
++
++ return
++ signature != QLA27XX_IMG_STATUS_SIGN &&
++ signature != QLA28XX_IMG_STATUS_SIGN;
++}
++
++static ulong
++qla27xx_image_status_checksum(struct qla27xx_image_status *image_status)
++{
++ uint32_t *p = (void *)image_status;
++ uint n = sizeof(*image_status) / sizeof(*p);
++ uint32_t sum = 0;
++
++ for ( ; n--; p++)
++ sum += le32_to_cpup(p);
++
++ return sum;
++}
++
++static inline uint
++qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask)
++{
++ return aux->bitmap & bitmask ?
++ QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE;
++}
++
++static void
++qla28xx_component_status(
++ struct active_regions *active_regions, struct qla27xx_image_status *aux)
++{
++ active_regions->aux.board_config =
++ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG);
++
++ active_regions->aux.vpd_nvram =
++ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM);
++
++ active_regions->aux.npiv_config_0_1 =
++ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1);
++
++ active_regions->aux.npiv_config_2_3 =
++ qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3);
++}
++
++static int
++qla27xx_compare_image_generation(
++ struct qla27xx_image_status *pri_image_status,
++ struct qla27xx_image_status *sec_image_status)
++{
++ /* calculate generation delta as uint16 (this accounts for wrap) */
++ int16_t delta =
++ le16_to_cpu(pri_image_status->generation) -
++ le16_to_cpu(sec_image_status->generation);
++
++ ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta);
++
++ return delta;
++}
++
++void
++qla28xx_get_aux_images(
++ struct scsi_qla_host *vha, struct active_regions *active_regions)
+ {
+- struct qla27xx_image_status pri_image_status, sec_image_status;
+- bool valid_pri_image = true, valid_sec_image = true;
+- uint32_t *wptr;
+- uint chksum, cnt, size = sizeof(pri_image_status) / sizeof(*wptr);
+ struct qla_hw_data *ha = vha->hw;
+- uint32_t signature;
++ struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status;
++ bool valid_pri_image = false, valid_sec_image = false;
++ bool active_pri_image = false, active_sec_image = false;
++
++ if (!ha->flt_region_aux_img_status_pri) {
++ ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n");
++ goto check_sec_image;
++ }
++
++ qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status,
++ ha->flt_region_aux_img_status_pri,
++ sizeof(pri_aux_image_status) >> 2);
++ qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status);
++
++ if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) {
++ ql_dbg(ql_dbg_init, vha, 0x018b,
++ "Primary aux image signature (%#x) not valid\n",
++ le32_to_cpu(pri_aux_image_status.signature));
++ goto check_sec_image;
++ }
++
++ if (qla27xx_image_status_checksum(&pri_aux_image_status)) {
++ ql_dbg(ql_dbg_init, vha, 0x018c,
++ "Primary aux image checksum failed\n");
++ goto check_sec_image;
++ }
++
++ valid_pri_image = true;
++
++ if (pri_aux_image_status.image_status_mask & 1) {
++ ql_dbg(ql_dbg_init, vha, 0x018d,
++ "Primary aux image is active\n");
++ active_pri_image = true;
++ }
++
++check_sec_image:
++ if (!ha->flt_region_aux_img_status_sec) {
++ ql_dbg(ql_dbg_init, vha, 0x018a,
++ "Secondary aux image not addressed\n");
++ goto check_valid_image;
++ }
++
++ qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status,
++ ha->flt_region_aux_img_status_sec,
++ sizeof(sec_aux_image_status) >> 2);
++ qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status);
++
++ if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) {
++ ql_dbg(ql_dbg_init, vha, 0x018b,
++ "Secondary aux image signature (%#x) not valid\n",
++ le32_to_cpu(sec_aux_image_status.signature));
++ goto check_valid_image;
++ }
++
++ if (qla27xx_image_status_checksum(&sec_aux_image_status)) {
++ ql_dbg(ql_dbg_init, vha, 0x018c,
++ "Secondary aux image checksum failed\n");
++ goto check_valid_image;
++ }
+
+- ha->active_image = 0;
++ valid_sec_image = true;
++
++ if (sec_aux_image_status.image_status_mask & 1) {
++ ql_dbg(ql_dbg_init, vha, 0x018d,
++ "Secondary aux image is active\n");
++ active_sec_image = true;
++ }
++
++check_valid_image:
++ if (valid_pri_image && active_pri_image &&
++ valid_sec_image && active_sec_image) {
++ if (qla27xx_compare_image_generation(&pri_aux_image_status,
++ &sec_aux_image_status) >= 0) {
++ qla28xx_component_status(active_regions,
++ &pri_aux_image_status);
++ } else {
++ qla28xx_component_status(active_regions,
++ &sec_aux_image_status);
++ }
++ } else if (valid_pri_image && active_pri_image) {
++ qla28xx_component_status(active_regions, &pri_aux_image_status);
++ } else if (valid_sec_image && active_sec_image) {
++ qla28xx_component_status(active_regions, &sec_aux_image_status);
++ }
++
++ ql_dbg(ql_dbg_init, vha, 0x018f,
++ "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n",
++ active_regions->aux.board_config,
++ active_regions->aux.vpd_nvram,
++ active_regions->aux.npiv_config_0_1,
++ active_regions->aux.npiv_config_2_3);
++}
++
++void
++qla27xx_get_active_image(struct scsi_qla_host *vha,
++ struct active_regions *active_regions)
++{
++ struct qla_hw_data *ha = vha->hw;
++ struct qla27xx_image_status pri_image_status, sec_image_status;
++ bool valid_pri_image = false, valid_sec_image = false;
++ bool active_pri_image = false, active_sec_image = false;
+
+ if (!ha->flt_region_img_status_pri) {
+- valid_pri_image = false;
++ ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n");
+ goto check_sec_image;
+ }
+
+- qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
+- ha->flt_region_img_status_pri, size);
++ qla24xx_read_flash_data(vha, (void *)(&pri_image_status),
++ ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2);
++ qla27xx_print_image(vha, "Primary image", &pri_image_status);
+
+- signature = le32_to_cpu(pri_image_status.signature);
+- if (signature != QLA27XX_IMG_STATUS_SIGN &&
+- signature != QLA28XX_IMG_STATUS_SIGN) {
++ if (qla27xx_check_image_status_signature(&pri_image_status)) {
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Primary image signature (%#x) not valid\n",
+ le32_to_cpu(pri_image_status.signature));
+- valid_pri_image = false;
+ goto check_sec_image;
+ }
+
+- wptr = (uint32_t *)(&pri_image_status);
+- cnt = size;
++ if (qla27xx_image_status_checksum(&pri_image_status)) {
++ ql_dbg(ql_dbg_init, vha, 0x018c,
++ "Primary image checksum failed\n");
++ goto check_sec_image;
++ }
+
+- for (chksum = 0; cnt--; wptr++)
+- chksum += le32_to_cpu(*wptr);
++ valid_pri_image = true;
+
+- if (chksum) {
+- ql_dbg(ql_dbg_init, vha, 0x018c,
+- "Primary image checksum failed (%#x)\n", chksum);
+- valid_pri_image = false;
++ if (pri_image_status.image_status_mask & 1) {
++ ql_dbg(ql_dbg_init, vha, 0x018d,
++ "Primary image is active\n");
++ active_pri_image = true;
+ }
+
+ check_sec_image:
+ if (!ha->flt_region_img_status_sec) {
+- valid_sec_image = false;
++ ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n");
+ goto check_valid_image;
+ }
+
+ qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+- ha->flt_region_img_status_sec, size);
++ ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
++ qla27xx_print_image(vha, "Secondary image", &sec_image_status);
+
+- signature = le32_to_cpu(sec_image_status.signature);
+- if (signature != QLA27XX_IMG_STATUS_SIGN &&
+- signature != QLA28XX_IMG_STATUS_SIGN) {
+- ql_dbg(ql_dbg_init, vha, 0x018d,
++ if (qla27xx_check_image_status_signature(&sec_image_status)) {
++ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Secondary image signature (%#x) not valid\n",
+ le32_to_cpu(sec_image_status.signature));
+- valid_sec_image = false;
+ goto check_valid_image;
+ }
+
+- wptr = (uint32_t *)(&sec_image_status);
+- cnt = size;
+- for (chksum = 0; cnt--; wptr++)
+- chksum += le32_to_cpu(*wptr);
+- if (chksum) {
+- ql_dbg(ql_dbg_init, vha, 0x018e,
+- "Secondary image checksum failed (%#x)\n", chksum);
+- valid_sec_image = false;
++ if (qla27xx_image_status_checksum(&sec_image_status)) {
++ ql_dbg(ql_dbg_init, vha, 0x018c,
++ "Secondary image checksum failed\n");
++ goto check_valid_image;
++ }
++
++ valid_sec_image = true;
++
++ if (sec_image_status.image_status_mask & 1) {
++ ql_dbg(ql_dbg_init, vha, 0x018d,
++ "Secondary image is active\n");
++ active_sec_image = true;
+ }
+
+ check_valid_image:
+- if (valid_pri_image && (pri_image_status.image_status_mask & 1))
+- ha->active_image = QLA27XX_PRIMARY_IMAGE;
++ if (valid_pri_image && active_pri_image)
++ active_regions->global = QLA27XX_PRIMARY_IMAGE;
+
+- if (valid_sec_image && (sec_image_status.image_status_mask & 1)) {
+- if (!ha->active_image ||
+- le16_to_cpu(pri_image_status.generation) <
+- le16_to_cpu(sec_image_status.generation)) {
+- ha->active_image = QLA27XX_SECONDARY_IMAGE;
++ if (valid_sec_image && active_sec_image) {
++ if (!active_regions->global ||
++ qla27xx_compare_image_generation(
++ &pri_image_status, &sec_image_status) < 0) {
++ active_regions->global = QLA27XX_SECONDARY_IMAGE;
+ }
+ }
+
+- ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n",
+- ha->active_image == 0 ? "default bootld and fw" :
+- ha->active_image == 1 ? "primary" :
+- ha->active_image == 2 ? "secondary" :
+- "Invalid");
+-
+- return ha->active_image;
++ ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n",
++ active_regions->global == QLA27XX_DEFAULT_IMAGE ?
++ "default (boot/fw)" :
++ active_regions->global == QLA27XX_PRIMARY_IMAGE ?
++ "primary" :
++ active_regions->global == QLA27XX_SECONDARY_IMAGE ?
++ "secondary" : "invalid",
++ active_regions->global);
+ }
+
+ bool qla24xx_risc_firmware_invalid(uint32_t *dword)
+@@ -7714,7 +7900,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+
+ dcode = fwdt->template;
+ for (i = 0; i < risc_size; i++)
+- dcode[i] = le32_to_cpu(fwcode[i]);
++ dcode[i] = fwcode[i];
+
+ if (!qla27xx_fwdt_template_valid(dcode)) {
+ ql_log(ql_log_warn, vha, 0x0175,
+@@ -7777,6 +7963,7 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+ {
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
++ struct active_regions active_regions = { };
+
+ if (ql2xfwloadbin == 2)
+ goto try_blob_fw;
+@@ -7787,10 +7974,12 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+ * 3) Golden-Firmware residing in flash -- (limited operation).
+ */
+
+- if (!IS_QLA27XX(ha) || !IS_QLA28XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto try_primary_fw;
+
+- if (qla27xx_find_valid_image(vha) != QLA27XX_SECONDARY_IMAGE)
++ qla27xx_get_active_image(vha, &active_regions);
++
++ if (active_regions.global != QLA27XX_SECONDARY_IMAGE)
+ goto try_primary_fw;
+
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+@@ -7986,6 +8175,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
+ uint32_t chksum;
+ uint16_t cnt;
+ struct qla_hw_data *ha = vha->hw;
++ uint32_t faddr;
++ struct active_regions active_regions = { };
+
+ rval = QLA_SUCCESS;
+ icb = (struct init_cb_81xx *)ha->init_cb;
+@@ -7997,14 +8188,35 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
+ if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+ ha->vpd_size = FA_VPD_SIZE_82XX;
+
++ if (IS_QLA28XX(ha))
++ qla28xx_get_aux_images(vha, &active_regions);
++
+ /* Get VPD data into cache */
+ ha->vpd = ha->nvram + VPD_OFFSET;
+- ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+- ha->vpd_size);
++
++ faddr = ha->flt_region_vpd;
++ if (IS_QLA28XX(ha)) {
++ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
++ faddr = ha->flt_region_vpd_sec;
++ ql_dbg(ql_dbg_init, vha, 0x0110,
++ "Loading %s nvram image.\n",
++ active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
++ "primary" : "secondary");
++ }
++ qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2);
+
+ /* Get NVRAM data into cache and calculate checksum. */
+- ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
+- ha->nvram_size);
++ faddr = ha->flt_region_nvram;
++ if (IS_QLA28XX(ha)) {
++ if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
++ faddr = ha->flt_region_nvram_sec;
++ }
++ ql_dbg(ql_dbg_init, vha, 0x0110,
++ "Loading %s nvram image.\n",
++ active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ?
++ "primary" : "secondary");
++ qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2);
++
+ dptr = (uint32_t *)nv;
+ for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
+ chksum += le32_to_cpu(*dptr);
+diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
+index eb4514a56640..08c7dca07c90 100644
+--- a/drivers/scsi/qla2xxx/qla_sup.c
++++ b/drivers/scsi/qla2xxx/qla_sup.c
+@@ -862,21 +862,59 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
+ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_boot_sec = start;
+ break;
++ case FLT_REG_AUX_IMG_PRI_28XX:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ ha->flt_region_aux_img_status_pri = start;
++ break;
++ case FLT_REG_AUX_IMG_SEC_28XX:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ ha->flt_region_aux_img_status_sec = start;
++ break;
++ case FLT_REG_NVRAM_SEC_28XX_0:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ if (ha->port_no == 0)
++ ha->flt_region_nvram_sec = start;
++ break;
++ case FLT_REG_NVRAM_SEC_28XX_1:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ if (ha->port_no == 1)
++ ha->flt_region_nvram_sec = start;
++ break;
++ case FLT_REG_NVRAM_SEC_28XX_2:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ if (ha->port_no == 2)
++ ha->flt_region_nvram_sec = start;
++ break;
++ case FLT_REG_NVRAM_SEC_28XX_3:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ if (ha->port_no == 3)
++ ha->flt_region_nvram_sec = start;
++ break;
+ case FLT_REG_VPD_SEC_27XX_0:
+- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+- ha->flt_region_vpd_sec = start;
++ case FLT_REG_VPD_SEC_28XX_0:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
++ ha->flt_region_vpd_nvram_sec = start;
++ if (ha->port_no == 0)
++ ha->flt_region_vpd_sec = start;
++ }
+ break;
+ case FLT_REG_VPD_SEC_27XX_1:
+- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+- ha->flt_region_vpd_sec = start;
++ case FLT_REG_VPD_SEC_28XX_1:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ if (ha->port_no == 1)
++ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_2:
+- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+- ha->flt_region_vpd_sec = start;
++ case FLT_REG_VPD_SEC_28XX_2:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ if (ha->port_no == 2)
++ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_3:
+- if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+- ha->flt_region_vpd_sec = start;
++ case FLT_REG_VPD_SEC_28XX_3:
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++ if (ha->port_no == 3)
++ ha->flt_region_vpd_sec = start;
+ break;
+ }
+ }
+@@ -3020,6 +3058,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t faddr = 0;
++ struct active_regions active_regions = { };
+
+ if (IS_P3P_TYPE(ha))
+ return ret;
+@@ -3033,9 +3072,12 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+ pcihdr = ha->flt_region_boot << 2;
+- if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+- qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+- pcihdr = ha->flt_region_boot_sec << 2;
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
++ qla27xx_get_active_image(vha, &active_regions);
++ if (active_regions.global == QLA27XX_SECONDARY_IMAGE) {
++ pcihdr = ha->flt_region_boot_sec << 2;
++ }
++ }
+
+ do {
+ /* Verify PCI expansion ROM header. */
+@@ -3108,9 +3150,10 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ /* Read firmware image information. */
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+ faddr = ha->flt_region_fw;
+- if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+- qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+- faddr = ha->flt_region_fw_sec;
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
++ if (active_regions.global == QLA27XX_SECONDARY_IMAGE)
++ faddr = ha->flt_region_fw_sec;
++ }
+
+ qla24xx_read_flash_data(vha, dcode, faddr, 8);
+ if (qla24xx_risc_firmware_invalid(dcode)) {
+--
+2.16.4
+
diff --git a/patches.drivers/scsi-qla2xxx-Add-Device-ID-for-ISP28XX.patch b/patches.drivers/scsi-qla2xxx-Add-Device-ID-for-ISP28XX.patch
new file mode 100644
index 0000000000..b4ef487dce
--- /dev/null
+++ b/patches.drivers/scsi-qla2xxx-Add-Device-ID-for-ISP28XX.patch
@@ -0,0 +1,1542 @@
+From: Joe Carnuccio <joe.carnuccio@cavium.com>
+Date: Tue, 12 Mar 2019 11:08:13 -0700
+Subject: [PATCH] scsi: qla2xxx: Add Device ID for ISP28XX
+Git-commit: ecc89f25e225fabfffc709dbc43c928bc276cade
+Patch-mainline: v5.2-rc1
+References: bsc#1136215
+
+This patch adds the ISP28XX PCI device ID for Gen7 support. Signature
+determination for the primary/secondary flash images of ISP27XX/28XX is
+also added as part of Gen7 support.
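+
+The new IDs also gain entries in the driver's pci_device_id table in
+qla_os.c; schematically (a sketch, not the literal hunk):
+
+    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
+    { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },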
+
+Signed-off-by: Joe Carnuccio <joe.carnuccio@cavium.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/scsi/qla2xxx/qla_attr.c | 30 ++++++++-----
+ drivers/scsi/qla2xxx/qla_bsg.c | 12 +++---
+ drivers/scsi/qla2xxx/qla_dbg.c | 9 ++--
+ drivers/scsi/qla2xxx/qla_def.h | 62 +++++++++++++++++++--------
+ drivers/scsi/qla2xxx/qla_dfs.c | 6 +--
+ drivers/scsi/qla2xxx/qla_fw.h | 5 ++-
+ drivers/scsi/qla2xxx/qla_gs.c | 4 +-
+ drivers/scsi/qla2xxx/qla_init.c | 51 +++++++++++++---------
+ drivers/scsi/qla2xxx/qla_iocb.c | 5 ++-
+ drivers/scsi/qla2xxx/qla_isr.c | 14 +++---
+ drivers/scsi/qla2xxx/qla_mbx.c | 76 ++++++++++++++++++---------------
+ drivers/scsi/qla2xxx/qla_os.c | 89 ++++++++++++++++++++++++++++++++-------
+ drivers/scsi/qla2xxx/qla_sup.c | 49 +++++++++++----------
+ drivers/scsi/qla2xxx/qla_target.c | 7 +--
+ 14 files changed, 272 insertions(+), 147 deletions(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 8cc4d6589cbb..f035251a86ce 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -427,7 +427,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ valid = 1;
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
+ || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
+- || IS_QLA27XX(ha))
++ || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ valid = 1;
+ if (!valid) {
+ ql_log(ql_log_warn, vha, 0x7065,
+@@ -514,7 +514,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
+ if (IS_NOCACHE_VPD_TYPE(ha)) {
+ faddr = ha->flt_region_vpd << 2;
+
+- if (IS_QLA27XX(ha) &&
++ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_vpd_sec << 2;
+
+@@ -682,7 +682,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
+ ql_log(ql_log_info, vha, 0x706f,
+ "Issuing MPI reset.\n");
+
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ uint32_t idc_control;
+
+ qla83xx_idc_lock(vha, 0);
+@@ -991,7 +991,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
+ continue;
+ if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
+ continue;
+- if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
++ if (iter->is4GBp_only == 0x27 &&
++ (!IS_QLA27XX(vha->hw) || !IS_QLA28XX(ha)))
+ continue;
+
+ sysfs_remove_bin_file(&host->shost_gendev.kobj,
+@@ -1336,7 +1337,8 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev,
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
+@@ -1383,7 +1385,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) &&
+- !IS_QLA27XX(ha))
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
+@@ -1596,7 +1598,7 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr,
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
+@@ -1610,7 +1612,7 @@ qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr,
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+@@ -1628,7 +1630,7 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n",
+@@ -1645,7 +1647,7 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
+ int mode = QLA_SET_DATA_RATE_LR;
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!IS_QLA27XX(vha->hw)) {
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) {
+ ql_log(ql_log_warn, vha, 0x70d8,
+ "Speed setting not supported \n");
+ return -EINVAL;
+@@ -2171,7 +2173,7 @@ qla2x00_fw_attr_show(struct device *dev,
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return scnprintf(buf, PAGE_SIZE, "\n");
+
+ return scnprintf(buf, PAGE_SIZE, "%llx\n",
+@@ -2357,6 +2359,9 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
+ case PORT_SPEED_32GB:
+ speed = FC_PORTSPEED_32GBIT;
+ break;
++ case PORT_SPEED_64GB:
++ speed = FC_PORTSPEED_64GBIT;
++ break;
+ }
+ fc_host_speed(shost) = speed;
+ }
+@@ -3036,6 +3041,9 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
+ else if (IS_QLA27XX(ha))
+ speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT |
+ FC_PORTSPEED_8GBIT;
++ else if (IS_QLA28XX(ha))
++ speed = FC_PORTSPEED_64GBIT | FC_PORTSPEED_32GBIT |
++ FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT;
+ else
+ speed = FC_PORTSPEED_1GBIT;
+ fc_host_supported_speeds(vha->host) = speed;
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 17d42658ad9a..2fe194a06e67 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -1412,7 +1412,8 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
+ start == (ha->flt_region_fw * 4))
+ valid = 1;
+ else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+- IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
++ IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha))
+ valid = 1;
+ if (!valid) {
+ ql_log(ql_log_warn, vha, 0x7058,
+@@ -2157,7 +2158,7 @@ qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
+ struct qla_hw_data *ha = vha->hw;
+ struct qla_flash_update_caps cap;
+
+- if (!(IS_QLA27XX(ha)))
++ if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ memset(&cap, 0, sizeof(cap));
+@@ -2190,7 +2191,7 @@ qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
+ uint64_t online_fw_attr = 0;
+ struct qla_flash_update_caps cap;
+
+- if (!(IS_QLA27XX(ha)))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ memset(&cap, 0, sizeof(cap));
+@@ -2238,7 +2239,7 @@ qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
+ uint8_t domain, area, al_pa, state;
+ int rval;
+
+- if (!(IS_QLA27XX(ha)))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return -EPERM;
+
+ memset(&bbcr, 0, sizeof(bbcr));
+@@ -2353,7 +2354,8 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
+ int rval;
+ struct qla_dport_diag *dd;
+
+- if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
++ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
++ !IS_QLA28XX(vha->hw))
+ return -EPERM;
+
+ dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
+index ec5bad0b0607..3cfd846cdb2a 100644
+--- a/drivers/scsi/qla2xxx/qla_dbg.c
++++ b/drivers/scsi/qla2xxx/qla_dbg.c
+@@ -176,7 +176,8 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+ return rval;
+ }
+ for (j = 0; j < dwords; j++) {
+- ram[i + j] = IS_QLA27XX(ha) ?
++ ram[i + j] =
++ (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]);
+ }
+ }
+@@ -251,7 +252,8 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
+ return rval;
+ }
+ for (j = 0; j < dwords; j++) {
+- ram[i + j] = IS_QLA27XX(ha) ?
++ ram[i + j] =
++ (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
+ chunk[j] : swab32(chunk[j]);
+ }
+ }
+@@ -665,7 +667,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
+ struct qla2xxx_mq_chain *mq = ptr;
+ device_reg_t *reg;
+
+- if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha))
+ return ptr;
+
+ mq = ptr;
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 3d46975a5e5c..c64cd555ef0d 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -1203,6 +1203,7 @@ struct mbx_cmd_32 {
+ #define QLA27XX_IMG_STATUS_VER_MAJOR 0x01
+ #define QLA27XX_IMG_STATUS_VER_MINOR 0x00
+ #define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE
++#define QLA28XX_IMG_STATUS_SIGN 0xFACEFADF
+ #define QLA27XX_PRIMARY_IMAGE 1
+ #define QLA27XX_SECONDARY_IMAGE 2
+
+@@ -2672,6 +2673,7 @@ struct ct_fdmiv2_hba_attributes {
+ #define FDMI_PORT_SPEED_8GB 0x10
+ #define FDMI_PORT_SPEED_16GB 0x20
+ #define FDMI_PORT_SPEED_32GB 0x40
++#define FDMI_PORT_SPEED_64GB 0x80
+ #define FDMI_PORT_SPEED_UNKNOWN 0x8000
+
+ #define FC_CLASS_2 0x04
+@@ -3368,7 +3370,8 @@ struct qla_tc_param {
+ #define QLA_MQ_SIZE 32
+ #define QLA_MAX_QUEUES 256
+ #define ISP_QUE_REG(ha, id) \
+- ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \
++ ((ha->mqenable || IS_QLA83XX(ha) || \
++ IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? \
+ ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
+ ((void __iomem *)ha->iobase))
+ #define QLA_REQ_QUE_ID(tag) \
+@@ -3703,6 +3706,7 @@ struct qla_hw_data {
+ #define PORT_SPEED_8GB 0x04
+ #define PORT_SPEED_16GB 0x05
+ #define PORT_SPEED_32GB 0x06
++#define PORT_SPEED_64GB 0x07
+ #define PORT_SPEED_10GB 0x13
+ uint16_t link_data_rate; /* F/W operating speed */
+ uint16_t set_data_rate; /* Set by user */
+@@ -3729,6 +3733,11 @@ struct qla_hw_data {
+ #define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071
+ #define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271
+ #define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261
++#define PCI_DEVICE_ID_QLOGIC_ISP2061 0x2061
++#define PCI_DEVICE_ID_QLOGIC_ISP2081 0x2081
++#define PCI_DEVICE_ID_QLOGIC_ISP2089 0x2089
++#define PCI_DEVICE_ID_QLOGIC_ISP2281 0x2281
++#define PCI_DEVICE_ID_QLOGIC_ISP2289 0x2289
+
+ uint32_t isp_type;
+ #define DT_ISP2100 BIT_0
+@@ -3753,7 +3762,12 @@ struct qla_hw_data {
+ #define DT_ISP2071 BIT_19
+ #define DT_ISP2271 BIT_20
+ #define DT_ISP2261 BIT_21
+-#define DT_ISP_LAST (DT_ISP2261 << 1)
++#define DT_ISP2061 BIT_22
++#define DT_ISP2081 BIT_23
++#define DT_ISP2089 BIT_24
++#define DT_ISP2281 BIT_25
++#define DT_ISP2289 BIT_26
++#define DT_ISP_LAST (DT_ISP2289 << 1)
+
+ uint32_t device_type;
+ #define DT_T10_PI BIT_25
+@@ -3788,6 +3802,8 @@ struct qla_hw_data {
+ #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
+ #define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
+ #define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261)
++#define IS_QLA2081(ha) (DT_MASK(ha) & DT_ISP2081)
++#define IS_QLA2281(ha) (DT_MASK(ha) & DT_ISP2281)
+
+ #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
+ IS_QLA6312(ha) || IS_QLA6322(ha))
+@@ -3797,6 +3813,7 @@ struct qla_hw_data {
+ #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
+ #define IS_QLA84XX(ha) (IS_QLA8432(ha))
+ #define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha))
++#define IS_QLA28XX(ha) (IS_QLA2081(ha) || IS_QLA2281(ha))
+ #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
+ IS_QLA84XX(ha))
+ #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
+@@ -3805,14 +3822,15 @@ struct qla_hw_data {
+ #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
+ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
+ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+- IS_QLA8044(ha) || IS_QLA27XX(ha))
++ IS_QLA8044(ha) || IS_QLA27XX(ha) || \
++ IS_QLA28XX(ha))
+ #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+- IS_QLA27XX(ha))
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ #define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
+ #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+- IS_QLA27XX(ha))
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
+- IS_QLA27XX(ha))
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
+
+ #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI)
+@@ -3823,28 +3841,34 @@ struct qla_hw_data {
+ #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS)
+ #define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED)
+ #define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \
+- IS_QLA27XX(ha))
+-#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha)))
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
++#define IS_BIDI_CAPABLE(ha) \
++ (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ /* Bit 21 of fw_attributes decides the MCTP capabilities */
+ #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
+ ((ha)->fw_attributes_ext[0] & BIT_0))
+ #define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ #define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+ #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
+-#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
++ IS_QLA28XX(ha))
+ #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
+ (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+-#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
++ IS_QLA28XX(ha))
+ #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
+-#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
+-#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
+-#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha) || IS_QLA28XX(ha))
++#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
++ IS_QLA28XX(ha))
++#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \
++ IS_QLA28XX(ha))
+ #define IS_EXCHG_OFFLD_CAPABLE(ha) \
+- (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ #define IS_EXLOGIN_OFFLD_CAPABLE(ha) \
+- (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ #define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
+- IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+
+ /* HBA serial number */
+ uint8_t serial0;
+@@ -4595,6 +4619,7 @@ struct qla2_sgx {
+ #define OPTROM_SIZE_81XX 0x400000
+ #define OPTROM_SIZE_82XX 0x800000
+ #define OPTROM_SIZE_83XX 0x1000000
++#define OPTROM_SIZE_28XX 0x2000000
+
+ #define OPTROM_BURST_SIZE 0x1000
+ #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4)
+@@ -4691,10 +4716,11 @@ struct sff_8247_a0 {
+ #define AUTO_DETECT_SFP_SUPPORT(_vha)\
+ (ql2xautodetectsfp && !_vha->vp_idx && \
+ (IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\
+- IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw)))
++ IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \
++ IS_QLA28XX(_vha->hw)))
+
+ #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \
+- (IS_QLA27XX(_ha) || IS_QLA83XX(_ha)))
++ (IS_QLA27XX(_ha) || IS_QLA28XX(_ha) || IS_QLA83XX(_ha)))
+
+ #define SAVE_TOPO(_ha) { \
+ if (_ha->current_topology) \
+diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
+index 5819a45ac5ef..18dd8a640b7c 100644
+--- a/drivers/scsi/qla2xxx/qla_dfs.c
++++ b/drivers/scsi/qla2xxx/qla_dfs.c
+@@ -386,7 +386,7 @@ qla_dfs_naqp_write(struct file *file, const char __user *buffer,
+ int rc = 0;
+ unsigned long num_act_qp;
+
+- if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) {
++ if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
+ pr_err("host%ld: this adapter does not support Multi Q.",
+ vha->host_no);
+ return -EINVAL;
+@@ -438,7 +438,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+- !IS_QLA27XX(ha))
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto out;
+ if (!ha->fce)
+ goto out;
+@@ -474,7 +474,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
+ ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
+ S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
+
+- if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
++ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
+ ha->tgt.dfs_naqp = debugfs_create_file("naqp",
+ 0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+ out:
+diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
+index 50c1e6c62e31..f7ff1d01a315 100644
+--- a/drivers/scsi/qla2xxx/qla_fw.h
++++ b/drivers/scsi/qla2xxx/qla_fw.h
+@@ -2005,6 +2005,8 @@ struct ex_init_cb_81xx {
+
+ #define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000
+ #define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000
++#define FARX_ACCESS_FLASH_CONF_28XX 0x7FFD0000
++#define FARX_ACCESS_FLASH_DATA_28XX 0x7F7D0000
+
+ /* FCP priority config defines *************************************/
+ /* operations */
+@@ -2079,6 +2081,7 @@ struct qla_fcp_prio_cfg {
+ #define FA_NPIV_CONF1_ADDR_81 0xD2000
+
+ /* 83XX Flash locations -- occupies second 8MB region. */
+-#define FA_FLASH_LAYOUT_ADDR_83 0xFC400
++#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4)
++#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4)
+
+ #endif
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index c6fdad12428e..41b5fa1f5774 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -1794,7 +1794,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+ if (IS_CNA_CAPABLE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_10GB);
+- else if (IS_QLA27XX(ha))
++ else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
+@@ -2373,7 +2373,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
+ if (IS_CNA_CAPABLE(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_10GB);
+- else if (IS_QLA27XX(ha))
++ else if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ eiter->a.sup_speed = cpu_to_be32(
+ FDMI_PORT_SPEED_32GB|
+ FDMI_PORT_SPEED_16GB|
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 4008b97a63d9..d9177fd88461 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -3018,7 +3018,7 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+ if (IS_FWI2_CAPABLE(ha)) {
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+- !IS_QLA27XX(ha))
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto try_eft;
+
+ if (ha->fce)
+@@ -3106,7 +3106,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ mem_size = (ha->fw_memory_size - 0x11000 + 1) *
+ sizeof(uint16_t);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
+ else if (IS_QLA81XX(ha))
+ fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
+@@ -3118,7 +3118,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ mem_size = (ha->fw_memory_size - 0x100000 + 1) *
+ sizeof(uint32_t);
+ if (ha->mqenable) {
+- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) &&
++ !IS_QLA28XX(ha))
+ mq_size = sizeof(struct qla2xxx_mq_chain);
+ /*
+ * Allocate maximum buffer size for all queues.
+@@ -3133,7 +3134,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ mq_size += ha->tgt.atio_q_length * sizeof(request_t);
+ /* Allocate memory for Fibre Channel Event Buffer. */
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+- !IS_QLA27XX(ha))
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto try_eft;
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+@@ -3143,7 +3144,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ eft_size = EFT_SIZE;
+ }
+
+- if (IS_QLA27XX(ha)) {
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (!ha->fw_dump_template) {
+ ql_log(ql_log_warn, vha, 0x00ba,
+ "Failed missing fwdump template\n");
+@@ -3186,7 +3187,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ "Allocated (%d KB) for firmware dump.\n",
+ dump_size / 1024);
+
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ return;
+
+ ha->fw_dump->signature[0] = 'Q';
+@@ -3498,7 +3499,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ if (rval == QLA_SUCCESS) {
+ qla24xx_detect_sfp(vha);
+
+- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
++ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6))
+ qla27xx_set_zio_threshold(vha,
+ ha->last_zio_threshold);
+@@ -3570,7 +3572,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ }
+
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ ha->flags.fac_supported = 1;
+ else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
+ uint32_t size;
+@@ -3585,7 +3587,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
+ ha->fw_major_version, ha->fw_minor_version,
+ ha->fw_subminor_version);
+
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ ha->flags.fac_supported = 0;
+ rval = QLA_SUCCESS;
+ }
+@@ -3738,7 +3741,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
+
+ /* Move PUREX, ABTS RX & RIDA to ATIOQ */
+ if (ql2xmvasynctoatio &&
+- (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
++ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha))
+ ha->fw_options[2] |= BIT_11;
+@@ -3746,7 +3749,8 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
+ ha->fw_options[2] &= ~BIT_11;
+ }
+
+- if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ /*
+ * Tell FW to track each exchange to prevent
+ * driver from using stale exchange.
+@@ -3843,7 +3847,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
+ if (IS_SHADOW_REG_CAPABLE(ha))
+ icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29);
+
+- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS);
+ icb->rid = cpu_to_le16(rid);
+ if (ha->flags.msix_enabled) {
+@@ -7205,6 +7210,7 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+ uint32_t *wptr;
+ uint32_t cnt, chksum, size;
+ struct qla_hw_data *ha = vha->hw;
++ uint32_t signature;
+
+ valid_pri_image = valid_sec_image = 1;
+ ha->active_image = 0;
+@@ -7218,7 +7224,9 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+ qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status),
+ ha->flt_region_img_status_pri, size);
+
+- if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
++ signature = le32_to_cpu(pri_image_status.signature);
++ if (signature != QLA27XX_IMG_STATUS_SIGN &&
++ signature != QLA28XX_IMG_STATUS_SIGN) {
+ ql_dbg(ql_dbg_init, vha, 0x018b,
+ "Primary image signature (0x%x) not valid\n",
+ pri_image_status.signature);
+@@ -7248,7 +7256,9 @@ uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha)
+ qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status),
+ ha->flt_region_img_status_sec, size);
+
+- if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) {
++ signature = le32_to_cpu(sec_image_status.signature);
++ if (signature != QLA27XX_IMG_STATUS_SIGN &&
++ signature != QLA28XX_IMG_STATUS_SIGN) {
+ ql_dbg(ql_dbg_init, vha, 0x018d,
+ "Secondary image signature(0x%x) not valid\n",
+ sec_image_status.signature);
+@@ -7308,7 +7318,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ dcode = (uint32_t *)req->ring;
+ *srisc_addr = 0;
+
+- if (IS_QLA27XX(ha) &&
++ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_fw_sec;
+
+@@ -7372,7 +7382,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ segments--;
+ }
+
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+@@ -7640,7 +7650,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+ segments--;
+ }
+
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return rval;
+
+ if (ha->fw_dump_template)
+@@ -8145,7 +8155,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
+ ha->login_retry_count = ql2xloginretrycount;
+
+ /* if not running MSI-X we need handshaking on interrupts */
+- if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha)))
++ if (!vha->hw->flags.msix_enabled &&
++ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
+ icb->firmware_options_2 |= cpu_to_le32(BIT_22);
+
+ /* Enable ZIO. */
+@@ -8178,7 +8189,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
+ /* N2N: driver will initiate Login instead of FW */
+ icb->firmware_options_3 |= BIT_8;
+
+- if (IS_QLA27XX(ha)) {
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ icb->firmware_options_3 |= BIT_8;
+ ql_dbg(ql_log_info, vha, 0x0075,
+ "Enabling direct connection.\n");
+@@ -8591,7 +8602,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
+ qpair->msix->in_use = 1;
+ list_add_tail(&qpair->qp_list_elem, &vha->qp_list);
+ qpair->pdev = ha->pdev;
+- if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
++ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
+ qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
+
+ mutex_unlock(&ha->mq_lock);
+diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
+index 456a41d2e2c6..8d139f1d53e5 100644
+--- a/drivers/scsi/qla2xxx/qla_iocb.c
++++ b/drivers/scsi/qla2xxx/qla_iocb.c
+@@ -467,7 +467,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+- if (ha->mqenable || IS_QLA27XX(ha)) {
++ if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+ } else if (IS_QLA83XX(ha)) {
+ WRT_REG_DWORD(req->req_q_in, req->ring_index);
+@@ -2325,7 +2325,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
+ if (req->cnt < req_cnt + 2) {
+ if (qpair->use_shadow_reg)
+ cnt = *req->out_ptr;
+- else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha))
+ cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+ else if (IS_P3P_TYPE(ha))
+ cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 69bbea9239cc..1552f81ee4bd 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -708,7 +708,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
+ break;
+
+ case MBA_SYSTEM_ERR: /* System Error */
+- mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ?
++ mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) ?
+ RD_REG_WORD(&reg24->mailbox7) : 0;
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
+@@ -3014,7 +3015,8 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+ qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
+ break;
+ case ABTS_RECV_24XX:
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ /* ensure that the ATIO queue is empty */
+ qlt_handle_abts_recv(vha, rsp,
+ (response_t *)pkt);
+@@ -3087,7 +3089,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha)
+ struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
+- !IS_QLA27XX(ha))
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return;
+
+ rval = QLA_SUCCESS;
+@@ -3539,7 +3541,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
+ }
+
+ /* Enable MSI-X vector for response queue update for queue 0 */
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (ha->msixbase && ha->mqiobase &&
+ (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 ||
+ ql2xmqsupport))
+@@ -3570,7 +3572,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
+ /* If possible, enable MSI-X. */
+ if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
+ !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
+- !IS_QLAFX00(ha) && !IS_QLA27XX(ha)))
++ !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
+ goto skip_msi;
+
+ if (ql2xenablemsix == 2)
+@@ -3609,7 +3611,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
+
+ if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
+ !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
+- !IS_QLA27XX(ha))
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto skip_msi;
+
+ ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 5400696e1f6b..381221118830 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -656,7 +656,7 @@ static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha)
+ {
+ uint16_t mb4 = BIT_0;
+
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mb4 |= ha->long_range_distance << LR_DIST_FW_POS;
+
+ return mb4;
+@@ -666,7 +666,7 @@ static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha)
+ {
+ uint16_t mb4 = BIT_0;
+
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ struct nvram_81xx *nv = ha->nvram;
+
+ mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features);
+@@ -711,7 +711,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
+ mcp->mb[4] = 0;
+ ha->flags.using_lr_setting = 0;
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+- IS_QLA27XX(ha)) {
++ IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (ql2xautodetectsfp) {
+ if (ha->flags.detected_lr_sfp) {
+ mcp->mb[4] |=
+@@ -730,10 +730,10 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
+ }
+ }
+
+- if (ql2xnvmeenable && IS_QLA27XX(ha))
++ if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
+ mcp->mb[4] |= NVME_ENABLE_FLAG;
+
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ struct nvram_81xx *nv = ha->nvram;
+ /* set minimum speed if specified in nvram */
+ if (nv->min_link_speed >= 2 &&
+@@ -777,7 +777,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
+ "fw_ability_mask=%x.\n", ha->fw_ability_mask);
+ ql_dbg(ql_dbg_mbx, vha, 0x1027,
+ "exchanges=%x.\n", mcp->mb[1]);
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ ha->max_speed_sup = mcp->mb[2] & BIT_0;
+ ql_dbg(ql_dbg_mbx, vha, 0x119b,
+ "Maximum speed supported=%s.\n",
+@@ -1053,7 +1054,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
+ mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
+ if (IS_FWI2_CAPABLE(ha))
+ mcp->in_mb |= MBX_17|MBX_16|MBX_15;
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mcp->in_mb |=
+ MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
+ MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
+@@ -1122,7 +1123,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
+ }
+ }
+
+- if (IS_QLA27XX(ha)) {
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ha->mpi_version[0] = mcp->mb[10] & 0xff;
+ ha->mpi_version[1] = mcp->mb[11] >> 8;
+ ha->mpi_version[2] = mcp->mb[11] & 0xff;
+@@ -1638,7 +1639,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
+ mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
+ if (IS_FWI2_CAPABLE(vha->hw))
+ mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
+- if (IS_QLA27XX(vha->hw))
++ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
+ mcp->in_mb |= MBX_15;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+@@ -1692,7 +1693,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
+ }
+ }
+
+- if (IS_QLA27XX(vha->hw))
++ if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw))
+ vha->bbcr = mcp->mb[15];
+ }
+
+@@ -1808,7 +1809,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+ }
+ /* 1 and 2 should normally be captured. */
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ /* mb3 is additional info about the installed SFP. */
+ mcp->in_mb |= MBX_3;
+ mcp->buf_size = size;
+@@ -1822,7 +1823,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
+ "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n",
+ rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
+ } else {
+- if (IS_QLA27XX(ha)) {
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+ ql_dbg(ql_dbg_mbx, vha, 0x119d,
+ "Invalid SFP/Validation Failed\n");
+@@ -2076,7 +2077,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
+ /*EMPTY*/
+ ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
+ } else {
+- if (IS_QLA27XX(ha)) {
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (mcp->mb[2] == 6 || mcp->mb[3] == 2)
+ ql_dbg(ql_dbg_mbx, vha, 0x119e,
+ "Invalid SFP/Validation Failed\n");
+@@ -2859,7 +2860,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
+ mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
+ mcp->out_mb = MBX_0;
+ mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+- if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw))
++ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mcp->in_mb |= MBX_12;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+@@ -2884,7 +2886,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha)
+ ha->orig_fw_iocb_count = mcp->mb[10];
+ if (ha->flags.npiv_supported)
+ ha->max_npiv_vports = mcp->mb[11];
+- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha))
+ ha->fw_max_fcf_count = mcp->mb[12];
+ }
+
+@@ -3323,7 +3326,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data)
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+- !IS_QLA27XX(vha->hw))
++ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182,
+@@ -3362,7 +3365,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) &&
+- !IS_QLA27XX(vha->hw))
++ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185,
+@@ -3631,7 +3634,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
+ "Entered %s.\n", __func__);
+
+ if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
+- !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
++ !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
++ !IS_QLA28XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+@@ -4318,7 +4322,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
+ mcp->mb[12] = req->qos;
+ mcp->mb[11] = req->vp_idx;
+ mcp->mb[13] = req->rid;
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mcp->mb[15] = 0;
+
+ mcp->mb[4] = req->id;
+@@ -4332,9 +4336,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
+ mcp->flags = MBX_DMA_OUT;
+ mcp->tov = MBX_TOV_SECONDS * 2;
+
+- if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha))
+ mcp->in_mb |= MBX_1;
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ mcp->out_mb |= MBX_15;
+ /* debug q create issue in SR-IOV */
+ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
+@@ -4343,7 +4348,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!(req->options & BIT_0)) {
+ WRT_REG_DWORD(req->req_q_in, 0);
+- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ WRT_REG_DWORD(req->req_q_out, 0);
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+@@ -4387,7 +4392,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+ mcp->mb[5] = rsp->length;
+ mcp->mb[14] = rsp->msix->entry;
+ mcp->mb[13] = rsp->rid;
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mcp->mb[15] = 0;
+
+ mcp->mb[4] = rsp->id;
+@@ -4404,7 +4409,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+ if (IS_QLA81XX(ha)) {
+ mcp->out_mb |= MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+- } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10;
+ mcp->in_mb |= MBX_1;
+ /* debug q create issue in SR-IOV */
+@@ -4414,7 +4419,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!(rsp->options & BIT_0)) {
+ WRT_REG_DWORD(rsp->rsp_q_out, 0);
+- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ WRT_REG_DWORD(rsp->rsp_q_in, 0);
+ }
+
+@@ -4472,7 +4477,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
+ "Entered %s.\n", __func__);
+
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+- !IS_QLA27XX(vha->hw))
++ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
+@@ -4504,7 +4509,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+- !IS_QLA27XX(vha->hw))
++ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
+@@ -4539,7 +4544,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) &&
+- !IS_QLA27XX(vha->hw))
++ !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
+@@ -5278,7 +5283,7 @@ qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode)
+
+ mcp->out_mb = MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mcp->in_mb |= MBX_4|MBX_3;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+@@ -5316,7 +5321,7 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
+ mcp->mb[1] = QLA_GET_DATA_RATE;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mcp->in_mb |= MBX_3;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+@@ -5346,7 +5351,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
+ "Entered %s.\n", __func__);
+
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
+- !IS_QLA27XX(ha))
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_FUNCTION_FAILED;
+ mcp->mb[0] = MBC_GET_PORT_CONFIG;
+ mcp->out_mb = MBX_0;
+@@ -5842,7 +5847,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
+@@ -5917,7 +5922,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data)
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long retry_max_time = jiffies + (2 * HZ);
+
+- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__);
+@@ -5967,7 +5972,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
+@@ -6101,7 +6106,8 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
+ mbx_cmd_t *mcp = &mc;
+ dma_addr_t dd_dma;
+
+- if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
++ if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) &&
++ !IS_QLA28XX(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f,
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index 677f82fdf56f..f5fce5b52067 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -427,7 +427,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
+ qla_cpu_update(rsp->qpair, raw_smp_processor_id());
+ ha->base_qpair->pdev = ha->pdev;
+
+- if (IS_QLA27XX(ha) || IS_QLA83XX(ha))
++ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
+ ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs;
+ }
+
+@@ -2753,6 +2753,24 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
+ ha->device_type |= DT_T10_PI;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ break;
++ case PCI_DEVICE_ID_QLOGIC_ISP2081:
++ case PCI_DEVICE_ID_QLOGIC_ISP2089:
++ ha->isp_type |= DT_ISP2081;
++ ha->device_type |= DT_ZIO_SUPPORTED;
++ ha->device_type |= DT_FWI2;
++ ha->device_type |= DT_IIDMA;
++ ha->device_type |= DT_T10_PI;
++ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
++ break;
++ case PCI_DEVICE_ID_QLOGIC_ISP2281:
++ case PCI_DEVICE_ID_QLOGIC_ISP2289:
++ ha->isp_type |= DT_ISP2281;
++ ha->device_type |= DT_ZIO_SUPPORTED;
++ ha->device_type |= DT_FWI2;
++ ha->device_type |= DT_IIDMA;
++ ha->device_type |= DT_T10_PI;
++ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
++ break;
+ }
+
+ if (IS_QLA82XX(ha))
+@@ -2760,7 +2778,8 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
+ else {
+ /* Get adapter physical port no from interrupt pin register. */
+ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+- if (IS_QLA27XX(ha))
++ if (IS_QLA25XX(ha) || IS_QLA2031(ha) ||
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ ha->port_no--;
+ else
+ ha->port_no = !(ha->port_no & 1);
+@@ -2857,7 +2876,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 ||
+- pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) {
++ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 ||
++ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 ||
++ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 ||
++ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 ||
++ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) {
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ mem_only = 1;
+ ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
+@@ -2906,7 +2929,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ /* Set EEH reset type to fundamental if required by hba */
+ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) ||
+- IS_QLA83XX(ha) || IS_QLA27XX(ha))
++ IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ pdev->needs_freset = 1;
+
+ ha->prev_topology = 0;
+@@ -3085,6 +3108,23 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
+ ha->nvram_conf_off = ~0;
+ ha->nvram_data_off = ~0;
++ } else if (IS_QLA28XX(ha)) {
++ ha->portnum = PCI_FUNC(ha->pdev->devfn);
++ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
++ ha->mbx_count = MAILBOX_REGISTER_COUNT;
++ req_length = REQUEST_ENTRY_CNT_24XX;
++ rsp_length = RESPONSE_ENTRY_CNT_2300;
++ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
++ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
++ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
++ ha->gid_list_info_size = 8;
++ ha->optrom_size = OPTROM_SIZE_28XX;
++ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
++ ha->isp_ops = &qla27xx_isp_ops;
++ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX;
++ ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX;
++ ha->nvram_conf_off = ~0;
++ ha->nvram_data_off = ~0;
+ }
+
+ ql_dbg_pci(ql_dbg_init, pdev, 0x001e,
+@@ -3250,7 +3290,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ req->req_q_out = &ha->iobase->isp24.req_q_out;
+ rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in;
+ rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out;
+- if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ req->req_q_in = &ha->mqiobase->isp25mq.req_q_in;
+ req->req_q_out = &ha->mqiobase->isp25mq.req_q_out;
+ rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in;
+@@ -3576,7 +3617,8 @@ qla2x00_shutdown(struct pci_dev *pdev)
+ if (ha->eft)
+ qla2x00_disable_eft_trace(vha);
+
+- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(vha);
+ } else {
+@@ -3681,7 +3723,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha)
+ if (ha->mqiobase)
+ iounmap(ha->mqiobase);
+
+- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase)
++ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
++ ha->msixbase)
+ iounmap(ha->msixbase);
+ }
+ }
+@@ -3732,7 +3775,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
+ }
+ qla2x00_wait_for_hba_ready(base_vha);
+
+- if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ if (ha->flags.fw_started)
+ qla2x00_abort_isp_cleanup(base_vha);
+ } else if (!IS_QLAFX00(ha)) {
+@@ -4222,7 +4266,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
+ ha->npiv_info = NULL;
+
+ /* Get consistent memory allocated for EX-INIT-CB. */
+- if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
++ if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+ &ha->ex_init_cb_dma);
+ if (!ha->ex_init_cb)
+@@ -6690,7 +6735,7 @@ qla2x00_timer(struct timer_list *t)
+ if (!vha->vp_idx &&
+ (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6) &&
+- (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
++ (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
+ ql_log(ql_log_info, vha, 0x3002,
+ "Sched: Set ZIO exchange threshold to %d.\n",
+ ha->last_zio_threshold);
+@@ -6736,7 +6781,6 @@ qla2x00_timer(struct timer_list *t)
+
+ /* Firmware interface routines. */
+
+-#define FW_BLOBS 11
+ #define FW_ISP21XX 0
+ #define FW_ISP22XX 1
+ #define FW_ISP2300 2
+@@ -6748,6 +6792,7 @@ qla2x00_timer(struct timer_list *t)
+ #define FW_ISP2031 8
+ #define FW_ISP8031 9
+ #define FW_ISP27XX 10
++#define FW_ISP28XX 11
+
+ #define FW_FILE_ISP21XX "ql2100_fw.bin"
+ #define FW_FILE_ISP22XX "ql2200_fw.bin"
+@@ -6760,11 +6805,12 @@ qla2x00_timer(struct timer_list *t)
+ #define FW_FILE_ISP2031 "ql2600_fw.bin"
+ #define FW_FILE_ISP8031 "ql8300_fw.bin"
+ #define FW_FILE_ISP27XX "ql2700_fw.bin"
++#define FW_FILE_ISP28XX "ql2800_fw.bin"
+
+
+ static DEFINE_MUTEX(qla_fw_lock);
+
+-static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
++static struct fw_blob qla_fw_blobs[] = {
+ { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, },
+ { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, },
+ { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, },
+@@ -6776,6 +6822,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
+ { .name = FW_FILE_ISP2031, },
+ { .name = FW_FILE_ISP8031, },
+ { .name = FW_FILE_ISP27XX, },
++ { .name = FW_FILE_ISP28XX, },
++ { .name = NULL, },
+ };
+
+ struct fw_blob *
+@@ -6806,10 +6854,15 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
+ blob = &qla_fw_blobs[FW_ISP8031];
+ } else if (IS_QLA27XX(ha)) {
+ blob = &qla_fw_blobs[FW_ISP27XX];
++ } else if (IS_QLA28XX(ha)) {
++ blob = &qla_fw_blobs[FW_ISP28XX];
+ } else {
+ return NULL;
+ }
+
++ if (!blob->name)
++ return NULL;
++
+ mutex_lock(&qla_fw_lock);
+ if (blob->fw)
+ goto out;
+@@ -6819,7 +6872,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
+ "Failed to load firmware image (%s).\n", blob->name);
+ blob->fw = NULL;
+ blob = NULL;
+- goto out;
+ }
+
+ out:
+@@ -6830,11 +6882,11 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
+ static void
+ qla2x00_release_firmware(void)
+ {
+- int idx;
++ struct fw_blob *blob;
+
+ mutex_lock(&qla_fw_lock);
+- for (idx = 0; idx < FW_BLOBS; idx++)
+- release_firmware(qla_fw_blobs[idx].fw);
++ for (blob = qla_fw_blobs; blob->name; blob++)
++ release_firmware(blob->fw);
+ mutex_unlock(&qla_fw_lock);
+ }
+
+@@ -7220,6 +7272,11 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) },
++ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) },
++ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) },
++ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) },
++ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) },
++ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) },
+ { 0 },
+ };
+ MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
+diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
+index 2a3055c799fb..579d6a8c7ba0 100644
+--- a/drivers/scsi/qla2xxx/qla_sup.c
++++ b/drivers/scsi/qla2xxx/qla_sup.c
+@@ -571,6 +571,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
+ } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
+ *start = FA_FLASH_LAYOUT_ADDR_83;
+ goto end;
++ } else if (IS_QLA28XX(ha)) {
++ *start = FA_FLASH_LAYOUT_ADDR_28;
++ goto end;
+ }
+ /* Begin with first PCI expansion ROM header. */
+ buf = (uint8_t *)req->ring;
+@@ -753,13 +756,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_2:
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_vpd = start;
+ break;
+ case FLT_REG_VPD_3:
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ break;
+ if (ha->port_no == 3)
+ ha->flt_region_vpd = start;
+@@ -777,13 +780,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_2:
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ break;
+ if (ha->port_no == 2)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_NVRAM_3:
+- if (!IS_QLA27XX(ha))
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ break;
+ if (ha->port_no == 3)
+ ha->flt_region_nvram = start;
+@@ -847,35 +850,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
+ ha->flt_region_nvram = start;
+ break;
+ case FLT_REG_IMG_PRI_27XX:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_img_status_pri = start;
+ break;
+ case FLT_REG_IMG_SEC_27XX:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_img_status_sec = start;
+ break;
+ case FLT_REG_FW_SEC_27XX:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_fw_sec = start;
+ break;
+ case FLT_REG_BOOTLOAD_SEC_27XX:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_boot_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_0:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_1:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_2:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ case FLT_REG_VPD_SEC_27XX_3:
+- if (IS_QLA27XX(ha))
++ if (IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ ha->flt_region_vpd_sec = start;
+ break;
+ }
+@@ -1045,7 +1048,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha)
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+- !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha))
++ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
++ !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ return QLA_SUCCESS;
+
+ ret = qla2xxx_find_flt_start(vha, &flt_addr);
+@@ -1248,7 +1252,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
+
+ /* Prepare burst-capable write on supported ISPs. */
+ if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+- IS_QLA27XX(ha)) &&
++ IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) {
+ optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
+ &optrom_dma, GFP_KERNEL);
+@@ -1728,7 +1732,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
+ {
+ uint32_t led_select_value = 0;
+
+- if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+ goto out;
+
+ if (ha->port_no == 0)
+@@ -1749,13 +1753,14 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
+ uint16_t orig_led_cfg[6];
+ uint32_t led_10_value, led_43_value;
+
+- if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha))
++ if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha) &&
++ !IS_QLA28XX(ha))
+ return;
+
+ if (!ha->beacon_blink_led)
+ return;
+
+- if (IS_QLA27XX(ha)) {
++ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ qla2x00_write_ram_word(vha, 0x1003, 0x40000230);
+ qla2x00_write_ram_word(vha, 0x1004, 0x40000230);
+ } else if (IS_QLA2031(ha)) {
+@@ -1845,7 +1850,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
+ return QLA_FUNCTION_FAILED;
+ }
+
+- if (IS_QLA2031(ha) || IS_QLA27XX(ha))
++ if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ goto skip_gpio;
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+@@ -1885,7 +1890,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
+
+ ha->beacon_blink_led = 0;
+
+- if (IS_QLA2031(ha) || IS_QLA27XX(ha))
++ if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ goto set_fw_options;
+
+ if (IS_QLA8031(ha) || IS_QLA81XX(ha))
+@@ -2620,7 +2625,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+- IS_QLA27XX(ha))
++ IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ goto try_fast;
+ if (offset & 0xfff)
+ goto slow_read;
+@@ -3042,7 +3047,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+
+ dcode = mbuf;
+ pcihdr = ha->flt_region_boot << 2;
+- if (IS_QLA27XX(ha) &&
++ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ pcihdr = ha->flt_region_boot_sec << 2;
+
+@@ -3119,7 +3124,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+ dcode = mbuf;
+ faddr = ha->flt_region_fw;
+- if (IS_QLA27XX(ha) &&
++ if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
+ qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
+ faddr = ha->flt_region_fw_sec;
+
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 582d1663f971..12889a8cb108 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -6937,7 +6937,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
+ RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+ if (ha->flags.msix_enabled) {
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ if (IS_QLA2071(ha)) {
+ /* 4 ports Baker: Enable Interrupt Handshake */
+ icb->msix_atio = 0;
+@@ -6952,7 +6952,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha)
+ }
+ } else {
+ /* INTx|MSI */
+- if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ icb->msix_atio = 0;
+ icb->firmware_options_2 |= BIT_26;
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+@@ -7201,7 +7201,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+- if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
++ if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
++ IS_QLA28XX(ha)) {
+ ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+ } else {
+--
+2.16.4
+
diff --git a/patches.drivers/scsi-qla2xxx-Add-Serdes-support-for-ISP28XX.patch b/patches.drivers/scsi-qla2xxx-Add-Serdes-support-for-ISP28XX.patch
new file mode 100644
index 0000000000..0ed528fd48
--- /dev/null
+++ b/patches.drivers/scsi-qla2xxx-Add-Serdes-support-for-ISP28XX.patch
@@ -0,0 +1,374 @@
+From: Joe Carnuccio <joe.carnuccio@cavium.com>
+Date: Tue, 12 Mar 2019 11:08:14 -0700
+Subject: [PATCH] scsi: qla2xxx: Add Serdes support for ISP28XX
+Git-commit: 2a3192a3f3bc4fe1b077c55fffb6d8afe3213d57
+Patch-mainline: v5.2-rc1
+References: bsc#1136215
+
+This patch adds a sysfs node for serdes_version and also cleans up the
+port_speed display.
+
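+As a rough usage sketch (illustrative only, not part of the upstream
+change): the node lives under the SCSI host object and prints as
+"%d.%02d.%02d", so it reads like any other sysfs attribute. The host
+instance ("host0") below is an assumption and varies per system.
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		char buf[32];
+		FILE *f = fopen(
+		    "/sys/class/scsi_host/host0/serdes_version", "r");
+
+		if (!f) {
+			perror("fopen");
+			return 1;
+		}
+		if (fgets(buf, sizeof(buf), f))
+			printf("serdes: %s", buf);	/* e.g. "1.02.03" */
+		fclose(f);
+		return 0;
+	}
+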
+Signed-off-by: Joe Carnuccio <joe.carnuccio@cavium.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/scsi/qla2xxx/qla_attr.c | 43 +++++++++++++++++------
+ drivers/scsi/qla2xxx/qla_def.h | 4 ++-
+ drivers/scsi/qla2xxx/qla_gs.c | 77 ++++++++++++++++-------------------------
+ drivers/scsi/qla2xxx/qla_isr.c | 4 ++-
+ drivers/scsi/qla2xxx/qla_mbx.c | 25 +++++++------
+ 5 files changed, 82 insertions(+), 71 deletions(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index f035251a86ce..93da79a0ccf5 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -1377,6 +1377,21 @@ qla24xx_84xx_fw_version_show(struct device *dev,
+ return scnprintf(buf, PAGE_SIZE, "\n");
+ }
+
++static ssize_t
++qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
++ struct qla_hw_data *ha = vha->hw;
++
++ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
++ return scnprintf(buf, PAGE_SIZE, "\n");
++
++ return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
++ ha->serdes_version[0], ha->serdes_version[1],
++ ha->serdes_version[2]);
++}
++
+ static ssize_t
+ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+@@ -2220,6 +2235,7 @@ static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
+ NULL);
+ static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
+ NULL);
++static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL);
+ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
+ static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
+ static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
+@@ -2272,6 +2288,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
+ &dev_attr_optrom_fw_version,
+ &dev_attr_84xx_fw_version,
+ &dev_attr_total_isp_aborts,
++ &dev_attr_serdes_version,
+ &dev_attr_mpi_version,
+ &dev_attr_phy_version,
+ &dev_attr_flash_block_size,
+@@ -2328,16 +2345,15 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost)
+ static void
+ qla2x00_get_host_speed(struct Scsi_Host *shost)
+ {
+- struct qla_hw_data *ha = ((struct scsi_qla_host *)
+- (shost_priv(shost)))->hw;
+- u32 speed = FC_PORTSPEED_UNKNOWN;
++ scsi_qla_host_t *vha = shost_priv(shost);
++ u32 speed;
+
+- if (IS_QLAFX00(ha)) {
++ if (IS_QLAFX00(vha->hw)) {
+ qlafx00_get_host_speed(shost);
+ return;
+ }
+
+- switch (ha->link_data_rate) {
++ switch (vha->hw->link_data_rate) {
+ case PORT_SPEED_1GB:
+ speed = FC_PORTSPEED_1GBIT;
+ break;
+@@ -2362,7 +2378,11 @@ qla2x00_get_host_speed(struct Scsi_Host *shost)
+ case PORT_SPEED_64GB:
+ speed = FC_PORTSPEED_64GBIT;
+ break;
++ default:
++ speed = FC_PORTSPEED_UNKNOWN;
++ break;
+ }
++
+ fc_host_speed(shost) = speed;
+ }
+
+@@ -2370,7 +2390,7 @@ static void
+ qla2x00_get_host_port_type(struct Scsi_Host *shost)
+ {
+ scsi_qla_host_t *vha = shost_priv(shost);
+- uint32_t port_type = FC_PORTTYPE_UNKNOWN;
++ uint32_t port_type;
+
+ if (vha->vp_idx) {
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+@@ -2389,7 +2409,11 @@ qla2x00_get_host_port_type(struct Scsi_Host *shost)
+ case ISP_CFG_F:
+ port_type = FC_PORTTYPE_NPORT;
+ break;
++ default:
++ port_type = FC_PORTTYPE_UNKNOWN;
++ break;
+ }
++
+ fc_host_port_type(shost) = port_type;
+ }
+
+@@ -2451,13 +2475,10 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
+ fc_starget_port_id(starget) = port_id;
+ }
+
+-static void
++static inline void
+ qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+ {
+- if (timeout)
+- rport->dev_loss_tmo = timeout;
+- else
+- rport->dev_loss_tmo = 1;
++ rport->dev_loss_tmo = timeout ? timeout : 1;
+ }
+
+ static void
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index c64cd555ef0d..a4661306cc34 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -4023,6 +4023,7 @@ struct qla_hw_data {
+ uint8_t fw_seriallink_options[4];
+ uint16_t fw_seriallink_options24[4];
+
++ uint8_t serdes_version[3];
+ uint8_t mpi_version[3];
+ uint32_t mpi_capabilities;
+ uint8_t phy_version[3];
+@@ -4034,7 +4035,8 @@ struct qla_hw_data {
+ /* Firmware dump information. */
+ struct qla2xxx_fw_dump *fw_dump;
+ uint32_t fw_dump_len;
+- int fw_dumped;
++ bool fw_dumped;
++ bool fw_dump_mpi;
+ unsigned long fw_dump_cap_flags;
+ #define RISC_PAUSE_CMPL 0
+ #define DMA_SHUTDOWN_CMPL 1
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index 41b5fa1f5774..2d96344025ef 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -2783,6 +2783,31 @@ qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
+ return &p->p.req;
+ }
+
++static uint16_t
++qla2x00_port_speed_capability(uint16_t speed)
++{
++ switch (speed) {
++ case BIT_15:
++ return PORT_SPEED_1GB;
++ case BIT_14:
++ return PORT_SPEED_2GB;
++ case BIT_13:
++ return PORT_SPEED_4GB;
++ case BIT_12:
++ return PORT_SPEED_10GB;
++ case BIT_11:
++ return PORT_SPEED_8GB;
++ case BIT_10:
++ return PORT_SPEED_16GB;
++ case BIT_8:
++ return PORT_SPEED_32GB;
++ case BIT_7:
++ return PORT_SPEED_64GB;
++ default:
++ return PORT_SPEED_UNKNOWN;
++ }
++}
++
+ /**
+ * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
+ * @vha: HA context
+@@ -2855,31 +2880,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
+ }
+ rval = QLA_FUNCTION_FAILED;
+ } else {
+- /* Save port-speed */
+- switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
+- case BIT_15:
+- list[i].fp_speed = PORT_SPEED_1GB;
+- break;
+- case BIT_14:
+- list[i].fp_speed = PORT_SPEED_2GB;
+- break;
+- case BIT_13:
+- list[i].fp_speed = PORT_SPEED_4GB;
+- break;
+- case BIT_12:
+- list[i].fp_speed = PORT_SPEED_10GB;
+- break;
+- case BIT_11:
+- list[i].fp_speed = PORT_SPEED_8GB;
+- break;
+- case BIT_10:
+- list[i].fp_speed = PORT_SPEED_16GB;
+- break;
+- case BIT_8:
+- list[i].fp_speed = PORT_SPEED_32GB;
+- break;
+- }
+-
++ list->fp_speed = qla2x00_port_speed_capability(
++ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+ ql_dbg(ql_dbg_disc, vha, 0x205b,
+ "GPSC ext entry - fpn "
+ "%8phN speeds=%04x speed=%04x.\n",
+@@ -3048,29 +3050,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
+ goto done;
+ }
+ } else {
+- switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
+- case BIT_15:
+- fcport->fp_speed = PORT_SPEED_1GB;
+- break;
+- case BIT_14:
+- fcport->fp_speed = PORT_SPEED_2GB;
+- break;
+- case BIT_13:
+- fcport->fp_speed = PORT_SPEED_4GB;
+- break;
+- case BIT_12:
+- fcport->fp_speed = PORT_SPEED_10GB;
+- break;
+- case BIT_11:
+- fcport->fp_speed = PORT_SPEED_8GB;
+- break;
+- case BIT_10:
+- fcport->fp_speed = PORT_SPEED_16GB;
+- break;
+- case BIT_8:
+- fcport->fp_speed = PORT_SPEED_32GB;
+- break;
+- }
++ fcport->fp_speed = qla2x00_port_speed_capability(
++ be16_to_cpu(ct_rsp->rsp.gpsc.speed));
+
+ ql_dbg(ql_dbg_disc, vha, 0x2054,
+ "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index 1552f81ee4bd..c6139c054c62 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -714,7 +714,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
+ ql_log(ql_log_warn, vha, 0x5003,
+ "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
+ "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
+-
++ ha->fw_dump_mpi =
++ (IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
++ RD_REG_WORD(&reg24->mailbox7) & BIT_8;
+ ha->isp_ops->fw_dump(vha, 1);
+ ha->flags.fw_init_done = 0;
+ QLA_FW_STOPPED(ha);
+diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
+index 381221118830..304c21491d83 100644
+--- a/drivers/scsi/qla2xxx/qla_mbx.c
++++ b/drivers/scsi/qla2xxx/qla_mbx.c
+@@ -634,14 +634,15 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
+ mcp->out_mb |= MBX_4;
+ }
+
+- mcp->in_mb = MBX_0;
++ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1023,
+- "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
++ "Failed=%x mb[0]=%x mb[1]=%x.\n",
++ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
+ "Done %s.\n", __func__);
+@@ -1057,7 +1058,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
+ mcp->in_mb |=
+ MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18|
+- MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8;
++ MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7;
+
+ mcp->flags = 0;
+ mcp->tov = MBX_TOV_SECONDS;
+@@ -1124,6 +1125,9 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
+ }
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
++ ha->serdes_version[0] = mcp->mb[7] & 0xff;
++ ha->serdes_version[1] = mcp->mb[8] >> 8;
++ ha->serdes_version[2] = mcp->mb[8] & 0xff;
+ ha->mpi_version[0] = mcp->mb[10] & 0xff;
+ ha->mpi_version[1] = mcp->mb[11] >> 8;
+ ha->mpi_version[2] = mcp->mb[11] & 0xff;
+@@ -3748,7 +3752,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+- if (mb != NULL) {
++ if (mb) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+@@ -3783,7 +3787,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ mcp->mb[0] = MBC_PORT_PARAMS;
+ mcp->mb[1] = loop_id;
+ mcp->mb[2] = BIT_0;
+- mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
++ mcp->mb[3] = port_speed & 0x3F;
+ mcp->mb[9] = vha->vp_idx;
+ mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_1|MBX_0;
+@@ -3792,7 +3796,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ /* Return mailbox statuses. */
+- if (mb != NULL) {
++ if (mb) {
+ mb[0] = mcp->mb[0];
+ mb[1] = mcp->mb[1];
+ mb[3] = mcp->mb[3];
+@@ -4823,10 +4827,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x10e9,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+- if (mcp->mb[0] == MBS_COMMAND_ERROR &&
+- mcp->mb[1] == 0x22)
++ if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) {
+ /* sfp is not there */
+ rval = QLA_INTERFACE_ERROR;
++ }
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
+ "Done %s.\n", __func__);
+@@ -5166,13 +5170,14 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
+ mcp->mb[3] = MSW(data);
+ mcp->mb[8] = MSW(risc_addr);
+ mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
+- mcp->in_mb = MBX_0;
++ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = 30;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1101,
+- "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
++ "Failed=%x mb[0]=%x mb[1]=%x.\n",
++ rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
+ "Done %s.\n", __func__);
+--
+2.16.4
+
diff --git a/patches.drivers/scsi-qla2xxx-Add-fw_attr-and-port_no-SysFS-node.patch b/patches.drivers/scsi-qla2xxx-Add-fw_attr-and-port_no-SysFS-node.patch
new file mode 100644
index 0000000000..ea3b4dab9e
--- /dev/null
+++ b/patches.drivers/scsi-qla2xxx-Add-fw_attr-and-port_no-SysFS-node.patch
@@ -0,0 +1,76 @@
+From: Joe Carnuccio <joe.carnuccio@cavium.com>
+Date: Tue, 12 Mar 2019 11:08:10 -0700
+Subject: [PATCH] scsi: qla2xxx: Add fw_attr and port_no SysFS node
+Git-commit: df617ffbbc5ecb64334548546d4b0cc4ff0527c0
+Patch-mainline: v5.2-rc1
+References: bsc#1136215
+
+This patch adds new sysfs nodes to display the firmware attributes and
+the port number.
+
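+As a quick illustration (not part of the upstream change), both nodes
+are plain sysfs attributes: fw_attr prints a bare hex value ("%llx")
+and port_no a decimal. A minimal userspace sketch, with the "host0"
+instance assumed:
+
+	#include <inttypes.h>
+	#include <stdio.h>
+	#include <stdlib.h>
+
+	int main(void)
+	{
+		char buf[32];
+		FILE *f;
+
+		f = fopen("/sys/class/scsi_host/host0/fw_attr", "r");
+		if (f && fgets(buf, sizeof(buf), f)) {
+			/* fw_attr is printed as bare hex. */
+			uint64_t attr = strtoull(buf, NULL, 16);
+			printf("fw attributes: 0x%" PRIx64 "\n", attr);
+		}
+		if (f)
+			fclose(f);
+		return 0;
+	}
+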
+Signed-off-by: Joe Carnuccio <joe.carnuccio@cavium.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/scsi/qla2xxx/qla_attr.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index f928c4d3a1ef..8cc4d6589cbb 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -2164,6 +2164,32 @@ qla2x00_dif_bundle_statistics_show(struct device *dev,
+ ha->dif_bundle_dma_allocs, ha->pool.unusable.count);
+ }
+
++static ssize_t
++qla2x00_fw_attr_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
++ struct qla_hw_data *ha = vha->hw;
++
++ if (!IS_QLA27XX(ha))
++ return scnprintf(buf, PAGE_SIZE, "\n");
++
++ return scnprintf(buf, PAGE_SIZE, "%llx\n",
++ (uint64_t)ha->fw_attributes_ext[1] << 48 |
++ (uint64_t)ha->fw_attributes_ext[0] << 32 |
++ (uint64_t)ha->fw_attributes_h << 16 |
++ (uint64_t)ha->fw_attributes);
++}
++
++static ssize_t
++qla2x00_port_no_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
++
++ return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no);
++}
++
+ static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL);
+ static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
+ static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
+@@ -2221,6 +2247,8 @@ static DEVICE_ATTR(dif_bundle_statistics, 0444,
+ qla2x00_dif_bundle_statistics_show, NULL);
+ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show,
+ qla2x00_port_speed_store);
++static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL);
++static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL);
+
+
+ struct device_attribute *qla2x00_host_attrs[] = {
+@@ -2261,6 +2289,8 @@ struct device_attribute *qla2x00_host_attrs[] = {
+ &dev_attr_zio_threshold,
+ &dev_attr_dif_bundle_statistics,
+ &dev_attr_port_speed,
++ &dev_attr_port_no,
++ &dev_attr_fw_attr,
+ NULL, /* reserve for qlini_mode */
+ NULL, /* reserve for ql2xiniexchg */
+ NULL, /* reserve for ql2xexchoffld */
+--
+2.16.4
+
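qla2x00_fw_attr_show in the patch above composes a single 64-bit value from four 16-bit firmware attribute words, widest first; each word has to be widened to 64 bits before the shift, since shifting a 16-bit value by 32 or 48 directly would discard the bits. A self-contained sketch with hypothetical attribute words:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 16-bit attribute words, standing in for
	 * ha->fw_attributes_ext[1], fw_attributes_ext[0],
	 * fw_attributes_h and fw_attributes. */
	uint16_t ext1 = 0x00aa, ext0 = 0x00bb, attr_h = 0x00cc, attr = 0x00dd;

	/* Widen each word to 64 bits before shifting, exactly as the
	 * show routine above does. */
	uint64_t fw_attr = (uint64_t)ext1 << 48 |
			   (uint64_t)ext0 << 32 |
			   (uint64_t)attr_h << 16 |
			   (uint64_t)attr;

	printf("%llx\n", (unsigned long long)fw_attr); /* aa00bb00cc00dd */
	return 0;
}

Since both attributes are appended to qla2x00_host_attrs[], they appear in the SCSI host's sysfs directory next to the existing fw_version and port_speed nodes.
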
diff --git a/patches.drivers/scsi-qla2xxx-Add-support-for-multiple-fwdump-templat.patch b/patches.drivers/scsi-qla2xxx-Add-support-for-multiple-fwdump-templat.patch
new file mode 100644
index 0000000000..a84934654b
--- /dev/null
+++ b/patches.drivers/scsi-qla2xxx-Add-support-for-multiple-fwdump-templat.patch
@@ -0,0 +1,862 @@
+From: Joe Carnuccio <joe.carnuccio@cavium.com>
+Date: Tue, 12 Mar 2019 11:08:17 -0700
+Subject: [PATCH] scsi: qla2xxx: Add support for multiple fwdump
+ templates/segments
+Git-commit: a28d9e4ef99729d7e4db31d2dfeaf00755be4ab7
+Patch-mainline: v5.2-rc1
+References: bsc#1136215
+
+This patch adds support for multiple firmware dump templates and
+segments for ISP27XX/28XX.
+
+Signed-off-by: Joe Carnuccio <joe.carnuccio@cavium.com>
+Signed-off-by: Himanshu Madhani <hmadhani@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Acked-by: Hannes Reinecke <hare@suse.com>
+---
+ drivers/scsi/qla2xxx/qla_bsg.c | 3 +-
+ drivers/scsi/qla2xxx/qla_def.h | 9 +-
+ drivers/scsi/qla2xxx/qla_gbl.h | 2 +-
+ drivers/scsi/qla2xxx/qla_init.c | 408 +++++++++++++++++++++++-----------------
+ drivers/scsi/qla2xxx/qla_os.c | 14 +-
+ drivers/scsi/qla2xxx/qla_sup.c | 2 +
+ drivers/scsi/qla2xxx/qla_tmpl.c | 89 +++++----
+ 7 files changed, 304 insertions(+), 223 deletions(-)
+
+diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
+index 9547d9680bb2..4c294bcd100a 100644
+--- a/drivers/scsi/qla2xxx/qla_bsg.c
++++ b/drivers/scsi/qla2xxx/qla_bsg.c
+@@ -84,8 +84,7 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
+ return 0;
+ }
+
+- if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
+- bcode[3] != 'S') {
++ if (memcmp(bcode, "HQOS", 4)) {
+ /* Invalid FCP priority data header*/
+ ql_dbg(ql_dbg_user, vha, 0x7052,
+ "Invalid FCP Priority data header. bcode=0x%x.\n",
+diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
+index 533e498c5346..cf2f597fa7f4 100644
+--- a/drivers/scsi/qla2xxx/qla_def.h
++++ b/drivers/scsi/qla2xxx/qla_def.h
+@@ -4030,9 +4030,11 @@ struct qla_hw_data {
+ uint8_t pep_version[3];
+
+ /* Firmware dump template */
+- void *fw_dump_template;
+- uint32_t fw_dump_template_len;
+- /* Firmware dump information. */
++ struct fwdt {
++ void *template;
++ ulong length;
++ ulong dump_size;
++ } fwdt[2];
+ struct qla2xxx_fw_dump *fw_dump;
+ uint32_t fw_dump_len;
+ bool fw_dumped;
+@@ -4075,7 +4077,6 @@ struct qla_hw_data {
+ uint16_t product_id[4];
+
+ uint8_t model_number[16+1];
+-#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"
+ char model_desc[80];
+ uint8_t adapter_id[16+1];
+
+diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
+index e300a701296a..a222997141d3 100644
+--- a/drivers/scsi/qla2xxx/qla_gbl.h
++++ b/drivers/scsi/qla2xxx/qla_gbl.h
+@@ -611,7 +611,7 @@ extern void qla82xx_fw_dump(scsi_qla_host_t *, int);
+ extern void qla8044_fw_dump(scsi_qla_host_t *, int);
+
+ extern void qla27xx_fwdump(scsi_qla_host_t *, int);
+-extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *);
++extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *);
+ extern int qla27xx_fwdt_template_valid(void *);
+ extern ulong qla27xx_fwdt_template_size(void *);
+
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 17509ab553f0..24fc0a51053a 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -3089,12 +3089,15 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha)
+ void
+ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+ {
++ int rval;
+ uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size,
+ eft_size, fce_size, mq_size;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
+ struct rsp_que *rsp = ha->rsp_q_map[0];
+ struct qla2xxx_fw_dump *fw_dump;
++ dma_addr_t tc_dma;
++ void *tc;
+
+ dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
+ req_q_size = rsp_q_size = 0;
+@@ -3139,20 +3142,51 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
+
+ fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
+ try_eft:
++ if (ha->eft)
++ dma_free_coherent(&ha->pdev->dev,
++ EFT_SIZE, ha->eft, ha->eft_dma);
++
++ /* Allocate memory for Extended Trace Buffer. */
++ tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
++ GFP_KERNEL);
++ if (!tc) {
++ ql_log(ql_log_warn, vha, 0x00c1,
++ "Unable to allocate (%d KB) for EFT.\n",
++ EFT_SIZE / 1024);
++ goto allocate;
++ }
++
++ rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS);
++ if (rval) {
++ ql_log(ql_log_warn, vha, 0x00c2,
++ "Unable to initialize EFT (%d).\n", rval);
++ dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc,
++ tc_dma);
++ }
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
+ "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
+ eft_size = EFT_SIZE;
+ }
+
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+- if (!ha->fw_dump_template) {
+- ql_log(ql_log_warn, vha, 0x00ba,
+- "Failed missing fwdump template\n");
+- return;
++ struct fwdt *fwdt = ha->fwdt;
++ uint j;
++
++ for (j = 0; j < 2; j++, fwdt++) {
++ if (!fwdt->template) {
++ ql_log(ql_log_warn, vha, 0x00ba,
++ "-> fwdt%u no template\n", j);
++ continue;
++ }
++ ql_dbg(ql_dbg_init, vha, 0x00fa,
++ "-> fwdt%u calculating fwdump size...\n", j);
++ fwdt->dump_size = qla27xx_fwdt_calculate_dump_size(
++ vha, fwdt->template);
++ ql_dbg(ql_dbg_init, vha, 0x00fa,
++ "-> fwdt%u calculated fwdump size = %#lx bytes\n",
++ j, fwdt->dump_size);
++ dump_size += fwdt->dump_size;
+ }
+- dump_size = qla27xx_fwdt_calculate_dump_size(vha);
+- ql_dbg(ql_dbg_init, vha, 0x00fa,
+- "-> allocating fwdump (%x bytes)...\n", dump_size);
+ goto allocate;
+ }
+
+@@ -4270,11 +4304,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
+ {
+ char *st, *en;
+ uint16_t index;
++ uint64_t zero[2] = { 0 };
+ struct qla_hw_data *ha = vha->hw;
+ int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+ !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
+
+- if (memcmp(model, BINZERO, len) != 0) {
++ if (len > sizeof(zero))
++ len = sizeof(zero);
++ if (memcmp(model, &zero, len) != 0) {
+ strncpy(ha->model_number, model, len);
+ st = en = ha->model_number;
+ en += len - 1;
+@@ -4378,8 +4415,8 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
+ nv, ha->nvram_size);
+
+ /* Bad NVRAM data, set defaults parameters. */
+- if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' ||
+- nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
++ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
++ nv->nvram_version < 1) {
+ /* Reset NVRAM data. */
+ ql_log(ql_log_warn, vha, 0x0064,
+ "Inconsistent NVRAM "
+@@ -6986,9 +7023,8 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
+ nv, ha->nvram_size);
+
+ /* Bad NVRAM data, set defaults parameters. */
+- if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P'
+- || nv->id[3] != ' ' ||
+- nv->nvram_version < cpu_to_le16(ICB_VERSION)) {
++ if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) ||
++ le16_to_cpu(nv->nvram_version) < ICB_VERSION) {
+ /* Reset NVRAM data. */
+ ql_log(ql_log_warn, vha, 0x006b,
+ "Inconsistent NVRAM detected: checksum=0x%x id=%c "
+@@ -7304,14 +7340,16 @@ static int
+ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ uint32_t faddr)
+ {
+- int rval = QLA_SUCCESS;
+- int segments, fragment;
+- uint32_t *dcode, dlen;
+- uint32_t risc_addr;
+- uint32_t risc_size;
+- uint32_t i;
++ int rval;
++ uint templates, segments, fragment;
++ ulong i;
++ uint j;
++ ulong dlen;
++ uint32_t *dcode;
++ uint32_t risc_addr, risc_size, risc_attr = 0;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = ha->req_q_map[0];
++ struct fwdt *fwdt = ha->fwdt;
+
+ ql_dbg(ql_dbg_init, vha, 0x008b,
+ "FW: Loading firmware from flash (%x).\n", faddr);
+@@ -7329,34 +7367,36 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ return QLA_FUNCTION_FAILED;
+ }
+
+- while (segments && rval == QLA_SUCCESS) {
+- /* Read segment's load information. */
+- qla24xx_read_flash_data(vha, dcode, faddr, 4);
+-
++ dcode = (void *)req->ring;
++ *srisc_addr = 0;
++ segments = FA_RISC_CODE_SEGMENTS;
++ for (j = 0; j < segments; j++) {
++ ql_dbg(ql_dbg_init, vha, 0x008d,
++ "-> Loading segment %u...\n", j);
++ qla24xx_read_flash_data(vha, dcode, faddr, 10);
+ risc_addr = be32_to_cpu(dcode[2]);
+- *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr;
+ risc_size = be32_to_cpu(dcode[3]);
++ if (!*srisc_addr) {
++ *srisc_addr = risc_addr;
++ risc_attr = be32_to_cpu(dcode[9]);
++ }
+
+- fragment = 0;
+- while (risc_size > 0 && rval == QLA_SUCCESS) {
+- dlen = (uint32_t)(ha->fw_transfer_size >> 2);
++ dlen = ha->fw_transfer_size >> 2;
++ for (fragment = 0; risc_size; fragment++) {
+ if (dlen > risc_size)
+ dlen = risc_size;
+
+ ql_dbg(ql_dbg_init, vha, 0x008e,
+- "Loading risc segment@ risc addr %x "
+- "number of dwords 0x%x offset 0x%x.\n",
+- risc_addr, dlen, faddr);
+-
++ "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n",
++ fragment, risc_addr, faddr, dlen);
+ qla24xx_read_flash_data(vha, dcode, faddr, dlen);
+ for (i = 0; i < dlen; i++)
+ dcode[i] = swab32(dcode[i]);
+
+- rval = qla2x00_load_ram(vha, req->dma, risc_addr,
+- dlen);
++ rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen);
+ if (rval) {
+ ql_log(ql_log_fatal, vha, 0x008f,
+- "Failed to load segment %d of firmware.\n",
++ "-> Failed load firmware fragment %u.\n",
+ fragment);
+ return QLA_FUNCTION_FAILED;
+ }
+@@ -7364,72 +7404,83 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+ faddr += dlen;
+ risc_addr += dlen;
+ risc_size -= dlen;
+- fragment++;
+ }
+-
+- /* Next segment. */
+- segments--;
+ }
+
+ if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+- return rval;
++ return QLA_SUCCESS;
+
+- if (ha->fw_dump_template)
+- vfree(ha->fw_dump_template);
+- ha->fw_dump_template = NULL;
+- ha->fw_dump_template_len = 0;
+-
+- ql_dbg(ql_dbg_init, vha, 0x0161,
+- "Loading fwdump template from %x\n", faddr);
+- qla24xx_read_flash_data(vha, dcode, faddr, 7);
+- risc_size = be32_to_cpu(dcode[2]);
+- ql_dbg(ql_dbg_init, vha, 0x0162,
+- "-> array size %x dwords\n", risc_size);
+- if (risc_size == 0 || risc_size == ~0)
+- goto failed;
++ templates = (risc_attr & BIT_9) ? 2 : 1;
++ ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
++ for (j = 0; j < templates; j++, fwdt++) {
++ if (fwdt->template)
++ vfree(fwdt->template);
++ fwdt->template = NULL;
++ fwdt->length = 0;
++
++ qla24xx_read_flash_data(vha, dcode, faddr, 7);
++ risc_size = be32_to_cpu(dcode[2]);
++ ql_dbg(ql_dbg_init, vha, 0x0161,
++ "-> fwdt%u template array at %#x (%#x dwords)\n",
++ j, faddr, risc_size);
++ if (!risc_size || !~risc_size) {
++ ql_dbg(ql_dbg_init, vha, 0x0162,
++ "-> fwdt%u failed t