GIT Browse > openSUSE-15.1
author    Petr Tesarik <ptesarik@suse.cz>  2019-06-07 14:47:56 +0200
committer Petr Tesarik <ptesarik@suse.cz>  2019-06-07 14:47:56 +0200
commit    94a6e53ab4f10f2ade8edf8033aa0f40d1906af8 (patch)
tree      8177a6a18a7f3f2493c484ba50449d91e91c404b
parent    1317a80ac8775ea02085999e70f1b7b02633f74c (diff)
parent    e71809e152f436fd11aa707d6fb253f5f9f42963 (diff)
Merge branch 'users/tbogendoerfer/SLE15-SP1/for-next' into SLE15-SP1
Pull Hi1620 RDMA kernel enablement from Thomas Bogendoerfer
-rw-r--r--  kabi/severities                                                                 1
-rw-r--r--  patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch    447
-rw-r--r--  patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch          189
-rw-r--r--  patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch     42
-rw-r--r--  patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch       47
-rw-r--r--  patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch         466
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch                         41
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch                      41
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch    142
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch              29
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch                       60
-rw-r--r--  patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch     29
-rw-r--r--  patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch                38
-rw-r--r--  patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch     34
-rw-r--r--  patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch      91
-rw-r--r--  patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch           36
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch    307
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch     48
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch    168
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch    285
-rw-r--r--  patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch                       29
-rw-r--r--  patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch     54
-rw-r--r--  patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch               27
-rw-r--r--  patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch                34
-rw-r--r--  patches.drivers/RDMA-hns-Make-some-function-static.patch                       60
-rw-r--r--  patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch     48
-rw-r--r--  patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch           28
-rw-r--r--  patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch     92
-rw-r--r--  patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch    248
-rw-r--r--  patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch     68
-rw-r--r--  patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch    133
-rw-r--r--  patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch     62
-rw-r--r--  patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch    124
-rw-r--r--  patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch            41
-rw-r--r--  patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch           33
-rw-r--r--  patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch                  29
-rw-r--r--  patches.drivers/RDMA-hns-Update-CQE-specifications.patch                       29
-rw-r--r--  patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch     29
-rw-r--r--  patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch                      89
-rw-r--r--  patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch    166
-rw-r--r--  patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch               32
-rw-r--r--  series.conf                                                                    42
42 files changed, 4038 insertions, 0 deletions
diff --git a/kabi/severities b/kabi/severities
index 2a51aa55e1..ed91247c4c 100644
--- a/kabi/severities
+++ b/kabi/severities
@@ -45,3 +45,4 @@ drivers/infiniband/hw/qedr/* PASS
drivers/net/ethernet/hisilicon/hns3/* PASS
drivers/net/ethernet/hisilicon/hns3/hns3pf/* PASS
drivers/net/ethernet/hisilicon/hns3/hns3vf/* PASS
+drivers/infiniband/hw/hns/* PASS
diff --git a/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch b/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch
new file mode 100644
index 0000000000..182a71dcf2
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch
@@ -0,0 +1,447 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:53 +0800
+Subject: RDMA/hns: Add SCC context allocation support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 6a157f7d1b14eb88d89fbd396cfea15ac4bded2d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds SCC context allocation and initialization support for
+DCQCN in the kernel space driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.h | 4 ++
+ drivers/infiniband/hw/hns/hns_roce_device.h | 6 +++
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 26 ++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hem.h | 1
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 46 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 33 ++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_main.c | 18 ++++++++++
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 20 +++++++++++-
+ 8 files changed, 146 insertions(+), 8 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
+@@ -98,6 +98,10 @@ enum {
+ HNS_ROCE_CMD_MODIFY_CEQC = 0x91,
+ HNS_ROCE_CMD_QUERY_CEQC = 0x92,
+ HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
++
++ /* SCC CTX BT commands */
++ HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
++ HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,
+ };
+
+ enum {
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -482,6 +482,7 @@ struct hns_roce_qp_table {
+ struct hns_roce_hem_table qp_table;
+ struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
++ struct hns_roce_hem_table sccc_table;
+ };
+
+ struct hns_roce_cq_table {
+@@ -769,6 +770,7 @@ struct hns_roce_caps {
+ int irrl_entry_sz;
+ int trrl_entry_sz;
+ int cqc_entry_sz;
++ int sccc_entry_sz;
+ int srqc_entry_sz;
+ int idx_entry_sz;
+ u32 pbl_ba_pg_sz;
+@@ -781,6 +783,7 @@ struct hns_roce_caps {
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
+ u32 mpt_bt_num;
++ u32 sccc_bt_num;
+ u32 qpc_ba_pg_sz;
+ u32 qpc_buf_pg_sz;
+ u32 qpc_hop_num;
+@@ -796,6 +799,9 @@ struct hns_roce_caps {
+ u32 mtt_ba_pg_sz;
+ u32 mtt_buf_pg_sz;
+ u32 mtt_hop_num;
++ u32 sccc_ba_pg_sz;
++ u32 sccc_buf_pg_sz;
++ u32 sccc_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -45,6 +45,7 @@ bool hns_roce_check_whether_mhop(struct
+ (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
++ (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
+ (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
+@@ -125,6 +126,14 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
+ mhop->hop_num = hr_dev->caps.cqc_hop_num;
+ break;
++ case HEM_TYPE_SCCC:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
++ mhop->hop_num = hr_dev->caps.sccc_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -175,7 +184,7 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ return 0;
+
+ /*
+- * QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
++ * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
+ * MTT/CQE alloc hem for bt pages.
+ */
+ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
+@@ -486,7 +495,7 @@ static int hns_roce_table_mhop_get(struc
+ }
+
+ /*
+- * alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
++ * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * alloc bt space chunk for MTT/CQE.
+ */
+ size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
+@@ -658,7 +667,7 @@ static void hns_roce_table_mhop_put(stru
+ }
+
+ /*
+- * free buffer space chunk for QPC/MTPT/CQC/SRQC.
++ * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
+ * free bt space chunk for MTT/CQE.
+ */
+ hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
+@@ -904,6 +913,14 @@ int hns_roce_init_hem_table(struct hns_r
+ num_bt_l0 = hr_dev->caps.cqc_bt_num;
+ hop_num = hr_dev->caps.cqc_hop_num;
+ break;
++ case HEM_TYPE_SCCC:
++ buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.sccc_bt_num;
++ hop_num = hr_dev->caps.sccc_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -1081,6 +1098,9 @@ void hns_roce_cleanup_hem(struct hns_roc
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.trrl_table);
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -44,6 +44,7 @@ enum {
+ HEM_TYPE_MTPT,
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
++ HEM_TYPE_SCCC,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1078,6 +1078,9 @@ static int hns_roce_query_pf_resource(st
+ hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
+ PF_RES_DATA_3_PF_SL_NUM_M,
+ PF_RES_DATA_3_PF_SL_NUM_S);
++ hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
++ PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
++ PF_RES_DATA_4_PF_SCCC_BT_NUM_S);
+
+ return 0;
+ }
+@@ -1193,6 +1196,14 @@ static int hns_roce_alloc_vf_resource(st
+ VF_RES_B_DATA_3_VF_SL_NUM_M,
+ VF_RES_B_DATA_3_VF_SL_NUM_S,
+ HNS_ROCE_VF_SL_NUM);
++
++ roce_set_field(req_b->vf_sccc_idx_num,
++ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
++ VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
++ roce_set_field(req_b->vf_sccc_idx_num,
++ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
++ VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
++ HNS_ROCE_VF_SCCC_BT_NUM);
+ }
+ }
+
+@@ -1205,6 +1216,7 @@ static int hns_roce_v2_set_bt(struct hns
+ u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
+ u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
+ u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
++ u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
+ struct hns_roce_cfg_bt_attr *req;
+ struct hns_roce_cmq_desc desc;
+
+@@ -1252,6 +1264,20 @@ static int hns_roce_v2_set_bt(struct hns
+ CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
+ mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);
+
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
++ hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
++ hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
++ roce_set_field(req->vf_sccc_cfg,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
++ CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
++ sccc_hop_num ==
++ HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);
++
+ return hns_roce_cmq_send(hr_dev, &desc, 1);
+ }
+
+@@ -1408,9 +1434,14 @@ static int hns_roce_v2_profile(struct hn
+ caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
+ caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;
+
+- if (hr_dev->pci_dev->revision == 0x21)
++ if (hr_dev->pci_dev->revision == 0x21) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
+ HNS_ROCE_CAP_FLAG_SRQ;
++ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
++ caps->sccc_ba_pg_sz = 0;
++ caps->sccc_buf_pg_sz = 0;
++ caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
++ }
+
+ ret = hns_roce_v2_set_bt(hr_dev);
+ if (ret)
+@@ -2663,11 +2694,18 @@ static int hns_roce_v2_set_hem(struct hn
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
+ break;
++ case HEM_TYPE_SCCC:
++ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
++ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+ return 0;
+ }
++
++ if (table->type == HEM_TYPE_SCCC && step_idx)
++ return 0;
++
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+@@ -2722,6 +2760,8 @@ static int hns_roce_v2_clear_hem(struct
+ case HEM_TYPE_CQC:
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
++ case HEM_TYPE_SCCC:
++ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+ break;
+@@ -2730,6 +2770,10 @@ static int hns_roce_v2_clear_hem(struct
+ table->type);
+ return 0;
+ }
++
++ if (table->type == HEM_TYPE_SCCC)
++ return 0;
++
+ op += step_idx;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -36,6 +36,7 @@
+ #include <linux/bitops.h>
+
+ #define HNS_ROCE_VF_QPC_BT_NUM 256
++#define HNS_ROCE_VF_SCCC_BT_NUM 64
+ #define HNS_ROCE_VF_SRQC_BT_NUM 64
+ #define HNS_ROCE_VF_CQC_BT_NUM 64
+ #define HNS_ROCE_VF_MPT_BT_NUM 64
+@@ -83,6 +84,7 @@
+ #define HNS_ROCE_V2_MTPT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
++#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x100
+@@ -91,6 +93,7 @@
+ #define HNS_ROCE_V2_RSV_QPS 8
+
+ #define HNS_ROCE_CONTEXT_HOP_NUM 1
++#define HNS_ROCE_SCCC_HOP_NUM 1
+ #define HNS_ROCE_MTT_HOP_NUM 1
+ #define HNS_ROCE_CQE_HOP_NUM 1
+ #define HNS_ROCE_SRQWQE_HOP_NUM 1
+@@ -1300,7 +1303,8 @@ struct hns_roce_pf_res_b {
+ __le32 smac_idx_num;
+ __le32 sgid_idx_num;
+ __le32 qid_idx_sl_num;
+- __le32 rsv[2];
++ __le32 sccc_bt_idx_num;
++ __le32 rsv;
+ };
+
+ #define PF_RES_DATA_1_PF_SMAC_IDX_S 0
+@@ -1321,6 +1325,12 @@ struct hns_roce_pf_res_b {
+ #define PF_RES_DATA_3_PF_SL_NUM_S 16
+ #define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16)
+
++#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0
++#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0)
++
++#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
++#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
++
+ struct hns_roce_vf_res_a {
+ __le32 vf_id;
+ __le32 vf_qpc_bt_idx_num;
+@@ -1365,7 +1375,8 @@ struct hns_roce_vf_res_b {
+ __le32 vf_smac_idx_num;
+ __le32 vf_sgid_idx_num;
+ __le32 vf_qid_idx_sl_num;
+- __le32 rsv[2];
++ __le32 vf_sccc_idx_num;
++ __le32 rsv1;
+ };
+
+ #define VF_RES_B_DATA_0_VF_ID_S 0
+@@ -1389,6 +1400,12 @@ struct hns_roce_vf_res_b {
+ #define VF_RES_B_DATA_3_VF_SL_NUM_S 16
+ #define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16)
+
++#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0
++#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0)
++
++#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9
++#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9)
++
+ struct hns_roce_vf_switch {
+ __le32 rocee_sel;
+ __le32 fun_id;
+@@ -1424,7 +1441,8 @@ struct hns_roce_cfg_bt_attr {
+ __le32 vf_srqc_cfg;
+ __le32 vf_cqc_cfg;
+ __le32 vf_mpt_cfg;
+- __le32 rsv[2];
++ __le32 vf_sccc_cfg;
++ __le32 rsv;
+ };
+
+ #define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
+@@ -1463,6 +1481,15 @@ struct hns_roce_cfg_bt_attr {
+ #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8
+ #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
+
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0)
++
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4)
++
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8
++#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8)
++
+ struct hns_roce_cfg_sgid_tb {
+ __le32 table_idx_rsv;
+ __le32 vf_sgid_l;
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -704,8 +704,26 @@ static int hns_roce_init_hem(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.sccc_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table,
++ HEM_TYPE_SCCC,
++ hr_dev->caps.sccc_entry_sz,
++ hr_dev->caps.num_qps, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init SCC context memory, aborting.\n");
++ goto err_unmap_idx;
++ }
++ }
++
+ return 0;
+
++err_unmap_idx:
++ if (hr_dev->caps.num_idx_segs)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->mr_table.mtt_idx_table);
++
+ err_unmap_srqwqe:
+ if (hr_dev->caps.num_srqwqe_segs)
+ hns_roce_cleanup_hem_table(hr_dev,
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -209,13 +209,23 @@ static int hns_roce_qp_alloc(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.sccc_entry_sz) {
++ /* Alloc memory for SCC CTX */
++ ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
++ if (ret) {
++ dev_err(dev, "SCC CTX table get failed\n");
++ goto err_put_trrl;
++ }
++ }
++
+ spin_lock_irq(&qp_table->lock);
+ ret = radix_tree_insert(&hr_dev->qp_table_tree,
+ hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (ret) {
+ dev_err(dev, "QPC radix_tree_insert failed\n");
+- goto err_put_trrl;
++ goto err_put_sccc;
+ }
+
+ atomic_set(&hr_qp->refcount, 1);
+@@ -223,6 +233,11 @@ static int hns_roce_qp_alloc(struct hns_
+
+ return 0;
+
++err_put_sccc:
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_table_put(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
++
+ err_put_trrl:
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
+@@ -258,6 +273,9 @@ void hns_roce_qp_free(struct hns_roce_de
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_table_put(hr_dev, &qp_table->sccc_table,
++ hr_qp->qpn);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
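
Editorial note: hns_roce_calc_hem_mhop() and hns_roce_init_hem_table() above both
derive their buffer and base-address chunk sizes as powers of two offset from the
system page size. A minimal userspace sketch of that arithmetic, assuming 4 KiB
pages (PAGE_SHIFT = 12) and the sccc_ba_pg_sz = sccc_buf_pg_sz = 0 values that
hns_roce_v2_profile() sets for hip08:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumption: 4 KiB system pages */

/* chunk = 2^(pg_sz + PAGE_SHIFT); pg_sz is a page-order offset */
static unsigned long hem_chunk_size(unsigned int pg_sz)
{
	return 1UL << (pg_sz + PAGE_SHIFT);
}

int main(void)
{
	/* hip08 profiles SCCC with pg_sz = 0, so both chunks are one page */
	printf("sccc buf chunk = %lu bytes\n", hem_chunk_size(0));
	printf("sccc ba  chunk = %lu bytes\n", hem_chunk_size(0));
	return 0;
}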
diff --git a/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch b/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch
new file mode 100644
index 0000000000..23cd338997
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch
@@ -0,0 +1,189 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:54 +0800
+Subject: RDMA/hns: Add SCC context clr support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: aa84fa18741b83daf0f8f160c46ae92f4d6f1343
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds SCC context clear support for DCQCN in the kernel space
+driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 4 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 59 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 15 +++++++
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 8 +++
+ 4 files changed, 85 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -202,6 +202,7 @@ enum {
+ HNS_ROCE_CAP_FLAG_SRQ = BIT(5),
+ HNS_ROCE_CAP_FLAG_MW = BIT(7),
+ HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
++ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
+ HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ };
+
+@@ -483,6 +484,7 @@ struct hns_roce_qp_table {
+ struct hns_roce_hem_table irrl_table;
+ struct hns_roce_hem_table trrl_table;
+ struct hns_roce_hem_table sccc_table;
++ struct mutex scc_mutex;
+ };
+
+ struct hns_roce_cq_table {
+@@ -868,6 +870,8 @@ struct hns_roce_hw {
+ int attr_mask, enum ib_qp_state cur_state,
+ enum ib_qp_state new_state);
+ int (*destroy_qp)(struct ib_qp *ibqp);
++ int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
++ struct hns_roce_qp *hr_qp);
+ int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+ int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1436,7 +1436,9 @@ static int hns_roce_v2_profile(struct hn
+
+ if (hr_dev->pci_dev->revision == 0x21) {
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
+- HNS_ROCE_CAP_FLAG_SRQ;
++ HNS_ROCE_CAP_FLAG_SRQ |
++ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
++
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+@@ -4277,6 +4279,60 @@ static int hns_roce_v2_destroy_qp(struct
+ return 0;
+ }
+
++static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
++ struct hns_roce_qp *hr_qp)
++{
++ struct hns_roce_sccc_clr_done *rst, *resp;
++ struct hns_roce_sccc_clr *clr;
++ struct hns_roce_cmq_desc desc;
++ int ret, i;
++
++ mutex_lock(&hr_dev->qp_table.scc_mutex);
++
++ /* set scc ctx clear done flag */
++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
++ rst = (struct hns_roce_sccc_clr_done *)desc.data;
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
++ goto out;
++ }
++
++ /* clear scc context */
++ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
++ clr = (struct hns_roce_sccc_clr *)desc.data;
++ clr->qpn = cpu_to_le32(hr_qp->qpn);
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
++ goto out;
++ }
++
++ /* query scc context clear is done or not */
++ resp = (struct hns_roce_sccc_clr_done *)desc.data;
++ for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
++ hns_roce_cmq_setup_basic_desc(&desc,
++ HNS_ROCE_OPC_QUERY_SCCC, true);
++ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
++ if (ret) {
++ dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
++ goto out;
++ }
++
++ if (resp->clr_done)
++ goto out;
++
++ msleep(20);
++ }
++
++ dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n");
++ ret = -ETIMEDOUT;
++
++out:
++ mutex_unlock(&hr_dev->qp_table.scc_mutex);
++ return ret;
++}
++
+ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
+@@ -5819,6 +5875,7 @@ static const struct hns_roce_hw hns_roce
+ .modify_qp = hns_roce_v2_modify_qp,
+ .query_qp = hns_roce_v2_query_qp,
+ .destroy_qp = hns_roce_v2_destroy_qp,
++ .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
+ .modify_cq = hns_roce_v2_modify_cq,
+ .post_send = hns_roce_v2_post_send,
+ .post_recv = hns_roce_v2_post_recv,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -123,6 +123,8 @@
+ #define HNS_ROCE_CMQ_EN_B 16
+ #define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B)
+
++#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5
++
+ #define check_whether_last_step(hop_num, step_idx) \
+ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \
+ (step_idx == 1 && hop_num == 1) || \
+@@ -232,6 +234,9 @@ enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_POST_MB = 0x8504,
+ HNS_ROCE_OPC_QUERY_MB_ST = 0x8505,
+ HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506,
++ HNS_ROCE_OPC_CLR_SCCC = 0x8509,
++ HNS_ROCE_OPC_QUERY_SCCC = 0x850a,
++ HNS_ROCE_OPC_RESET_SCCC = 0x850b,
+ HNS_SWITCH_PARAMETER_CFG = 0x1033,
+ };
+
+@@ -1757,4 +1762,14 @@ struct hns_roce_wqe_atomic_seg {
+ __le64 cmp_data;
+ };
+
++struct hns_roce_sccc_clr {
++ __le32 qpn;
++ __le32 rsv[5];
++};
++
++struct hns_roce_sccc_clr_done {
++ __le32 clr_done;
++ __le32 rsv[5];
++};
++
+ #endif
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -812,6 +812,13 @@ static int hns_roce_create_qp_common(str
+ if (ret)
+ goto err_qp;
+ }
++
++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
++ ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
++ if (ret)
++ goto err_qp;
++ }
++
+ hr_qp->event = hns_roce_ib_qp_event;
+
+ return 0;
+@@ -1153,6 +1160,7 @@ int hns_roce_init_qp_table(struct hns_ro
+ int reserved_from_bot;
+ int ret;
+
++ mutex_init(&qp_table->scc_mutex);
+ spin_lock_init(&qp_table->lock);
+ INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
+
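
Editorial note: the qp_flow_control_init() hook above follows a reset/clear/poll
command sequence. The sketch below restates that control flow in isolation;
send_cmd() is a hypothetical stand-in for descriptor setup plus
hns_roce_cmq_send(), and the simulated hardware reports completion on the
second query:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define SCC_CLR_DONE_CNT 5            /* mirrors HNS_ROCE_CMQ_SCC_CLR_DONE_CNT */

enum { OPC_RESET_SCCC, OPC_CLR_SCCC, OPC_QUERY_SCCC };

/* stand-in for hns_roce_cmq_send(); pretends the clear finishes on query 2 */
static int send_cmd(int opcode, unsigned int qpn, bool *clr_done)
{
	static int queries;

	if (opcode == OPC_QUERY_SCCC && clr_done)
		*clr_done = (++queries >= 2);
	return 0;
}

static int scc_ctx_clear(unsigned int qpn)
{
	bool done = false;
	int ret, i;

	ret = send_cmd(OPC_RESET_SCCC, qpn, NULL);   /* set clear-done flag */
	if (ret)
		return ret;

	ret = send_cmd(OPC_CLR_SCCC, qpn, NULL);     /* clear the context */
	if (ret)
		return ret;

	/* poll a bounded number of times, as the driver does with msleep(20) */
	for (i = 0; i <= SCC_CLR_DONE_CNT; i++) {
		ret = send_cmd(OPC_QUERY_SCCC, qpn, &done);
		if (ret)
			return ret;
		if (done) {
			printf("clear done after %d queries\n", i + 1);
			return 0;
		}
	}
	return -ETIMEDOUT;                           /* poll budget exhausted */
}

int main(void)
{
	return scc_ctx_clear(8);
}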
diff --git a/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch b/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch
new file mode 100644
index 0000000000..b2f6bc67ef
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch
@@ -0,0 +1,42 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:08 +0800
+Subject: RDMA/hns: Add constraint on the setting of local ACK timeout
+Patch-mainline: v5.1-rc1
+Git-commit: 44754b95dd35ee07c462b5425ae9c4cde8c7e7c8
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+According to the IB protocol, the local ACK timeout shall be a 5-bit
+value. Currently, hip08 cannot support the possible maximum value of 31;
+fail the request in this case.
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3686,10 +3686,16 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+ if (attr_mask & IB_QP_TIMEOUT) {
+- roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+- V2_QPC_BYTE_28_AT_S, attr->timeout);
+- roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M,
+- V2_QPC_BYTE_28_AT_S, 0);
++ if (attr->timeout < 31) {
++ roce_set_field(context->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ attr->timeout);
++ roce_set_field(qpc_mask->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ 0);
++ } else {
++ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
++ }
+ }
+
+ roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
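
Editorial note: for context on the 0..30 bound above, the IB local ACK timeout
field is a 5-bit exponent, and the resulting wait is 4.096 µs * 2^timeout per
the IB spec. A quick illustration of the range the field covers (the helper is
ours, not the driver's):

#include <stdio.h>

/* local ACK timeout per the IB spec: 4.096 us * 2^timeout */
static double ack_timeout_seconds(unsigned int timeout)
{
	return 4.096e-6 * (double)(1ULL << timeout);
}

int main(void)
{
	unsigned int samples[] = { 1, 14, 30, 31 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("timeout=%2u -> %10.3f s%s\n", samples[i],
		       ack_timeout_seconds(samples[i]),
		       samples[i] == 31 ? "  (rejected on hip08)" : "");
	return 0;
}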
diff --git a/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch b/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch
new file mode 100644
index 0000000000..0f7e6c080e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch
@@ -0,0 +1,47 @@
+From: Xiaofei Tan <tanxiaofei@huawei.com>
+Date: Sat, 19 Jan 2019 14:23:29 +0800
+Subject: RDMA/hns: Add the process of AEQ overflow for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 2b9acb9a97fe9b4101ca020643760c4a090b4cb4
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+AEQ overflow is reported by hardware when too many asynchronous events
+occur but are not handled in time. Normally, an AEQ overflow error does
+not occur easily; once it happens, we have to do a physical function
+reset to recover. PF reset is implemented in two steps: first, set the
+reset level with ae_dev->ops->set_default_reset_request; second, run the
+reset with ae_dev->ops->reset_event.
+
+Signed-off-by: Xiaofei Tan <tanxiaofei@huawei.com>
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4702,11 +4702,22 @@ static irqreturn_t hns_roce_v2_msix_inte
+ int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
+
+ if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
++ struct pci_dev *pdev = hr_dev->pci_dev;
++ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
++ const struct hnae3_ae_ops *ops = ae_dev->ops;
++
+ dev_err(dev, "AEQ overflow!\n");
+
+ roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
+
++ /* Set reset level for reset_event() */
++ if (ops->set_default_reset_request)
++ ops->set_default_reset_request(ae_dev,
++ HNAE3_FUNC_RESET);
++ if (ops->reset_event)
++ ops->reset_event(pdev, NULL);
++
+ roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1);
+ roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
+
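
Editorial note: the handler above performs the two-step PF reset through
optional hnae3 callbacks, guarding each with a NULL check. A self-contained
sketch of that pattern; the ops struct and the reset-level value are
illustrative stand-ins for hnae3_ae_ops and HNAE3_FUNC_RESET, not the real
definitions:

#include <stdio.h>

struct ae_ops {                        /* stand-in for hnae3_ae_ops */
	void (*set_default_reset_request)(int level);
	void (*reset_event)(void);
};

static void queue_func_reset(int level) { printf("reset level %d queued\n", level); }
static void run_reset(void)             { printf("reset event running\n"); }

/* both callbacks are optional, hence the NULL checks before each call */
static void recover_from_aeq_overflow(const struct ae_ops *ops)
{
	if (ops->set_default_reset_request)
		ops->set_default_reset_request(1 /* assumed function-level reset */);
	if (ops->reset_event)
		ops->reset_event();
}

int main(void)
{
	struct ae_ops ops = { queue_func_reset, run_reset };

	recover_from_aeq_overflow(&ops);
	return 0;
}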
diff --git a/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch b/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch
new file mode 100644
index 0000000000..67166ad771
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch
@@ -0,0 +1,466 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Tue, 18 Dec 2018 21:21:55 +0800
+Subject: RDMA/hns: Add timer allocation support for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 0e40dc2f70cda099e13392a26bd37aed24bcd25d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+This patch adds qpc timer and cqc timer allocation support for hardware
+timeout retransmission in the kernel space driver.
+
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.h | 8 ++
+ drivers/infiniband/hw/hns/hns_roce_device.h | 14 +++
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 42 +++++++++++
+ drivers/infiniband/hw/hns/hns_roce_hem.h | 2
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 103 +++++++++++++++++++++++++++-
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 24 ++++++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 36 +++++++++
+ 7 files changed, 227 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
+@@ -75,6 +75,10 @@ enum {
+ HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
+ HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
+
++ /* CQC TIMER commands */
++ HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23,
++ HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27,
++
+ /* MPT commands */
+ HNS_ROCE_CMD_QUERY_MPT = 0x62,
+
+@@ -89,6 +93,10 @@ enum {
+ HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
+ HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
+
++ /* QPC TIMER commands */
++ HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33,
++ HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37,
++
+ /* EQC commands */
+ HNS_ROCE_CMD_CREATE_AEQC = 0x80,
+ HNS_ROCE_CMD_MODIFY_AEQC = 0x81,
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -733,6 +733,8 @@ struct hns_roce_caps {
+ u32 max_extend_sg;
+ int num_qps; /* 256k */
+ int reserved_qps;
++ int num_qpc_timer;
++ int num_cqc_timer;
+ u32 max_srq_sg;
+ int num_srqs;
+ u32 max_wqes; /* 16k */
+@@ -773,6 +775,8 @@ struct hns_roce_caps {
+ int trrl_entry_sz;
+ int cqc_entry_sz;
+ int sccc_entry_sz;
++ int qpc_timer_entry_sz;
++ int cqc_timer_entry_sz;
+ int srqc_entry_sz;
+ int idx_entry_sz;
+ u32 pbl_ba_pg_sz;
+@@ -782,8 +786,10 @@ struct hns_roce_caps {
+ int ceqe_depth;
+ enum ib_mtu max_mtu;
+ u32 qpc_bt_num;
++ u32 qpc_timer_bt_num;
+ u32 srqc_bt_num;
+ u32 cqc_bt_num;
++ u32 cqc_timer_bt_num;
+ u32 mpt_bt_num;
+ u32 sccc_bt_num;
+ u32 qpc_ba_pg_sz;
+@@ -804,6 +810,12 @@ struct hns_roce_caps {
+ u32 sccc_ba_pg_sz;
+ u32 sccc_buf_pg_sz;
+ u32 sccc_hop_num;
++ u32 qpc_timer_ba_pg_sz;
++ u32 qpc_timer_buf_pg_sz;
++ u32 qpc_timer_hop_num;
++ u32 cqc_timer_ba_pg_sz;
++ u32 cqc_timer_buf_pg_sz;
++ u32 cqc_timer_hop_num;
+ u32 cqe_ba_pg_sz;
+ u32 cqe_buf_pg_sz;
+ u32 cqe_hop_num;
+@@ -931,6 +943,8 @@ struct hns_roce_dev {
+ struct hns_roce_srq_table srq_table;
+ struct hns_roce_qp_table qp_table;
+ struct hns_roce_eq_table eq_table;
++ struct hns_roce_hem_table qpc_timer_table;
++ struct hns_roce_hem_table cqc_timer_table;
+
+ int cmd_mod;
+ int loop_idc;
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -46,6 +46,8 @@ bool hns_roce_check_whether_mhop(struct
+ (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
+ (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
+ (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
++ (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
++ (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
+ (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
+ (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
+ (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
+@@ -134,6 +136,22 @@ int hns_roce_calc_hem_mhop(struct hns_ro
+ mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
+ mhop->hop_num = hr_dev->caps.sccc_hop_num;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
++ mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
++ mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -602,6 +620,7 @@ out:
+ mutex_unlock(&table->mutex);
+ return ret;
+ }
++EXPORT_SYMBOL_GPL(hns_roce_table_get);
+
+ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+@@ -744,6 +763,7 @@ void hns_roce_table_put(struct hns_roce_
+
+ mutex_unlock(&table->mutex);
+ }
++EXPORT_SYMBOL_GPL(hns_roce_table_put);
+
+ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
+ struct hns_roce_hem_table *table,
+@@ -921,6 +941,22 @@ int hns_roce_init_hem_table(struct hns_r
+ num_bt_l0 = hr_dev->caps.sccc_bt_num;
+ hop_num = hr_dev->caps.sccc_hop_num;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
++ hop_num = hr_dev->caps.qpc_timer_hop_num;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
++ + PAGE_SHIFT);
++ bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
++ + PAGE_SHIFT);
++ num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
++ hop_num = hr_dev->caps.cqc_timer_hop_num;
++ break;
+ case HEM_TYPE_SRQC:
+ buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
+ + PAGE_SHIFT);
+@@ -1098,6 +1134,12 @@ void hns_roce_cleanup_hem(struct hns_roc
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->srq_table.table);
+ hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
++ if (hr_dev->caps.qpc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table);
++ if (hr_dev->caps.cqc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->cqc_timer_table);
+ if (hr_dev->caps.sccc_entry_sz)
+ hns_roce_cleanup_hem_table(hr_dev,
+ &hr_dev->qp_table.sccc_table);
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
+@@ -45,6 +45,8 @@ enum {
+ HEM_TYPE_CQC,
+ HEM_TYPE_SRQC,
+ HEM_TYPE_SCCC,
++ HEM_TYPE_QPC_TIMER,
++ HEM_TYPE_CQC_TIMER,
+
+ /* UNMAP HEM */
+ HEM_TYPE_MTT,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1085,6 +1085,41 @@ static int hns_roce_query_pf_resource(st
+ return 0;
+ }
+
++static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_pf_timer_res_a *req_a;
++ struct hns_roce_cmq_desc desc[2];
++ int ret, i;
++
++ for (i = 0; i < 2; i++) {
++ hns_roce_cmq_setup_basic_desc(&desc[i],
++ HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
++ true);
++
++ if (i == 0)
++ desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++ else
++ desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
++ }
++
++ ret = hns_roce_cmq_send(hr_dev, desc, 2);
++ if (ret)
++ return ret;
++
++ req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;
++
++ hr_dev->caps.qpc_timer_bt_num =
++ roce_get_field(req_a->qpc_timer_bt_idx_num,
++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
++ PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
++ hr_dev->caps.cqc_timer_bt_num =
++ roce_get_field(req_a->cqc_timer_bt_idx_num,
++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
++ PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);
++
++ return 0;
++}
++
+ static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
+ int vf_id)
+ {
+@@ -1315,6 +1350,16 @@ static int hns_roce_v2_profile(struct hn
+ return ret;
+ }
+
++ if (hr_dev->pci_dev->revision == 0x21) {
++ ret = hns_roce_query_pf_timer_resource(hr_dev);
++ if (ret) {
++ dev_err(hr_dev->dev,
++ "Query pf timer resource fail, ret = %d.\n",
++ ret);
++ return ret;
++ }
++ }
++
+ ret = hns_roce_alloc_vf_resource(hr_dev);
+ if (ret) {
+ dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
+@@ -1439,6 +1484,17 @@ static int hns_roce_v2_profile(struct hn
+ HNS_ROCE_CAP_FLAG_SRQ |
+ HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;
+
++ caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
++ caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
++ caps->qpc_timer_ba_pg_sz = 0;
++ caps->qpc_timer_buf_pg_sz = 0;
++ caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
++ caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
++ caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
++ caps->cqc_timer_ba_pg_sz = 0;
++ caps->cqc_timer_buf_pg_sz = 0;
++ caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
++
+ caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
+ caps->sccc_ba_pg_sz = 0;
+ caps->sccc_buf_pg_sz = 0;
+@@ -1644,7 +1700,8 @@ static void hns_roce_free_link_table(str
+ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+- int ret;
++ int qpc_count, cqc_count;
++ int ret, i;
+
+ /* TSQ includes SQ doorbell and ack doorbell */
+ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
+@@ -1659,8 +1716,40 @@ static int hns_roce_v2_init(struct hns_r
+ goto err_tpq_init_failed;
+ }
+
++ /* Alloc memory for QPC Timer buffer space chunk*/
++ for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
++ qpc_count++) {
++ ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
++ qpc_count);
++ if (ret) {
++ dev_err(hr_dev->dev, "QPC Timer get failed\n");
++ goto err_qpc_timer_failed;
++ }
++ }
++
++ /* Alloc memory for CQC Timer buffer space chunk*/
++ for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
++ cqc_count++) {
++ ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
++ cqc_count);
++ if (ret) {
++ dev_err(hr_dev->dev, "CQC Timer get failed\n");
++ goto err_cqc_timer_failed;
++ }
++ }
++
+ return 0;
+
++err_cqc_timer_failed:
++ for (i = 0; i < cqc_count; i++)
++ hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
++
++err_qpc_timer_failed:
++ for (i = 0; i < qpc_count; i++)
++ hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
++
++ hns_roce_free_link_table(hr_dev, &priv->tpq);
++
+ err_tpq_init_failed:
+ hns_roce_free_link_table(hr_dev, &priv->tsq);
+
+@@ -2699,6 +2788,12 @@ static int hns_roce_v2_set_hem(struct hn
+ case HEM_TYPE_SCCC:
+ op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
+ break;
++ case HEM_TYPE_QPC_TIMER:
++ op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
++ break;
++ case HEM_TYPE_CQC_TIMER:
++ op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
++ break;
+ default:
+ dev_warn(dev, "Table %d not to be written by mailbox!\n",
+ table->type);
+@@ -2763,6 +2858,8 @@ static int hns_roce_v2_clear_hem(struct
+ op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
+ break;
+ case HEM_TYPE_SCCC:
++ case HEM_TYPE_QPC_TIMER:
++ case HEM_TYPE_CQC_TIMER:
+ break;
+ case HEM_TYPE_SRQC:
+ op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
+@@ -2773,7 +2870,9 @@ static int hns_roce_v2_clear_hem(struct
+ return 0;
+ }
+
+- if (table->type == HEM_TYPE_SCCC)
++ if (table->type == HEM_TYPE_SCCC ||
++ table->type == HEM_TYPE_QPC_TIMER ||
++ table->type == HEM_TYPE_CQC_TIMER)
+ return 0;
+
+ op += step_idx;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -46,11 +46,13 @@
+ #define HNS_ROCE_VF_SL_NUM 8
+
+ #define HNS_ROCE_V2_MAX_QP_NUM 0x2000
++#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
+ #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ 0x100000
+ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+ #define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
++#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+@@ -85,6 +87,8 @@
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
+ #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
++#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
++#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x100
+@@ -229,6 +233,7 @@ enum hns_roce_opcode_type {
+ HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401,
+ HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403,
+ HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404,
++ HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406,
+ HNS_ROCE_OPC_CFG_SGID_TB = 0x8500,
+ HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501,
+ HNS_ROCE_OPC_POST_MB = 0x8504,
+@@ -1336,6 +1341,25 @@ struct hns_roce_pf_res_b {
+ #define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9
+ #define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9)
+
++struct hns_roce_pf_timer_res_a {
++ __le32 rsv0;
++ __le32 qpc_timer_bt_idx_num;
++ __le32 cqc_timer_bt_idx_num;
++ __le32 rsv[3];
++};
++
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0)
++
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16
++#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16)
++
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0)
++
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16
++#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16)
++
+ struct hns_roce_vf_res_a {
+ __le32 vf_id;
+ __le32 vf_qpc_bt_idx_num;
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -717,8 +717,44 @@ static int hns_roce_init_hem(struct hns_
+ }
+ }
+
++ if (hr_dev->caps.qpc_timer_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table,
++ HEM_TYPE_QPC_TIMER,
++ hr_dev->caps.qpc_timer_entry_sz,
++ hr_dev->caps.num_qpc_timer, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init QPC timer memory, aborting.\n");
++ goto err_unmap_ctx;
++ }
++ }
++
++ if (hr_dev->caps.cqc_timer_entry_sz) {
++ ret = hns_roce_init_hem_table(hr_dev,
++ &hr_dev->cqc_timer_table,
++ HEM_TYPE_CQC_TIMER,
++ hr_dev->caps.cqc_timer_entry_sz,
++ hr_dev->caps.num_cqc_timer, 1);
++ if (ret) {
++ dev_err(dev,
++ "Failed to init CQC timer memory, aborting.\n");
++ goto err_unmap_qpc_timer;
++ }
++ }
++
+ return 0;
+
++err_unmap_qpc_timer:
++ if (hr_dev->caps.qpc_timer_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qpc_timer_table);
++
++err_unmap_ctx:
++ if (hr_dev->caps.sccc_entry_sz)
++ hns_roce_cleanup_hem_table(hr_dev,
++ &hr_dev->qp_table.sccc_table);
++
+ err_unmap_idx:
+ if (hr_dev->caps.num_idx_segs)
+ hns_roce_cleanup_hem_table(hr_dev,
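
Editorial note: hns_roce_v2_init() above acquires the qpc and cqc timer tables
element by element and, on failure, releases exactly what it already took, in
reverse order of acquisition. A minimal sketch of that partial-unwind idiom
with stubbed get/put helpers; counts and names are illustrative:

#include <stdio.h>

#define QPC_TIMER_BT_NUM 3
#define CQC_TIMER_BT_NUM 2

static int  table_get(const char *t, int i) { printf("get %s[%d]\n", t, i); return 0; }
static void table_put(const char *t, int i) { printf("put %s[%d]\n", t, i); }

static int init_timer_tables(void)
{
	int qpc, cqc, i, ret;

	for (qpc = 0; qpc < QPC_TIMER_BT_NUM; qpc++) {
		ret = table_get("qpc_timer", qpc);
		if (ret)
			goto err_qpc;
	}
	for (cqc = 0; cqc < CQC_TIMER_BT_NUM; cqc++) {
		ret = table_get("cqc_timer", cqc);
		if (ret)
			goto err_cqc;
	}
	return 0;

err_cqc:
	for (i = 0; i < cqc; i++)     /* release only the entries actually taken */
		table_put("cqc_timer", i);
err_qpc:
	for (i = 0; i < qpc; i++)
		table_put("qpc_timer", i);
	return ret;
}

int main(void) { return init_timer_tables(); }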
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch b/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch
new file mode 100644
index 0000000000..9dad6cd70b
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch
@@ -0,0 +1,41 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Sun, 7 Apr 2019 13:23:39 +0800
+Subject: RDMA/hns: Bugfix for SCC hem free
+Patch-mainline: v5.1-rc5
+Git-commit: 00fb67ec6b98114a887d9ef26fc7c3e566e7f665
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The method of hem free for the SCC context is different from that for the
+qp context.
+
+In the current version, if the SCC hem is freed during the execution of
+qp free, there may be an smmu error as below:
+
+ arm-smmu-v3 arm-smmu-v3.1.auto: event 0x10 received:
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x00007d0000000010
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x000012000000017c
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x00000000000009e0
+ arm-smmu-v3 arm-smmu-v3.1.auto: 0x0000000000000000
+
+As SCC context is still used by hardware after qp free, we can solve this
+problem by removing SCC hem free from hns_roce_qp_free.
+
+Fixes: 6a157f7d1b14 ("RDMA/hns: Add SCC context allocation support for hip08")
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -273,9 +273,6 @@ void hns_roce_qp_free(struct hns_roce_de
+ wait_for_completion(&hr_qp->free);
+
+ if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
+- if (hr_dev->caps.sccc_entry_sz)
+- hns_roce_table_put(hr_dev, &qp_table->sccc_table,
+- hr_qp->qpn);
+ if (hr_dev->caps.trrl_entry_sz)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch b/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch
new file mode 100644
index 0000000000..c6bc11523f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch
@@ -0,0 +1,41 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Tue, 23 Apr 2019 17:30:26 +0800
+Subject: RDMA/hns: Bugfix for mapping user db
+Patch-mainline: v5.1-rc7
+Git-commit: 2557fabd6e29f349bfa0ac13f38ac98aa5eafc74
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When the maximum number of send wrs delivered by the user is zero, the qp
+does not have an sq.
+
+When allocating the sq db buffer to store the user sq pi pointer and
+mapping it to kernel mode, max_send_wr is used as the trigger condition,
+while the kernel does not consider the max_send_wr trigger condition when
+mapping the db. This causes the sq record doorbell mapping to fail and qp
+creation to fail.
+
+The failed print information as follows:
+
+ hns3 0000:7d:00.1: Send cmd: tail - 418, opcode - 0x8504, flag - 0x0011, retval - 0x0000
+ hns3 0000:7d:00.1: Send cmd: 0xe59dc000 0x00000000 0x00000000 0x00000000 0x00000116 0x0000ffff
+ hns3 0000:7d:00.1: sq record doorbell map failed!
+ hns3 0000:7d:00.1: Create RC QP failed
+
+Fixes: 0425e3e6e0c7 ("RDMA/hns: Support flush cqe for hip08 in kernel space")
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -532,7 +532,7 @@ static int hns_roce_set_kernel_sq_size(s
+
+ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
+ {
+- if (attr->qp_type == IB_QPT_XRC_TGT)
++ if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
+ return 0;
+
+ return 1;
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch b/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch
new file mode 100644
index 0000000000..8150c82df3
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch
@@ -0,0 +1,142 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Thu, 30 May 2019 23:55:53 +0800
+Subject: RDMA/hns: Bugfix for posting multiple srq work request
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 97545b10221ad14b046dba135a37f4e98a560697
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When the user submits more than 32 work requests to an srq queue at a
+time, the driver needs to find the corresponding number of entries in
+the idx queue bitmap. However, the original lookup function, ffs, only
+processes 32 bits of each array element. When the number of srq wqes
+issued exceeds 32, ffs will only process the lower 32 bits of the
+elements, so it cannot get the correct wqe index for the srq wqe.
+
+Signed-off-by: Xi Wang <wangxi11@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 2 -
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 34 ++++++++++++++--------------
+ drivers/infiniband/hw/hns/hns_roce_srq.c | 15 ++----------
+ 3 files changed, 22 insertions(+), 29 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -472,7 +472,7 @@ struct hns_roce_idx_que {
+ u32 buf_size;
+ struct ib_umem *umem;
+ struct hns_roce_mtt mtt;
+- u64 *bitmap;
++ unsigned long *bitmap;
+ };
+
+ struct hns_roce_srq {
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2343,15 +2343,10 @@ static void *get_srq_wqe(struct hns_roce
+
+ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
+ {
+- u32 bitmap_num;
+- int bit_num;
+-
+ /* always called with interrupts disabled. */
+ spin_lock(&srq->lock);
+
+- bitmap_num = wqe_index / (sizeof(u64) * 8);
+- bit_num = wqe_index % (sizeof(u64) * 8);
+- srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num);
++ bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
+ srq->tail++;
+
+ spin_unlock(&srq->lock);
+@@ -5971,18 +5966,19 @@ out:
+ return ret;
+ }
+
+-static int find_empty_entry(struct hns_roce_idx_que *idx_que)
++static int find_empty_entry(struct hns_roce_idx_que *idx_que,
++ unsigned long size)
+ {
+- int bit_num;
+- int i;
++ int wqe_idx;
+
+- /* bitmap[i] is set zero if all bits are allocated */
+- for (i = 0; idx_que->bitmap[i] == 0; ++i)
+- ;
+- bit_num = ffs(idx_que->bitmap[i]);
+- idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1));
++ if (unlikely(bitmap_full(idx_que->bitmap, size)))
++ return -ENOSPC;
++
++ wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
++
++ bitmap_set(idx_que->bitmap, wqe_idx, 1);
+
+- return i * sizeof(u64) * 8 + (bit_num - 1);
++ return wqe_idx;
+ }
+
+ static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
+@@ -6028,7 +6024,13 @@ static int hns_roce_v2_post_srq_recv(str
+ break;
+ }
+
+- wqe_idx = find_empty_entry(&srq->idx_que);
++ wqe_idx = find_empty_entry(&srq->idx_que, srq->max);
++ if (wqe_idx < 0) {
++ ret = -ENOMEM;
++ *bad_wr = wr;
++ break;
++ }
++
+ fill_idx_queue(&srq->idx_que, ind, wqe_idx);
+ wqe = get_srq_wqe(srq, wqe_idx);
+ dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -188,28 +188,19 @@ static int hns_roce_create_idx_que(struc
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct hns_roce_idx_que *idx_que = &srq->idx_que;
+- u32 bitmap_num;
+- int i;
+
+- bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64));
+-
+- idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL);
++ idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL);
+ if (!idx_que->bitmap)
+ return -ENOMEM;
+
+- bitmap_num = bitmap_num / (8 * sizeof(u64));
+-
+ idx_que->buf_size = srq->idx_que.buf_size;
+
+ if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
+ &idx_que->idx_buf, page_shift)) {
+- kfree(idx_que->bitmap);
++ bitmap_free(idx_que->bitmap);
+ return -ENOMEM;
+ }
+
+- for (i = 0; i < bitmap_num; i++)
+- idx_que->bitmap[i] = ~(0UL);
+-
+ return 0;
+ }
+
+@@ -415,7 +406,7 @@ err_idx_mtt:
+ err_create_idx:
+ hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
+ &srq->idx_que.idx_buf);
+- kfree(srq->idx_que.bitmap);
++ bitmap_free(srq->idx_que.bitmap);
+
+ err_srq_mtt:
+ hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
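
Editorial note: the root cause this patch describes is worth seeing in
miniature. ffs() takes an int, so applying it to a u64 bitmap word silently
discards the upper 32 bits. The userspace sketch below contrasts that with a
whole-word zero-bit scan of the kind find_first_zero_bit() performs; note the
fix also flips the bitmap polarity, so a set bit now means "in use":

#include <stdio.h>
#include <stdint.h>
#include <strings.h>

/* tiny stand-in for the kernel's find_first_zero_bit() on a single word */
static int first_zero_bit(uint64_t w)
{
	int i;

	for (i = 0; i < 64; i++)
		if (!((w >> i) & 1))
			return i;
	return 64;
}

int main(void)
{
	uint64_t used = 0xffffffffULL;   /* slots 0..31 taken, 32..63 free */

	/* old scheme: set bit = free; ffs() truncates the word to 32 bits,
	 * so with the low half exhausted it reports "no free slot" (0) */
	printf("ffs on truncated word: %d\n", ffs((int)~used));

	/* new scheme: clear bit = free; the whole word is scanned */
	printf("first zero bit:        %d\n", first_zero_bit(used));
	return 0;
}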
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch b/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch
new file mode 100644
index 0000000000..0ab666db0e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:26 +0800
+Subject: RDMA/hns: Bugfix for sending with invalidate
+Patch-mainline: v5.2-rc1
+Git-commit: 82342e493b7e53f5e0d0698a48190f05e84d6690
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+According to the IB protocol, the send with invalidate operation will not
+invalidate an mr that was created through a register mr or reregister mr.
+
+Fixes: e93df0108579 ("RDMA/hns: Support local invalidate for hip08 in kernel space")
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2155,7 +2155,7 @@ static int hns_roce_v2_write_mtpt(void *
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
+- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
++ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
+ (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch b/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch
new file mode 100644
index 0000000000..375f057966
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch
@@ -0,0 +1,60 @@
+From: Yangyang Li <liyangyang20@huawei.com>
+Date: Sat, 16 Feb 2019 20:10:25 +0800
+Subject: RDMA/hns: Bugfix for set hem of SCC
+Patch-mainline: v5.1-rc1
+Git-commit: 6ac16e403900a98f9b330daa5f0d89f76a24c6eb
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The method of set hem for the scc context is different from other
+contexts: the driver should notify the hardware of the detailed idx in
+bt0 for scc, while for other contexts it only needs to notify the bt step
+and the hardware will calculate the idx.
+
+Here fixes the following error when unloading the hip08 driver:
+
+[ 123.570768] {1}[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 0
+[ 123.579023] {1}[Hardware Error]: event severity: recoverable
+[ 123.584670] {1}[Hardware Error]: Error 0, type: recoverable
+[ 123.590317] {1}[Hardware Error]: section_type: PCIe error
+[ 123.595877] {1}[Hardware Error]: version: 4.0
+[ 123.600395] {1}[Hardware Error]: command: 0x0006, status: 0x0010
+[ 123.606562] {1}[Hardware Error]: device_id: 0000:7d:00.0
+[ 123.612034] {1}[Hardware Error]: slot: 0
+[ 123.616120] {1}[Hardware Error]: secondary_bus: 0x00
+[ 123.621245] {1}[Hardware Error]: vendor_id: 0x19e5, device_id: 0xa222
+[ 123.627847] {1}[Hardware Error]: class_code: 000002
+[ 123.632977] hns3 0000:7d:00.0: aer_status: 0x00000000, aer_mask: 0x00000000
+[ 123.639928] hns3 0000:7d:00.0: aer_layer=Transaction Layer, aer_agent=Receiver ID
+[ 123.647400] hns3 0000:7d:00.0: aer_uncor_severity: 0x00000000
+[ 123.653136] hns3 0000:7d:00.0: PCI error detected, state(=1)!!
+[ 123.658959] hns3 0000:7d:00.0: ROCEE uncorrected RAS error identified
+[ 123.665395] hns3 0000:7d:00.0: ROCEE RAS AXI rresp error
+[ 123.670713] hns3 0000:7d:00.0: requesting reset due to PCI error
+[ 123.676715] hns3 0000:7d:00.0: received reset event , reset type is 5
+[ 123.683147] hns3 0000:7d:00.0: AER: Device recovery successful
+[ 123.688978] hns3 0000:7d:00.0: PF Reset requested
+[ 123.693684] hns3 0000:7d:00.0: PF failed(=-5) to send mailbox message to VF
+[ 123.700633] hns3 0000:7d:00.0: inform reset to vf(1) failded -5!
+
+Fixes: 6a157f7d1b14 ("RDMA/hns: Add SCC context allocation support for hip08")
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Reviewed-by: Yixian Liu <liuyixian@huawei.com>
+Reviewed-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2945,6 +2945,9 @@ static int hns_roce_v2_set_hem(struct hn
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
++ if (table->type == HEM_TYPE_SCCC)
++ obj = mhop.l0_idx;
++
+ if (check_whether_last_step(hop_num, step_idx)) {
+ hem = table->hem[hem_idx];
+ for (hns_roce_hem_first(hem, &iter);
diff --git a/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch b/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch
new file mode 100644
index 0000000000..3a6e75684f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:07 +0800
+Subject: RDMA/hns: Bugfix for the scene without receiver queue
+Patch-mainline: v5.1-rc1
+Git-commit: 4d103905eb1e4f14cb62fcf962c9d35da7005dea
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+In some application scenarios, the user may have no receive queue when
+running RDMA write or read operations.
+
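+As a sketch, the resulting check (taken from the hunk below) treats a QP
+as having no RQ when it is an XRC initiator or target, is attached to an
+SRQ, or was created with zero max_recv_wr:
+
+	static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
+	{
+		if (attr->qp_type == IB_QPT_XRC_INI ||
+		    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
+		    !attr->cap.max_recv_wr)
+			return 0;
+
+		return 1;
+	}
+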
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -526,7 +526,8 @@ static int hns_roce_qp_has_sq(struct ib_
+ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
+ {
+ if (attr->qp_type == IB_QPT_XRC_INI ||
+- attr->qp_type == IB_QPT_XRC_TGT || attr->srq)
++ attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
++ !attr->cap.max_recv_wr)
+ return 0;
+
+ return 1;
diff --git a/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch b/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch
new file mode 100644
index 0000000000..6f06739949
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch
@@ -0,0 +1,38 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:07 +0800
+Subject: RDMA/hns: Configure capacity of hns device
+Patch-mainline: v5.1-rc1
+Git-commit: dad1f9802ecee3a21143293b2505e1b57b1ae525
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+This patch adds the new device capability IB_DEVICE_MEM_MGT_EXTENSIONS to
+indicate device support for the following features:
+
+1. Fast register memory region.
+2. Send with remote invalidate by FRMR.
+3. Local invalidate memory region.
+
+It also adds the max depth of the FRMR page list length.
+
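+For illustration, a kernel ULP would typically gate fast-register MR
+usage on this capability bit (a hedged sketch, not part of this patch;
+ib_dev, desired_pages and max_pages are hypothetical locals):
+
+	struct ib_device_attr *attrs = &ib_dev->attrs;
+	u32 max_pages = 0;
+
+	/* only use FRMR when the device advertises the extensions */
+	if (attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
+		max_pages = min_t(u32, desired_pages,
+				  attrs->max_fast_reg_page_list_len);
+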
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_main.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -226,6 +226,11 @@ static int hns_roce_query_device(struct
+ props->max_srq_sge = hr_dev->caps.max_srq_sges;
+ }
+
++ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
++ props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
++ props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA;
++ }
++
+ return 0;
+ }
+
diff --git a/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch b/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch
new file mode 100644
index 0000000000..014a7d2d59
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch
@@ -0,0 +1,34 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:28 +0800
+Subject: RDMA/hns: Delete unused variable in hns_roce_v2_modify_qp function
+Patch-mainline: v5.2-rc1
+Git-commit: d0a935563bc0f447abed7799388fa3f13099cc0d
+References: bsc#1104427 FATE#326416
+
+The src_mac array is not used in the hns_roce_v2_modify_qp function.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3975,7 +3975,6 @@ static int hns_roce_v2_modify_qp(struct
+ const struct ib_global_route *grh =
+ rdma_ah_read_grh(&attr->ah_attr);
+ const struct ib_gid_attr *gid_attr = NULL;
+- u8 src_mac[ETH_ALEN];
+ int is_roce_protocol;
+ u16 vlan = 0xffff;
+ u8 ib_port;
+@@ -3990,7 +3989,6 @@ static int hns_roce_v2_modify_qp(struct
+ if (is_roce_protocol) {
+ gid_attr = attr->ah_attr.grh.sgid_attr;
+ vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev);
+- memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
+ }
+
+ if (is_vlan_dev(gid_attr->ndev)) {
diff --git a/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch b/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch
new file mode 100644
index 0000000000..9288fac491
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch
@@ -0,0 +1,91 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:06 +0800
+Subject: RDMA/hns: Delete useful prints for aeq subtype event
+Patch-mainline: v5.1-rc1
+Git-commit: e95c716c7faa0d0eede5eabb6fea2504709e25b6
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Currently, all messages printed for the aeq subtype event are wrong.
+Thus, delete them and print only the value of the subtype event.
+
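+As a sketch, each removed per-subtype switch collapses into one
+consolidated message (taken from the hunk below):
+
+	dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
+		qpn, irq_work->sub_type);
+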
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 57 +++--------------------------
+ 1 file changed, 6 insertions(+), 51 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4663,64 +4663,19 @@ static void hns_roce_irq_work_handle(str
+ dev_warn(dev, "Send queue drained.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+- dev_err(dev, "Local work queue catastrophic error.\n");
++ dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
++ qpn, irq_work->sub_type);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+- switch (irq_work->sub_type) {
+- case HNS_ROCE_LWQCE_QPC_ERROR:
+- dev_err(dev, "QP %d, QPC error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_MTU_ERROR:
+- dev_err(dev, "QP %d, MTU error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+- dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+- dev_err(dev, "QP %d, WQE addr error.\n", qpn);
+- break;
+- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+- dev_err(dev, "QP %d, WQE shift error.\n", qpn);
+- break;
+- default:
+- dev_err(dev, "Unhandled sub_event type %d.\n",
+- irq_work->sub_type);
+- break;
+- }
+ break;
+ case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+- dev_err(dev, "Invalid request local work queue error.\n");
++ dev_err(dev, "Invalid request local work queue 0x%x error.\n",
++ qpn);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+- dev_err(dev, "Local access violation work queue error.\n");
++ dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
++ qpn, irq_work->sub_type);
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+- switch (irq_work->sub_type) {
+- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+- dev_err(dev, "QP %d, R_key violation.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+- dev_err(dev, "QP %d, length error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_VA_ERROR:
+- dev_err(dev, "QP %d, VA error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_PD_ERROR:
+- dev_err(dev, "QP %d, PD error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+- dev_err(dev, "QP %d, rw acc error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+- dev_err(dev, "QP %d, key state error.\n", qpn);
+- break;
+- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+- dev_err(dev, "QP %d, MR operation error.\n", qpn);
+- break;
+- default:
+- dev_err(dev, "Unhandled sub_event type %d.\n",
+- irq_work->sub_type);
+- break;
+- }
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ dev_warn(dev, "SRQ limit reach.\n");
diff --git a/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch b/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch
new file mode 100644
index 0000000000..ac01c7524f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch
@@ -0,0 +1,36 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 19 Mar 2019 11:10:08 +0200
+Subject: RDMA/hns: Fix bad endianess of port_pd variable
+Patch-mainline: v5.2-rc1
+Git-commit: 6734b2973565e36659e97e12ab0d0faf1d9f3fbe
+References: bsc#1104427 FATE#326416
+
+port_pd is treated as le32 in its declaration and reads, so fix the
+assignment to be le32 too. This change fixes the following compilation
+warnings.
+
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: warning: incorrect type
+in assignment (different base types)
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: expected restricted __le32 [usertype] port_pd
+drivers/infiniband/hw/hns/hns_roce_ah.c:67:24: got restricted __be32 [usertype]
+
+Fixes: 9a4435375cd1 ("IB/hns: Add driver files for hns RoCE driver")
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Reviewed-by: Gal Pressman <galpress@amazon.com>
+Reviewed-by: Lijun Ou <ouliun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_ah.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
++++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
+@@ -69,7 +69,7 @@ struct ib_ah *hns_roce_create_ah(struct
+ HNS_ROCE_VLAN_SL_BIT_MASK) <<
+ HNS_ROCE_VLAN_SL_SHIFT;
+
+- ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
++ ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn |
+ (rdma_ah_get_port_num(ah_attr) <<
+ HNS_ROCE_PORT_NUM_SHIFT));
+ ah->av.gid_index = grh->sgid_index;
diff --git a/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch b/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch
new file mode 100644
index 0000000000..65227a37ae
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch
@@ -0,0 +1,307 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:13 +0800
+Subject: RDMA/hns: Fix the Oops during rmmod or insmod ko when reset occurs
+Patch-mainline: v5.1-rc1
+Git-commit: d061effc36f7bd38a12912977a37a50ac9140d11
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+In the reset process, the hns3 NIC driver notifies the RoCE driver to
+perform reset-related processing by calling the .reset_notify() interface
+registered by the RoCE driver on the hip08 SoC.
+
+In the current version, if a reset occurs while rmmod or insmod of the ko
+is executing, there may be an Oops error as below:
+
+ Internal error: Oops: 86000007 [#1] PREEMPT SMP
+ Modules linked in: hns_roce(O) hns3(O) hclge(O) hnae3(O) [last unloaded: hns_roce_hw_v2]
+ CPU: 0 PID: 14 Comm: kworker/0:1 Tainted: G O 4.19.0-ge00d540 #1
+ Hardware name: Huawei Technologies Co., Ltd.
+ Workqueue: events hclge_reset_service_task [hclge]
+ pstate: 60c00009 (nZCv daif +PAN +UAO)
+ pc : 0xffff00000100b0b8
+ lr : 0xffff00000100aea0
+ sp : ffff000009afbab0
+ x29: ffff000009afbab0 x28: 0000000000000800
+ x27: 0000000000007ff0 x26: ffff80002f90c004
+ x25: 00000000000007ff x24: ffff000008f97000
+ x23: ffff80003efee0a8 x22: 0000000000001000
+ x21: ffff80002f917ff0 x20: ffff8000286ea070
+ x19: 0000000000000800 x18: 0000000000000400
+ x17: 00000000c4d3225d x16: 00000000000021b8
+ x15: 0000000000000400 x14: 0000000000000400
+ x13: 0000000000000000 x12: ffff80003fac6e30
+ x11: 0000800036303000 x10: 0000000000000001
+ x9 : 0000000000000000 x8 : ffff80003016d000
+ x7 : 0000000000000000 x6 : 000000000000003f
+ x5 : 0000000000000040 x4 : 0000000000000000
+ x3 : 0000000000000004 x2 : 00000000000007ff
+ x1 : 0000000000000000 x0 : 0000000000000000
+ Process kworker/0:1 (pid: 14, stack limit = 0x00000000af8f0ad9)
+ Call trace:
+ 0xffff00000100b0b8
+ 0xffff00000100b3a0
+ hns_roce_init+0x624/0xc88 [hns_roce]
+ 0xffff000001002df8
+ 0xffff000001006960
+ hclge_notify_roce_client+0x74/0xe0 [hclge]
+ hclge_reset_service_task+0xa58/0xbc0 [hclge]
+ process_one_work+0x1e4/0x458
+ worker_thread+0x40/0x450
+ kthread+0x12c/0x130
+ ret_from_fork+0x10/0x18
+ Code: bad PC value
+
+In the reset process, we first release the resources, and after the
+hardware reset is completed, we reapply resources and reconfigure the
+hardware.
+
+We can solve this problem by modifying both the NIC and the RoCE
+driver. We modify the concurrent processing in the NIC driver to avoid
+calling the .reset_notify and .uninit_instance ops at the same time, and
+we modify the RoCE driver to record the reset stage and the driver's
+init/uninit state, checking that state in the .reset_notify,
+.init_instance and .uninit_instance functions to avoid NULL pointer
+dereferences.
+
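+As a condensed sketch of the pattern introduced below, .init_instance
+now records its state and bails out while a reset is in flight:
+
+	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
+
+	if (ops->ae_dev_resetting(handle) ||
+	    ops->get_hw_reset_stat(handle)) {
+		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
+		return -EBUSY;	/* device busy in resetting, retry later */
+	}
+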
+Fixes: cb7a94c9c808 ("RDMA/hns: Add reset process for RoCE in hip08")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 21 +++++
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 103 ++++++++++++++++++++++++----
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 1
+ 3 files changed, 112 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -217,6 +217,26 @@ enum {
+ HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4
+ };
+
++enum hns_roce_reset_stage {
++ HNS_ROCE_STATE_NON_RST,
++ HNS_ROCE_STATE_RST_BEF_DOWN,
++ HNS_ROCE_STATE_RST_DOWN,
++ HNS_ROCE_STATE_RST_UNINIT,
++ HNS_ROCE_STATE_RST_INIT,
++ HNS_ROCE_STATE_RST_INITED,
++};
++
++enum hns_roce_instance_state {
++ HNS_ROCE_STATE_NON_INIT,
++ HNS_ROCE_STATE_INIT,
++ HNS_ROCE_STATE_INITED,
++ HNS_ROCE_STATE_UNINIT,
++};
++
++enum {
++ HNS_ROCE_RST_DIRECT_RETURN = 0,
++};
++
+ #define HNS_ROCE_CMD_SUCCESS 1
+
+ #define HNS_ROCE_PORT_DOWN 0
+@@ -919,6 +939,7 @@ struct hns_roce_dev {
+ spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
++ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+
+ struct list_head pgdir_list;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -6002,6 +6002,7 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_
+ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
+ struct hnae3_handle *handle)
+ {
++ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ const struct pci_device_id *id;
+ int i;
+
+@@ -6032,10 +6033,13 @@ static int hns_roce_hw_v2_get_cfg(struct
+ hr_dev->cmd_mod = 1;
+ hr_dev->loop_idc = 0;
+
++ hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
++ priv->handle = handle;
++
+ return 0;
+ }
+
+-static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ {
+ struct hns_roce_dev *hr_dev;
+ int ret;
+@@ -6052,7 +6056,6 @@ static int hns_roce_hw_v2_init_instance(
+
+ hr_dev->pci_dev = handle->pdev;
+ hr_dev->dev = &handle->pdev->dev;
+- handle->priv = hr_dev;
+
+ ret = hns_roce_hw_v2_get_cfg(hr_dev, handle);
+ if (ret) {
+@@ -6066,6 +6069,8 @@ static int hns_roce_hw_v2_init_instance(
+ goto error_failed_get_cfg;
+ }
+
++ handle->priv = hr_dev;
++
+ return 0;
+
+ error_failed_get_cfg:
+@@ -6077,7 +6082,7 @@ error_failed_kzalloc:
+ return ret;
+ }
+
+-static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
+ bool reset)
+ {
+ struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
+@@ -6085,24 +6090,78 @@ static void hns_roce_hw_v2_uninit_instan
+ if (!hr_dev)
+ return;
+
++ handle->priv = NULL;
+ hns_roce_exit(hr_dev);
+ kfree(hr_dev->priv);
+ ib_dealloc_device(&hr_dev->ib_dev);
+ }
+
++static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
++{
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ struct device *dev = &handle->pdev->dev;
++ int ret;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
++
++ if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ goto reset_chk_err;
++ }
++
++ ret = __hns_roce_hw_v2_init_instance(handle);
++ if (ret) {
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++ dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
++ if (ops->ae_dev_resetting(handle) ||
++ ops->get_hw_reset_stat(handle))
++ goto reset_chk_err;
++ else
++ return ret;
++ }
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
++
++
++ return 0;
++
++reset_chk_err:
++ dev_err(dev, "Device is busy in resetting state.\n"
++ "please retry later.\n");
++
++ return -EBUSY;
++}
++
++static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
++ bool reset)
++{
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
++ return;
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
++
++ __hns_roce_hw_v2_uninit_instance(handle, reset);
++
++ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
++}
+ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
+ {
+- struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
++ struct hns_roce_dev *hr_dev;
+ struct ib_event event;
+
+- if (!hr_dev) {
+- dev_err(&handle->pdev->dev,
+- "Input parameter handle->priv is NULL!\n");
+- return -EINVAL;
++ if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
++ set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++ return 0;
+ }
+
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
++ clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
++
++ hr_dev = (struct hns_roce_dev *)handle->priv;
++ if (!hr_dev)
++ return 0;
++
+ hr_dev->active = false;
+- hr_dev->is_reset = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+@@ -6114,17 +6173,29 @@ static int hns_roce_hw_v2_reset_notify_d
+
+ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
+ {
++ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+- ret = hns_roce_hw_v2_init_instance(handle);
++ if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
++ &handle->rinfo.state)) {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ return 0;
++ }
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
++
++ dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
++ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ /* when reset notify type is HNAE3_INIT_CLIENT In reset notify
+ * callback function, RoCE Engine reinitialize. If RoCE reinit
+ * failed, we should inform NIC driver.
+ */
+ handle->priv = NULL;
+- dev_err(&handle->pdev->dev,
+- "In reset process RoCE reinit failed %d.\n", ret);
++ dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
++ } else {
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
++ dev_info(dev, "Reset done, RoCE client reinit finished.\n");
+ }
+
+ return ret;
+@@ -6132,8 +6203,14 @@ static int hns_roce_hw_v2_reset_notify_i
+
+ static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
+ {
++ if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
++ return 0;
++
++ handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
++ dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
+ msleep(100);
+- hns_roce_hw_v2_uninit_instance(handle, false);
++ __hns_roce_hw_v2_uninit_instance(handle, false);
++
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1602,6 +1602,7 @@ struct hns_roce_link_table_entry {
+ #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20)
+
+ struct hns_roce_v2_priv {
++ struct hnae3_handle *handle;
+ struct hns_roce_v2_cmq cmq;
+ struct hns_roce_link_table tsq;
+ struct hns_roce_link_table tpq;
diff --git a/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch b/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch
new file mode 100644
index 0000000000..25bd379ed6
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch
@@ -0,0 +1,48 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:06 +0800
+Subject: RDMA/hns: Fix the bug with updating rq head pointer when flush cqe
+Patch-mainline: v5.1-rc1
+Git-commit: 9c6ccc035c209dda07685e8dba829a203ba17499
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+When flushing cqes on a QP attached to an SRQ, the driver must not update
+the rq head pointer in the hardware.
+
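+As a condensed sketch (the full hunk below also clears the matching
+qpc_mask field), the RQ producer index is written back only when the QP
+actually owns an RQ:
+
+	if (!ibqp->srq)
+		roce_set_field(context->byte_84_rq_ci_pi,
+			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+			       V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+			       hr_qp->rq.head);
+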
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3789,13 +3789,16 @@ static int hns_roce_v2_modify_qp(struct
+ roce_set_field(qpc_mask->byte_160_sq_ci_pi,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
+- roce_set_field(context->byte_84_rq_ci_pi,
++
++ if (!ibqp->srq) {
++ roce_set_field(context->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
+ hr_qp->rq.head);
+- roce_set_field(qpc_mask->byte_84_rq_ci_pi,
++ roce_set_field(qpc_mask->byte_84_rq_ci_pi,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
+ V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
++ }
+ }
+
+ if (attr_mask & IB_QP_AV) {
+@@ -4281,7 +4284,8 @@ static void hns_roce_set_qps_to_err(stru
+ if (hr_qp->ibqp.uobject) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
++ if (hr_qp->rdb_en == 1)
++ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
+ return;
diff --git a/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch
new file mode 100644
index 0000000000..b0f860e779
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch
@@ -0,0 +1,168 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:15 +0800
+Subject: RDMA/hns: Fix the chip hanging caused by sending doorbell during
+ reset
+Patch-mainline: v5.1-rc1
+Git-commit: d3743fa94ccd177917783726faf54632439ddb54
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+On the hip08 chip, there is a possibility of the chip hanging when a
+doorbell is sent during reset. We can fix it by prohibiting doorbells
+during reset.
+
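+As a lightly condensed sketch (taken from the hns_roce_hw_v2.h hunk
+below), all doorbell writes are funneled through a wrapper that drops
+them while a reset is pending:
+
+	static inline void hns_roce_write64(struct hns_roce_dev *hr_dev,
+					    __le32 val[2], void __iomem *dest)
+	{
+		struct hns_roce_v2_priv *priv = hr_dev->priv;
+		const struct hnae3_ae_ops *ops =
+					priv->handle->ae_algo->ops;
+
+		/* skip the MMIO write while a reset is pending or active */
+		if (!hr_dev->dis_db && !ops->get_hw_reset_stat(priv->handle))
+			hns_roce_write64_k(val, dest);
+	}
+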
+Fixes: 2d40788825ac ("RDMA/hns: Add support for processing send wr and receive wr")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_device.h | 1 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 25 ++++++++++++++++---------
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 11 +++++++++++
+ 3 files changed, 28 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -946,6 +946,7 @@ struct hns_roce_dev {
+ spinlock_t bt_cmd_lock;
+ bool active;
+ bool is_reset;
++ bool dis_db;
+ unsigned long reset_cnt;
+ struct hns_roce_ib_iboe iboe;
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -587,7 +587,7 @@ out:
+ roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
+ V2_DB_PARAMETER_SL_S, qp->sl);
+
+- hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
++ hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
+
+ qp->sq_next_wqe = ind;
+ qp->next_sge = sge_ind;
+@@ -717,7 +717,7 @@ static int hns_roce_v2_cmd_hw_reseted(st
+ unsigned long reset_stage)
+ {
+ /* When hardware reset has been completed once or more, we should stop
+- * sending mailbox&cmq to hardware. If now in .init_instance()
++ * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
+ * function, we should exit with error. If now at HNAE3_INIT_CLIENT
+ * stage of soft reset process, we should exit with error, and then
+ * HNAE3_INIT_CLIENT related process can rollback the operation like
+@@ -726,6 +726,7 @@ static int hns_roce_v2_cmd_hw_reseted(st
+ * reset process once again.
+ */
+ hr_dev->is_reset = true;
++ hr_dev->dis_db = true;
+
+ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
+ instance_stage == HNS_ROCE_STATE_INIT)
+@@ -742,8 +743,8 @@ static int hns_roce_v2_cmd_hw_resetting(
+ struct hnae3_handle *handle = priv->handle;
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+- /* When hardware reset is detected, we should stop sending mailbox&cmq
+- * to hardware. If now in .init_instance() function, we should
++ /* When hardware reset is detected, we should stop sending mailbox&cmq&
++ * doorbell to hardware. If now in .init_instance() function, we should
+ * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
+ * process, we should exit with error, and then HNAE3_INIT_CLIENT
+ * related process can rollback the operation like notifing hardware to
+@@ -751,6 +752,7 @@ static int hns_roce_v2_cmd_hw_resetting(
+ * error to notify NIC driver to reschedule soft reset process once
+ * again.
+ */
++ hr_dev->dis_db = true;
+ if (!ops->get_hw_reset_stat(handle))
+ hr_dev->is_reset = true;
+
+@@ -768,9 +770,10 @@ static int hns_roce_v2_cmd_sw_resetting(
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ /* When software reset is detected at .init_instance() function, we
+- * should stop sending mailbox&cmq to hardware, and exit with
+- * error.
++ * should stop sending mailbox&cmq&doorbell to hardware, and exit
++ * with error.
+ */
++ hr_dev->dis_db = true;
+ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
+ hr_dev->is_reset = true;
+
+@@ -2495,6 +2498,7 @@ static void hns_roce_v2_write_cqc(struct
+ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
+ enum ib_cq_notify_flags flags)
+ {
++ struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
+ struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
+ u32 notification_flag;
+ u32 doorbell[2];
+@@ -2520,7 +2524,7 @@ static int hns_roce_v2_req_notify_cq(str
+ roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
+ notification_flag);
+
+- hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
++ hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
+
+ return 0;
+ }
+@@ -4763,6 +4767,7 @@ static void hns_roce_v2_init_irq_work(st
+
+ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
+ {
++ struct hns_roce_dev *hr_dev = eq->hr_dev;
+ u32 doorbell[2];
+
+ doorbell[0] = 0;
+@@ -4789,7 +4794,7 @@ static void set_eq_cons_index_v2(struct
+ HNS_ROCE_V2_EQ_DB_PARA_S,
+ (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
+
+- hns_roce_write64_k(doorbell, eq->doorbell);
++ hns_roce_write64(hr_dev, doorbell, eq->doorbell);
+ }
+
+ static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
+@@ -6011,6 +6016,7 @@ static int hns_roce_v2_post_srq_recv(str
+ const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+ {
++ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+ struct hns_roce_v2_wqe_data_seg *dseg;
+ struct hns_roce_v2_db srq_db;
+@@ -6072,7 +6078,7 @@ static int hns_roce_v2_post_srq_recv(str
+ srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn;
+ srq_db.parameter = srq->head;
+
+- hns_roce_write64_k((__le32 *)&srq_db, srq->db_reg_l);
++ hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
+
+ }
+
+@@ -6291,6 +6297,7 @@ static int hns_roce_hw_v2_reset_notify_d
+ return 0;
+
+ hr_dev->active = false;
++ hr_dev->dis_db = true;
+
+ event.event = IB_EVENT_DEVICE_FATAL;
+ event.device = &hr_dev->ib_dev;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -1799,4 +1799,15 @@ struct hns_roce_sccc_clr_done {
+ __le32 rsv[5];
+ };
+
++static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2],
++ void __iomem *dest)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle))
++ hns_roce_write64_k(val, dest);
++}
++
+ #endif
diff --git a/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch
new file mode 100644
index 0000000000..529996624b
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch
@@ -0,0 +1,285 @@
+From: "Wei Hu (Xavier)" <xavier.huwei@huawei.com>
+Date: Sun, 3 Feb 2019 20:43:14 +0800
+Subject: RDMA/hns: Fix the chip hanging caused by sending mailbox&CMQ during
+ reset
+Patch-mainline: v5.1-rc1
+Git-commit: 6a04aed6afaefd5fd396f23da184298135f31e37
+References: bsc#1104427 FATE#326416 bsc#1137232
+
+On the hip08 chip, there is a possibility of the chip hanging and other
+errors when sending mailbox & doorbell operations during reset. We can
+fix it by prohibiting mailbox and doorbell operations during a reset and
+after a reset has occurred, to ensure that the hardware can work normally.
+
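+As a sketch of the scheme introduced below, every command path first
+consults the new rst_prc_mbox hook and maps its verdict to a return
+code:
+
+	ret = hr_dev->hw->rst_prc_mbox(hr_dev);
+	if (ret == CMD_RST_PRC_SUCCESS)
+		return 0;	/* reset already handled, silently succeed */
+	else if (ret == CMD_RST_PRC_EBUSY)
+		return -EBUSY;	/* reset in progress, caller must retry */
+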
+Fixes: a04ff739f2a9 ("RDMA/hns: Add command queue support for hip08 RoCE driver")
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_cmd.c | 32 ++++--
+ drivers/infiniband/hw/hns/hns_roce_device.h | 7 +
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 139 ++++++++++++++++++++++++++--
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2
+ 4 files changed, 167 insertions(+), 13 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
+@@ -176,17 +176,33 @@ int hns_roce_cmd_mbox(struct hns_roce_de
+ unsigned long in_modifier, u8 op_modifier, u16 op,
+ unsigned long timeout)
+ {
+- if (hr_dev->is_reset)
+- return 0;
++ int ret;
++
++ if (hr_dev->hw->rst_prc_mbox) {
++ ret = hr_dev->hw->rst_prc_mbox(hr_dev);
++ if (ret == CMD_RST_PRC_SUCCESS)
++ return 0;
++ else if (ret == CMD_RST_PRC_EBUSY)
++ return -EBUSY;
++ }
+
+ if (hr_dev->cmd.use_events)
+- return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
+- in_modifier, op_modifier, op,
+- timeout);
++ ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param,
++ in_modifier, op_modifier, op,
++ timeout);
+ else
+- return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
+- in_modifier, op_modifier, op,
+- timeout);
++ ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param,
++ in_modifier, op_modifier, op,
++ timeout);
++
++ if (ret == CMD_RST_PRC_EBUSY)
++ return -EBUSY;
++
++ if (ret && (hr_dev->hw->rst_prc_mbox &&
++ hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS))
++ return 0;
++
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
+
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -237,6 +237,12 @@ enum {
+ HNS_ROCE_RST_DIRECT_RETURN = 0,
+ };
+
++enum {
++ CMD_RST_PRC_OTHERS,
++ CMD_RST_PRC_SUCCESS,
++ CMD_RST_PRC_EBUSY,
++};
++
+ #define HNS_ROCE_CMD_SUCCESS 1
+
+ #define HNS_ROCE_PORT_DOWN 0
+@@ -875,6 +881,7 @@ struct hns_roce_hw {
+ u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
+ u16 token, int event);
+ int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
++ int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
+ int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
+ const union ib_gid *gid, const struct ib_gid_attr *attr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -712,6 +712,110 @@ out:
+ return ret;
+ }
+
++static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
++ unsigned long instance_stage,
++ unsigned long reset_stage)
++{
++ /* When hardware reset has been completed once or more, we should stop
++ * sending mailbox&cmq to hardware. If now in .init_instance()
++ * function, we should exit with error. If now at HNAE3_INIT_CLIENT
++ * stage of soft reset process, we should exit with error, and then
++ * HNAE3_INIT_CLIENT related process can rollback the operation like
++ * notifing hardware to free resources, HNAE3_INIT_CLIENT related
++ * process will exit with error to notify NIC driver to reschedule soft
++ * reset process once again.
++ */
++ hr_dev->is_reset = true;
++
++ if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
++ instance_stage == HNS_ROCE_STATE_INIT)
++ return CMD_RST_PRC_EBUSY;
++
++ return CMD_RST_PRC_SUCCESS;
++}
++
++static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
++ unsigned long instance_stage,
++ unsigned long reset_stage)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ /* When hardware reset is detected, we should stop sending mailbox&cmq
++ * to hardware. If now in .init_instance() function, we should
++ * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
++ * process, we should exit with error, and then HNAE3_INIT_CLIENT
++ * related process can rollback the operation like notifing hardware to
++ * free resources, HNAE3_INIT_CLIENT related process will exit with
++ * error to notify NIC driver to reschedule soft reset process once
++ * again.
++ */
++ if (!ops->get_hw_reset_stat(handle))
++ hr_dev->is_reset = true;
++
++ if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
++ instance_stage == HNS_ROCE_STATE_INIT)
++ return CMD_RST_PRC_EBUSY;
++
++ return CMD_RST_PRC_SUCCESS;
++}
++
++static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++
++ /* When software reset is detected at .init_instance() function, we
++ * should stop sending mailbox&cmq to hardware, and exit with
++ * error.
++ */
++ if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
++ hr_dev->is_reset = true;
++
++ return CMD_RST_PRC_EBUSY;
++}
++
++static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
++{
++ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
++ struct hnae3_handle *handle = priv->handle;
++ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ unsigned long instance_stage; /* the current instance stage */
++ unsigned long reset_stage; /* the current reset stage */
++ unsigned long reset_cnt;
++ bool sw_resetting;
++ bool hw_resetting;
++
++ if (hr_dev->is_reset)
++ return CMD_RST_PRC_SUCCESS;
++
++ /* Get information about reset from NIC driver or RoCE driver itself,
++ * the meaning of the following variables from NIC driver are described
++ * as below:
++ * reset_cnt -- The count value of completed hardware reset.
++ * hw_resetting -- Whether hardware device is resetting now.
++ * sw_resetting -- Whether NIC's software reset process is running now.
++ */
++ instance_stage = handle->rinfo.instance_state;
++ reset_stage = handle->rinfo.reset_state;
++ reset_cnt = ops->ae_dev_reset_cnt(handle);
++ hw_resetting = ops->get_hw_reset_stat(handle);
++ sw_resetting = ops->ae_dev_resetting(handle);
++
++ if (reset_cnt != hr_dev->reset_cnt)
++ return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
++ reset_stage);
++ else if (hw_resetting)
++ return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
++ reset_stage);
++ else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
++ return hns_roce_v2_cmd_sw_resetting(hr_dev);
++
++ return 0;
++}
++
+ static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
+ {
+ int ntu = ring->next_to_use;
+@@ -892,8 +996,8 @@ static int hns_roce_cmq_csq_clean(struct
+ return clean;
+ }
+
+-static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+- struct hns_roce_cmq_desc *desc, int num)
++static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++ struct hns_roce_cmq_desc *desc, int num)
+ {
+ struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+@@ -905,9 +1009,6 @@ static int hns_roce_cmq_send(struct hns_
+ int ret = 0;
+ int ntc;
+
+- if (hr_dev->is_reset)
+- return 0;
+-
+ spin_lock_bh(&csq->lock);
+
+ if (num > hns_roce_cmq_space(csq)) {
+@@ -982,6 +1083,30 @@ static int hns_roce_cmq_send(struct hns_
+ return ret;
+ }
+
++int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++ struct hns_roce_cmq_desc *desc, int num)
++{
++ int retval;
++ int ret;
++
++ ret = hns_roce_v2_rst_process_cmd(hr_dev);
++ if (ret == CMD_RST_PRC_SUCCESS)
++ return 0;
++ if (ret == CMD_RST_PRC_EBUSY)
++ return ret;
++
++ ret = __hns_roce_cmq_send(hr_dev, desc, num);
++ if (ret) {
++ retval = hns_roce_v2_rst_process_cmd(hr_dev);
++ if (retval == CMD_RST_PRC_SUCCESS)
++ return 0;
++ else if (retval == CMD_RST_PRC_EBUSY)
++ return retval;
++ }
++
++ return ret;
++}
++
+ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
+ {
+ struct hns_roce_query_version *resp;
+@@ -1857,6 +1982,9 @@ static int hns_roce_v2_chk_mbox(struct h
+
+ status = hns_roce_v2_cmd_complete(hr_dev);
+ if (status != 0x1) {
++ if (status == CMD_RST_PRC_EBUSY)
++ return status;
++
+ dev_err(dev, "mailbox status 0x%x!\n", status);
+ return -EBUSY;
+ }
+@@ -5961,6 +6089,7 @@ static const struct hns_roce_hw hns_roce
+ .hw_exit = hns_roce_v2_exit,
+ .post_mbox = hns_roce_v2_post_mbox,
+ .chk_mbox = hns_roce_v2_chk_mbox,
++ .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
+ .set_gid = hns_roce_v2_set_gid,
+ .set_mac = hns_roce_v2_set_mac,
+ .write_mtpt = hns_roce_v2_write_mtpt,
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -96,6 +96,8 @@
+ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
+ #define HNS_ROCE_V2_RSV_QPS 8
+
++#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
++
+ #define HNS_ROCE_CONTEXT_HOP_NUM 1
+ #define HNS_ROCE_SCCC_HOP_NUM 1
+ #define HNS_ROCE_MTT_HOP_NUM 1
diff --git a/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch b/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch
new file mode 100644
index 0000000000..40bf3f4bf9
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch
@@ -0,0 +1,29 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:04 +0800
+Subject: RDMA/hns: Fix the state of rereg mr
+Patch-mainline: v5.1-rc1
+Git-commit: ab22bf05216a6bb4812448f3a8609489047cf311
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The state of an mr after a reregister operation should be set to valid.
+Otherwise, it keeps the same state as before the reregistration.
+
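+As a sketch (taken from the hunk below), the MPT state is forced to
+valid at the start of the rereg write:
+
+	roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+		       V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
+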
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -2195,6 +2195,9 @@ static int hns_roce_v2_rereg_write_mtpt(
+ struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
+ int ret = 0;
+
++ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
++ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
++
+ if (flags & IB_MR_REREG_PD) {
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, pdn);
diff --git a/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch b/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch
new file mode 100644
index 0000000000..6460ca7056
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch
@@ -0,0 +1,54 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:25 +0800
+Subject: RDMA/hns: Hide error print information with roce vf device
+Patch-mainline: v5.2-rc1
+Git-commit: 07c2339a91c1ec3a8b8ada00361eced7b153ec0c
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The driver should not print error information when the hip08 driver does
+not support the virtual function.
+
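+As a sketch (taken from the hunk below), the PCI id table is now probed
+in .init_instance, and unsupported functions bail out quietly instead of
+printing an error:
+
+	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
+	if (!id)
+		return 0;	/* e.g. a VF: not a supported RoCE function */
+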
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -6123,15 +6123,8 @@ static int hns_roce_hw_v2_get_cfg(struct
+ struct hnae3_handle *handle)
+ {
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+- const struct pci_device_id *id;
+ int i;
+
+- id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev);
+- if (!id) {
+- dev_err(hr_dev->dev, "device is not compatible!\n");
+- return -ENXIO;
+- }
+-
+ hr_dev->hw = &hns_roce_hw_v2;
+ hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
+ hr_dev->odb_offset = hr_dev->sdb_offset;
+@@ -6219,6 +6212,7 @@ static void __hns_roce_hw_v2_uninit_inst
+ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
+ {
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
++ const struct pci_device_id *id;
+ struct device *dev = &handle->pdev->dev;
+ int ret;
+
+@@ -6229,6 +6223,10 @@ static int hns_roce_hw_v2_init_instance(
+ goto reset_chk_err;
+ }
+
++ id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
++ if (!id)
++ return 0;
++
+ ret = __hns_roce_hw_v2_init_instance(handle);
+ if (ret) {
+ handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
diff --git a/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch b/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch
new file mode 100644
index 0000000000..7a6da7c908
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch
@@ -0,0 +1,27 @@
+From: chenglang <chenglang@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:03 +0800
+Subject: RDMA/hns: Limit minimum ROCE CQ depth to 64
+Patch-mainline: v5.1-rc1
+Git-commit: 704e0e613a6d584fde1c80ead0329e918b4f8671
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+This patch modifies the minimum CQ depth specification of hip08 to be
+consistent with the handling on hip06.
+
+Signed-off-by: chenglang <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1512,6 +1512,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
+ caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
+ caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
++ caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
+ caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
+ caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
+ caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
diff --git a/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch b/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch
new file mode 100644
index 0000000000..8c19fffea0
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch
@@ -0,0 +1,34 @@
+From: Leon Romanovsky <leonro@mellanox.com>
+Date: Tue, 19 Mar 2019 11:10:09 +0200
+Subject: RDMA/hns: Limit scope of hns_roce_cmq_send()
+Patch-mainline: v5.2-rc1
+Git-commit: e95e52a1788d4a8af547261875c0fbae2e6e3028
+References: bsc#1104427 FATE#326416
+
+A forgotten static keyword causes the following warning to appear while
+building the HNS driver. Declare hns_roce_cmq_send() as a static function
+to fix it.
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:1089:5: warning: no previous
+prototype for 'hns_roce_cmq_send' [-Wmissing-prototypes]
+ int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+
+Fixes: 6a04aed6afae ("RDMA/hns: Fix the chip hanging caused by sending mailbox&CMQ during reset")
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1086,7 +1086,7 @@ static int __hns_roce_cmq_send(struct hn
+ return ret;
+ }
+
+-int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
++static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+ {
+ int retval;
diff --git a/patches.drivers/RDMA-hns-Make-some-function-static.patch b/patches.drivers/RDMA-hns-Make-some-function-static.patch
new file mode 100644
index 0000000000..95f39ddb79
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Make-some-function-static.patch
@@ -0,0 +1,60 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Fri, 1 Feb 2019 11:11:04 +0800
+Subject: RDMA/hns: Make some function static
+Patch-mainline: v5.1-rc1
+Git-commit: c3c668e742397dfc107e44c09606cc68b37df30d
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Fixes the following sparse warnings:
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:5822:5: warning:
+ symbol 'hns_roce_v2_query_srq' was not declared. Should it be static?
+drivers/infiniband/hw/hns/hns_roce_srq.c:158:6: warning:
+ symbol 'hns_roce_srq_free' was not declared. Should it be static?
+drivers/infiniband/hw/hns/hns_roce_srq.c:81:5: warning:
+ symbol 'hns_roce_srq_alloc' was not declared. Should it be static?
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ drivers/infiniband/hw/hns/hns_roce_srq.c | 9 +++++----
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5818,7 +5818,7 @@ static int hns_roce_v2_modify_srq(struct
+ return 0;
+ }
+
+-int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
++static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
+ struct hns_roce_srq *srq = to_hr_srq(ibsrq);
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -79,9 +79,9 @@ static int hns_roce_hw2sw_srq(struct hns
+ HNS_ROCE_CMD_TIMEOUT_MSECS);
+ }
+
+-int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd,
+- struct hns_roce_mtt *hr_mtt, u64 db_rec_addr,
+- struct hns_roce_srq *srq)
++static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
++ u16 xrcd, struct hns_roce_mtt *hr_mtt,
++ u64 db_rec_addr, struct hns_roce_srq *srq)
+ {
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ struct hns_roce_cmd_mailbox *mailbox;
+@@ -160,7 +160,8 @@ err_out:
+ return ret;
+ }
+
+-void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
++static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
++ struct hns_roce_srq *srq)
+ {
+ struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+ int ret;
diff --git a/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch b/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch
new file mode 100644
index 0000000000..09e16f07be
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch
@@ -0,0 +1,48 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 16 Feb 2019 20:10:24 +0800
+Subject: RDMA/hns: Modify qp&cq&pd specification according to UM
+Patch-mainline: v5.1-rc1
+Git-commit: 3e394f9413ecba2779b6a1d77095f4d8611a52d2
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+According to hip08's limitations, the qp&cq specification is 1M and the
+mtpt specification is 1M in kernel space.
+
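+(As a sanity check, 0x100000 = 1,048,576 entries, i.e. the 1M
+specification mentioned above.)
+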
+Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -45,14 +45,14 @@
+ #define HNS_ROCE_VF_SGID_NUM 32
+ #define HNS_ROCE_VF_SL_NUM 8
+
+-#define HNS_ROCE_V2_MAX_QP_NUM 0x2000
+-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
++#define HNS_ROCE_V2_MAX_QP_NUM 0x100000
++#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
+ #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ 0x100000
+ #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
+ #define HNS_ROCE_V2_MAX_SRQ_SGE 0x100
+-#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000
+-#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
++#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
++#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+@@ -67,7 +67,7 @@
+ #define HNS_ROCE_V2_COMP_VEC_NUM 63
+ #define HNS_ROCE_V2_AEQE_VEC_NUM 1
+ #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
+-#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000
++#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
+ #define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
+ #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
diff --git a/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch b/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch
new file mode 100644
index 0000000000..cf9eae1d3f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch
@@ -0,0 +1,28 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Wed, 12 Dec 2018 17:49:09 +0800
+Subject: RDMA/hns: Modify the pbl ba page size for hip08
+Patch-mainline: v5.1-rc1
+Git-commit: 91fb4d83b88a7b544ce564c44167aad29d4154f0
+References: bsc#1104427 FATE#326416 bsc#1137233
+
+Modify the pbl ba page size to 16K in order to support a 4G MR size.
+
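+(As a sanity check, under the driver's convention that the effective
+page size is 1 << (pg_sz + PAGE_SHIFT), pbl_ba_pg_sz = 2 gives
+1 << (2 + 12) = 16KB, assuming 4KB base pages.)
+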
+Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1366,7 +1366,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->mpt_ba_pg_sz = 0;
+ caps->mpt_buf_pg_sz = 0;
+ caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+- caps->pbl_ba_pg_sz = 0;
++ caps->pbl_ba_pg_sz = 2;
+ caps->pbl_buf_pg_sz = 0;
+ caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
+ caps->mtt_ba_pg_sz = 0;
diff --git a/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch b/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch
new file mode 100644
index 0000000000..3a110d3ffd
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch
@@ -0,0 +1,92 @@
+From: Lang Cheng <chenglang@huawei.com>
+Date: Fri, 24 May 2019 15:31:22 +0800
+Subject: RDMA/hns: Move spin_lock_irqsave to the correct place
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 780f33962ef27d7f27c6b47a55593c6ffd357922
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When hip08 sets a gid, it calls spin_unlock_bh while sending the cmq. If
+main.ko calls spin_lock_irqsave first, and the kernel predates commit
+f71b74bca637 ("irq/softirqs: Use lockdep to assert IRQs are
+disabled/enabled"), this causes a WARN_ON_ONCE because spin_unlock_bh is
+called in an interrupt-disabled context.
+
+In fact, the spin_lock_irqsave in main.ko is only needed for hip06, and
+should be placed in hns_roce_hw_v1.c. hns_roce_hw_v2.c takes its own
+spin_lock_bh/spin_unlock_bh and does not need main.ko to manage the lock.
+
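+As a sketch (taken from the hns_roce_hw_v1.c hunk below), the hip06
+set_gid path now takes the lock itself, so main.ko no longer wraps the
+->set_gid() call:
+
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	p = (u32 *)&gid->raw[0];
+	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
+		       (HNS_ROCE_V1_GID_NUM * gid_idx));
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+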
+Signed-off-by: Lang Cheng <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 5 +++++
+ drivers/infiniband/hw/hns/hns_roce_main.c | 10 ----------
+ 2 files changed, 5 insertions(+), 10 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -1780,11 +1780,14 @@ static int hns_roce_v1_set_gid(struct hn
+ int gid_index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr)
+ {
++ unsigned long flags;
+ u32 *p = NULL;
+ u8 gid_idx = 0;
+
+ gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
+
++ spin_lock_irqsave(&hr_dev->iboe.lock, flags);
++
+ p = (u32 *)&gid->raw[0];
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+@@ -1801,6 +1804,8 @@ static int hns_roce_v1_set_gid(struct hn
+ roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
+ (HNS_ROCE_V1_GID_NUM * gid_idx));
+
++ spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
++
+ return 0;
+ }
+
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -78,18 +78,13 @@ static int hns_roce_add_gid(const struct
+ {
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ u8 port = attr->port_num - 1;
+- unsigned long flags;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+-
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
+
+- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+-
+ return ret;
+ }
+
+@@ -98,18 +93,13 @@ static int hns_roce_del_gid(const struct
+ struct hns_roce_dev *hr_dev = to_hr_dev(attr->device);
+ struct ib_gid_attr zattr = { };
+ u8 port = attr->port_num - 1;
+- unsigned long flags;
+ int ret;
+
+ if (port >= hr_dev->caps.num_ports)
+ return -EINVAL;
+
+- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+-
+ ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr);
+
+- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+-
+ return ret;
+ }
+
diff --git a/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch b/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch
new file mode 100644
index 0000000000..1f92cbf96e
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch
@@ -0,0 +1,248 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:24 +0800
+Subject: RDMA/hns: Only assgin some fields if the relatived attr_mask is set
+Patch-mainline: v5.2-rc1
+Git-commit: 5b01b243b0b3725b4460e8924e1f105bb4038969
+References: bsc#1104427 FATE#326416
+
+According to the IB protocol, some fields of the qp context are optional
+and are filled in only when the related attr_mask bits are set. The
+related attr_mask bits include IB_QP_TIMEOUT, IB_QP_RETRY_CNT,
+IB_QP_RNR_RETRY and IB_QP_MIN_RNR_TIMER. Besides, we move some
+assignments of qp context fields outside of the specific qp state
+transition functions.
+
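+As a hedged sketch of the intended shape (the mask-guarded assignments
+are added further down this function; field names follow the hunks
+removed below), each optional field is programmed only when its
+attr_mask bit is set:
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+		roce_set_field(context->byte_80_rnr_rx_cqn,
+			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+			       V2_QPC_BYTE_80_MIN_RNR_TIME_S,
+			       attr->min_rnr_timer);
+		roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+			       V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+			       V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+	}
+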
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 152 +++++++++++++++--------------
+ 1 file changed, 81 insertions(+), 71 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3176,12 +3176,6 @@ static void modify_qp_reset_to_init(stru
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
+
+- if (attr_mask & IB_QP_QKEY) {
+- context->qkey_xrcd = attr->qkey;
+- qpc_mask->qkey_xrcd = 0;
+- hr_qp->qkey = attr->qkey;
+- }
+-
+ if (hr_qp->rdb_en) {
+ roce_set_bit(context->byte_68_rq_db,
+ V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
+@@ -3393,7 +3387,6 @@ static void modify_qp_reset_to_init(stru
+ 0);
+
+ hr_qp->access_flags = attr->qp_access_flags;
+- hr_qp->pkey_index = attr->pkey_index;
+ roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+ V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
+ roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
+@@ -3517,11 +3510,6 @@ static void modify_qp_init_to_init(struc
+ V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
+ }
+
+- if (attr_mask & IB_QP_QKEY) {
+- context->qkey_xrcd = attr->qkey;
+- qpc_mask->qkey_xrcd = 0;
+- }
+-
+ roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+ V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
+ roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
+@@ -3641,13 +3629,6 @@ static int modify_qp_init_to_rtr(struct
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
+ V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
+
+- roce_set_field(context->byte_80_rnr_rx_cqn,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer);
+- roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_M,
+- V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
+-
+ page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size]
+ >> PAGE_ADDR_SHIFT);
+@@ -3713,15 +3694,6 @@ static int modify_qp_init_to_rtr(struct
+ roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
+ }
+
+- if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
+- attr->max_dest_rd_atomic) {
+- roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+- V2_QPC_BYTE_140_RR_MAX_S,
+- fls(attr->max_dest_rd_atomic - 1));
+- roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
+- V2_QPC_BYTE_140_RR_MAX_S, 0);
+- }
+-
+ if (attr_mask & IB_QP_DEST_QPN) {
+ roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
+ V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
+@@ -3902,57 +3874,14 @@ static int modify_qp_rtr_to_rts(struct i
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
+ V2_QPC_BYTE_212_CHECK_FLG_S, 0);
+
+- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+- V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
+- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M,
+- V2_QPC_BYTE_212_RETRY_CNT_S, 0);
+-
+- roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt);
+- roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
+- V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
+-
+- roce_set_field(context->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_M,
+- V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
+-
+- roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+- V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M,
+- V2_QPC_BYTE_244_RNR_CNT_S, 0);
+-
+ roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0x100);
+ roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
+ V2_QPC_BYTE_212_LSN_S, 0);
+
+- if (attr_mask & IB_QP_TIMEOUT) {
+- if (attr->timeout < 31) {
+- roce_set_field(context->byte_28_at_fl,
+- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+- attr->timeout);
+- roce_set_field(qpc_mask->byte_28_at_fl,
+- V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
+- 0);
+- } else {
+- dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
+- }
+- }
+-
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+
+- if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+- roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+- V2_QPC_BYTE_208_SR_MAX_S,
+- fls(attr->max_rd_atomic - 1));
+- roce_set_field(qpc_mask->byte_208_irrl,
+- V2_QPC_BYTE_208_SR_MAX_M,
+- V2_QPC_BYTE_208_SR_MAX_S, 0);
+- }
+ return 0;
+ }
+
+@@ -4146,6 +4075,53 @@ static int hns_roce_v2_modify_qp(struct
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
++ if (attr_mask & IB_QP_TIMEOUT) {
++ if (attr->timeout < 31) {
++ roce_set_field(context->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ attr->timeout);
++ roce_set_field(qpc_mask->byte_28_at_fl,
++ V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
++ 0);
++ } else {
++ dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n");
++ }
++ }
++
++ if (attr_mask & IB_QP_RETRY_CNT) {
++ roce_set_field(context->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
++ attr->retry_cnt);
++ roce_set_field(qpc_mask->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
++ V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
++
++ roce_set_field(context->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_CNT_M,
++ V2_QPC_BYTE_212_RETRY_CNT_S,
++ attr->retry_cnt);
++ roce_set_field(qpc_mask->byte_212_lsn,
++ V2_QPC_BYTE_212_RETRY_CNT_M,
++ V2_QPC_BYTE_212_RETRY_CNT_S, 0);
++ }
++
++ if (attr_mask & IB_QP_RNR_RETRY) {
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_M,
++ V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
++
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_CNT_M,
++ V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RNR_CNT_M,
++ V2_QPC_BYTE_244_RNR_CNT_S, 0);
++ }
++
+ if (attr_mask & IB_QP_SQ_PSN) {
+ roce_set_field(context->byte_172_sq_psn,
+ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+@@ -4192,9 +4168,37 @@ static int hns_roce_v2_modify_qp(struct
+ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+ }
+
++ if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
++ attr->max_dest_rd_atomic) {
++ roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++ V2_QPC_BYTE_140_RR_MAX_S,
++ fls(attr->max_dest_rd_atomic - 1));
++ roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
++ V2_QPC_BYTE_140_RR_MAX_S, 0);
++ }
++
++ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
++ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
++ V2_QPC_BYTE_208_SR_MAX_S,
++ fls(attr->max_rd_atomic - 1));
++ roce_set_field(qpc_mask->byte_208_irrl,
++ V2_QPC_BYTE_208_SR_MAX_M,
++ V2_QPC_BYTE_208_SR_MAX_S, 0);
++ }
++
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
++ if (attr_mask & IB_QP_MIN_RNR_TIMER) {
++ roce_set_field(context->byte_80_rnr_rx_cqn,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_S,
++ attr->min_rnr_timer);
++ roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_M,
++ V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
++ }
++
+ /* RC&UC required attr */
+ if (attr_mask & IB_QP_RQ_PSN) {
+ roce_set_field(context->byte_108_rx_reqepsn,
+@@ -4211,6 +4215,12 @@ static int hns_roce_v2_modify_qp(struct
+ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+ }
+
++ if (attr_mask & IB_QP_QKEY) {
++ context->qkey_xrcd = attr->qkey;
++ qpc_mask->qkey_xrcd = 0;
++ hr_qp->qkey = attr->qkey;
++ }
++
+ roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
+ ibqp->srq ? 1 : 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
diff --git a/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch b/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch
new file mode 100644
index 0000000000..ae6f922422
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch
@@ -0,0 +1,68 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:22 +0800
+Subject: RDMA/hns: Only assign the fields of the rq psn if IB_QP_RQ_PSN is set
+Patch-mainline: v5.2-rc1
+Git-commit: 601f3e6d067c4399953dc7ede8f4c5448f91b02a
+References: bsc#1104427 FATE#326416
+
+It is only valid to assign the rq psn related fields into the qp context
+during modify qp when the IB_QP_RQ_PSN flag of attr_mask is set.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 28 ++++++++++++++++------------
+ 1 file changed, 16 insertions(+), 12 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3675,13 +3675,6 @@ static int modify_qp_init_to_rtr(struct
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
+ V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
+
+- roce_set_field(context->byte_108_rx_reqepsn,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
+- roce_set_field(qpc_mask->byte_108_rx_reqepsn,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_M,
+- V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
+-
+ roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+ V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
+@@ -3789,11 +3782,6 @@ static int modify_qp_init_to_rtr(struct
+ context->rq_rnr_timer = 0;
+ qpc_mask->rq_rnr_timer = 0;
+
+- roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+- V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
+- roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
+- V2_QPC_BYTE_152_RAQ_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
+ V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
+ roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
+@@ -4207,6 +4195,22 @@ static int hns_roce_v2_modify_qp(struct
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
++ /* RC&UC required attr */
++ if (attr_mask & IB_QP_RQ_PSN) {
++ roce_set_field(context->byte_108_rx_reqepsn,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
++ roce_set_field(qpc_mask->byte_108_rx_reqepsn,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_M,
++ V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
++
++ roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
++ V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
++ roce_set_field(qpc_mask->byte_152_raq,
++ V2_QPC_BYTE_152_RAQ_PSN_M,
++ V2_QPC_BYTE_152_RAQ_PSN_S, 0);
++ }
++
+ roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
+ ibqp->srq ? 1 : 0);
+ roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
diff --git a/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch b/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch
new file mode 100644
index 0000000000..3dfafb3c11
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch
@@ -0,0 +1,133 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:21 +0800
+Subject: RDMA/hns: Only assign the relatived fields of psn if IB_QP_SQ_PSN is
+ set
+Patch-mainline: v5.2-rc1
+Git-commit: f04cc17878b47bfa47af2e50f481d7f6eaaf3ca7
+References: bsc#1104427 FATE#326416
+
+It is only valid to assign the psn related fields into the qp context
+during modify qp when the IB_QP_SQ_PSN flag of attr_mask is set.
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 83 ++++++++++++++++-------------
+ 1 file changed, 46 insertions(+), 37 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3891,13 +3891,6 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_240_RX_ACK_MSN_M,
+ V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
+
+- roce_set_field(context->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_244_rnr_rxack,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_M,
+- V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_248_ack_psn,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
+ V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
+@@ -3911,27 +3904,6 @@ static int modify_qp_rtr_to_rts(struct i
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
+ V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
+
+- roce_set_field(context->byte_220_retry_psn_msn,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
+-
+- roce_set_field(context->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16);
+- roce_set_field(qpc_mask->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
+-
+- roce_set_field(context->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_224_retry_msg,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
+- V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
+ V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
+@@ -3982,17 +3954,8 @@ static int modify_qp_rtr_to_rts(struct i
+ }
+ }
+
+- roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+- V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M,
+- V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
+-
+ roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
+ V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
+- roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+- V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
+- roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M,
+- V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
+
+ if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
+ roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
+@@ -4195,6 +4158,52 @@ static int hns_roce_v2_modify_qp(struct
+ hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
+ }
+
++ if (attr_mask & IB_QP_SQ_PSN) {
++ roce_set_field(context->byte_172_sq_psn,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_172_sq_psn,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_M,
++ V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
++
++ roce_set_field(context->byte_196_sq_psn,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_196_sq_psn,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_M,
++ V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
++
++ roce_set_field(context->byte_220_retry_psn_msn,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_220_retry_psn_msn,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
++
++ roce_set_field(context->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
++ attr->sq_psn >> 16);
++ roce_set_field(qpc_mask->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
++
++ roce_set_field(context->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
++ attr->sq_psn);
++ roce_set_field(qpc_mask->byte_224_retry_msg,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
++ V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
++
++ roce_set_field(context->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
++ roce_set_field(qpc_mask->byte_244_rnr_rxack,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_M,
++ V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
++ }
++
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
+ set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
+
diff --git a/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch b/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch
new file mode 100644
index 0000000000..87a93b911f
--- /dev/null
+++ b/patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch
@@ -0,0 +1,62 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 12 Jan 2019 18:36:29 +0800
+Subject: RDMA/hns: RDMA/hns: Assign rq head pointer when enable rq record db
+Patch-mainline: v5.1-rc1
+Git-commit: de77503a59403e7045c18c6bb0a10c245a99b648
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+When flushing cqes, the driver needs to read the rq and sq head pointers
+from the user db address space and update them into the qp context via
+modify qp. If the rq does not exist, the rq value must not be read from
+the user db address space.
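+
+A minimal sketch of the resulting flush path (illustrative, simplified
+from the hunk below):
+
+	/* sq/rq head pointers live in the user-mapped record doorbell
+	 * pages; only dereference the rq doorbell when rq record db
+	 * was actually enabled for this qp.
+	 */
+	if (hr_qp->sdb_en == 1) {
+		hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+		if (hr_qp->rdb_en == 1)
+			hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+	}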
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 19 ++++++++++---------
+ 1 file changed, 10 insertions(+), 9 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -676,6 +676,10 @@ static int hns_roce_create_qp_common(str
+ dev_err(dev, "rq record doorbell map failed!\n");
+ goto err_sq_dbmap;
+ }
++
++ /* indicate kernel supports rq record db */
++ resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
++ hr_qp->rdb_en = 1;
+ }
+ } else {
+ if (init_attr->create_flags &
+@@ -784,16 +788,11 @@ static int hns_roce_create_qp_common(str
+ else
+ hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn);
+
+- if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) &&
+- (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) {
+-
+- /* indicate kernel supports rq record db */
+- resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
+- ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
++ if (udata) {
++ ret = ib_copy_to_udata(udata, &resp,
++ min(udata->outlen, sizeof(resp)));
+ if (ret)
+ goto err_qp;
+-
+- hr_qp->rdb_en = 1;
+ }
+ hr_qp->event = hns_roce_ib_qp_event;
+
+@@ -970,7 +969,9 @@ int hns_roce_modify_qp(struct ib_qp *ibq
+ (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
+ if (hr_qp->sdb_en == 1) {
+ hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
+- hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
++
++ if (hr_qp->rdb_en == 1)
++ hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
+ } else {
+ dev_warn(dev, "flush cqe is not supported in userspace!\n");
+ goto out;
diff --git a/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch b/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch
new file mode 100644
index 0000000000..741207d5f6
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch
@@ -0,0 +1,124 @@
+From: Lang Cheng <chenglang@huawei.com>
+Date: Fri, 24 May 2019 15:31:23 +0800
+Subject: RDMA/hns: Remove jiffies operation in disable interrupt context
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 669cefb654cb69b280e31380f5fc7e3b5755b0cd
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+In some functions the jiffies operation is unnecessary, and we can
+control the delay using only the mdelay and udelay functions. In
+particular, hns_roce_v1_clear_hem calls spin_lock_irqsave, so the context
+runs with interrupts disabled and we cannot use jiffies-based timeouts or
+the msleep function there.
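+
+The countdown pattern, sketched with a hypothetical hw_ready() check
+(illustrative only; the real loops poll bt_cmd or a completion):
+
+	int timeout = HW_SYNC_TIMEOUT_MSECS;
+
+	while (timeout > 0) {
+		if (hw_ready())		/* hypothetical readiness check */
+			break;
+		mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
+		timeout -= HW_SYNC_SLEEP_TIME_INTERVAL;
+	}
+	if (timeout <= 0)
+		return -EBUSY;	/* timed out, IRQs still disabled */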
+
+Signed-off-by: Lang Cheng <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hem.c | 21 +++++++++++----------
+ drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 19 ++++++++++---------
+ 2 files changed, 21 insertions(+), 19 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -376,18 +376,19 @@ static int hns_roce_set_hem(struct hns_r
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+- while (1) {
+- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+- if (!(time_before(jiffies, end))) {
+- dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+- spin_unlock_irqrestore(lock, flags);
+- return -EBUSY;
+- }
+- } else {
++ end = HW_SYNC_TIMEOUT_MSECS;
++ while (end) {
++ if (!readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)
+ break;
+- }
++
+ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
++ end -= HW_SYNC_SLEEP_TIME_INTERVAL;
++ }
++
++ if (end <= 0) {
++ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
++ spin_unlock_irqrestore(lock, flags);
++ return -EBUSY;
+ }
+
+ bt_cmd_l = (u32)bt_ba;
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+@@ -960,8 +960,7 @@ static int hns_roce_v1_recreate_lp_qp(st
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+- unsigned long end =
+- msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies;
++ unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
+
+ priv = (struct hns_roce_v1_priv *)hr_dev->priv;
+ free_mr = &priv->free_mr;
+@@ -981,10 +980,11 @@ static int hns_roce_v1_recreate_lp_qp(st
+
+ queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
+
+- while (time_before_eq(jiffies, end)) {
++ while (end) {
+ if (try_wait_for_completion(&comp))
+ return 0;
+ msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
++ end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
+ }
+
+ lp_qp_work->comp_flag = 0;
+@@ -1098,8 +1098,7 @@ static int hns_roce_v1_dereg_mr(struct h
+ struct hns_roce_free_mr *free_mr;
+ struct hns_roce_v1_priv *priv;
+ struct completion comp;
+- unsigned long end =
+- msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
++ unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
+ unsigned long start = jiffies;
+ int npages;
+ int ret = 0;
+@@ -1129,10 +1128,11 @@ static int hns_roce_v1_dereg_mr(struct h
+
+ queue_work(free_mr->free_mr_wq, &(mr_work->work));
+
+- while (time_before_eq(jiffies, end)) {
++ while (end) {
+ if (try_wait_for_completion(&comp))
+ goto free_mr;
+ msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
++ end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
+ }
+
+ mr_work->comp_flag = 0;
+@@ -2502,10 +2502,10 @@ static int hns_roce_v1_clear_hem(struct
+
+ bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+- end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
++ end = HW_SYNC_TIMEOUT_MSECS;
+ while (1) {
+ if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+- if (!(time_before(jiffies, end))) {
++ if (end < 0) {
+ dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+ spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+ flags);
+@@ -2514,7 +2514,8 @@ static int hns_roce_v1_clear_hem(struct
+ } else {
+ break;
+ }
+- msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
++ mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
++ end -= HW_SYNC_SLEEP_TIME_INTERVAL;
+ }
+
+ bt_cmd_val[0] = (__le32)bt_ba;
diff --git a/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch b/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch
new file mode 100644
index 0000000000..02c32d2774
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch
@@ -0,0 +1,41 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 31 Jan 2019 15:19:21 +0000
+Subject: RDMA/hns: Remove set but not used variable 'rst'
+Patch-mainline: v5.1-rc1
+Git-commit: da91ddfdc7212e6e716be55a5cf2305ce84a422f
+References: bsc#1104427 FATE#326416 bsc#1126206
+
+Fixes gcc '-Wunused-but-set-variable' warning:
+
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c: In function 'hns_roce_v2_qp_flow_control_init':
+drivers/infiniband/hw/hns/hns_roce_hw_v2.c:4384:33: warning:
+ variable 'rst' set but not used [-Wunused-but-set-variable]
+
+It has never been used since it was introduced.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4381,7 +4381,7 @@ static int hns_roce_v2_destroy_qp(struct
+ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+ {
+- struct hns_roce_sccc_clr_done *rst, *resp;
++ struct hns_roce_sccc_clr_done *resp;
+ struct hns_roce_sccc_clr *clr;
+ struct hns_roce_cmq_desc desc;
+ int ret, i;
+@@ -4390,7 +4390,6 @@ static int hns_roce_v2_qp_flow_control_i
+
+ /* set scc ctx clear done flag */
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
+- rst = (struct hns_roce_sccc_clr_done *)desc.data;
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret) {
+ dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
diff --git a/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch b/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch
new file mode 100644
index 0000000000..5141ca4e8d
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch
@@ -0,0 +1,33 @@
+From: Yixian Liu <liuyixian@huawei.com>
+Date: Sun, 3 Feb 2019 16:13:05 +0800
+Subject: RDMA/hns: Set allocated memory to zero for wrid
+Patch-mainline: v5.1-rc1
+Git-commit: f7f27a5f03cc9f47cc14f75a5be25f0f26b1b5ff
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+The memory allocated for wrid should be initialized to zero.
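+
+For reference, kcalloc() behaves like kmalloc_array() plus __GFP_ZERO,
+so the overflow-checked multiplication is kept while the buffer is
+zeroed:
+
+	hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
+				 GFP_KERNEL);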
+
+Signed-off-by: Yixian Liu <liuyixian@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_qp.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -764,10 +764,10 @@ static int hns_roce_create_qp_common(str
+ goto err_mtt;
+ }
+
+- hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64),
+- GFP_KERNEL);
+- hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64),
+- GFP_KERNEL);
++ hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
++ GFP_KERNEL);
++ hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
++ GFP_KERNEL);
+ if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) {
+ ret = -ENOMEM;
+ goto err_wrid;
diff --git a/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch b/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch
new file mode 100644
index 0000000000..406090e514
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch
@@ -0,0 +1,29 @@
+From: chenglang <chenglang@huawei.com>
+Date: Sun, 7 Apr 2019 13:23:37 +0800
+Subject: RDMA/hns: Support to create 1M srq queue
+Patch-mainline: v5.2-rc1
+Git-commit: 2b277dae0679c8177f161278dbad035688838d6e
+References: bsc#1104427 FATE#326416
+
+In mhop 0 mode, 64*bt_num queues can be supported.
+In mhop 1 mode, 32K*bt_num queues can be supported.
+Configure srqc_hop_num to 1 to support 1M SRQ queues.
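+
+Worked example with an illustrative bt_num of 32 (assumed value, using
+the figures above):
+
+	mhop 0:  64 * 32 =  2K SRQ contexts
+	mhop 1: 32K * 32 =  1M SRQ contexts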
+
+Signed-off-by: chenglang <chenglang@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1559,7 +1559,7 @@ static int hns_roce_v2_profile(struct hn
+ caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->srqc_ba_pg_sz = 0;
+ caps->srqc_buf_pg_sz = 0;
+- caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0;
++ caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
+ caps->cqc_ba_pg_sz = 0;
+ caps->cqc_buf_pg_sz = 0;
+ caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
diff --git a/patches.drivers/RDMA-hns-Update-CQE-specifications.patch b/patches.drivers/RDMA-hns-Update-CQE-specifications.patch
new file mode 100644
index 0000000000..8ce6152688
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Update-CQE-specifications.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Fri, 24 May 2019 15:31:21 +0800
+Subject: RDMA/hns: Update CQE specifications
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 0502849d0bb133b492eed24fd270441e652c84cc
+References: bsc#1104427 FATE#326416 bsc#1137236
+
+According to the hip08 UM (User Manual), the maximum number of CQEs
+supported by each CQ is 4M.
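+
+In hex, the new per-CQ limit below is:
+
+	0x400000 = 4 * 1024 * 1024 = 4M CQEs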
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -54,7 +54,7 @@
+ #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+ #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
+-#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
++#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
+ #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
+ #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
+ #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
diff --git a/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch b/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch
new file mode 100644
index 0000000000..cfa8610d7d
--- /dev/null
+++ b/patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch
@@ -0,0 +1,29 @@
+From: Lijun Ou <oulijun@huawei.com>
+Date: Sat, 23 Feb 2019 20:01:23 +0800
+Subject: RDMA/hns: Update the range of raq_psn field of qp context
+Patch-mainline: v5.2-rc1
+Git-commit: 834fa8cf6f7002706b02873fc0d16f9b06ef4819
+References: bsc#1104427 FATE#326416
+
+According to the hip08 UM (User Manual), the raq_psn field size is [23:0].
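+
+A 24-bit field based at bit 0 maps onto the standard kernel macro as:
+
+	GENMASK(23, 0)	/* == 0x00ffffff, i.e. bits [23:0] */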
+
+Signed-off-by: Lijun Ou <oulijun@huawei.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -719,8 +719,8 @@ struct hns_roce_v2_qp_context {
+ #define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24
+ #define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24)
+
+-#define V2_QPC_BYTE_152_RAQ_PSN_S 8
+-#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8)
++#define V2_QPC_BYTE_152_RAQ_PSN_S 0
++#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0)
+
+ #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24
+ #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24)
diff --git a/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch b/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch
new file mode 100644
index 0000000000..fa9bb4ed91
--- /dev/null
+++ b/patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch
@@ -0,0 +1,89 @@
+From: Sagiv Ozeri <sagiv.ozeri@marvell.com>
+Date: Mon, 20 May 2019 12:33:20 +0300
+Subject: RDMA/qedr: Fix incorrect device rate.
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma.git
+Git-commit: 69054666df0a9b4e8331319f98b6b9a88bc3fcc4
+References: bsc#1136188
+
+Use the correct enum value introduced in commit 12113a35ada6 ("IB/core:
+Add HDR speed enum"). Prior to this change a 50Gbps port would show
+40Gbps.
+
+This patch also cleans up the redundant redefinition of IB speeds in
+qedr.
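+
+For reference, with nominal per-lane IB rates (illustrative arithmetic):
+
+	before: IB_SPEED_QDR * IB_WIDTH_4X = 10 Gbps * 4 = 40 Gbps
+	after:  IB_SPEED_HDR * IB_WIDTH_1X = 50 Gbps * 1 = 50 Gbps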
+
+Fixes: 12113a35ada6 ("IB/core: Add HDR speed enum")
+Signed-off-by: Sagiv Ozeri <sagiv.ozeri@marvell.com>
+Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/qedr/verbs.c | 25 +++++++++----------------
+ 1 file changed, 9 insertions(+), 16 deletions(-)
+
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -158,54 +158,47 @@ int qedr_query_device(struct ib_device *
+ return 0;
+ }
+
+-#define QEDR_SPEED_SDR (1)
+-#define QEDR_SPEED_DDR (2)
+-#define QEDR_SPEED_QDR (4)
+-#define QEDR_SPEED_FDR10 (8)
+-#define QEDR_SPEED_FDR (16)
+-#define QEDR_SPEED_EDR (32)
+-
+ static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+ {
+ switch (speed) {
+ case 1000:
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+- *ib_speed = QEDR_SPEED_DDR;
++ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+- *ib_speed = QEDR_SPEED_QDR;
+- *ib_width = IB_WIDTH_4X;
++ *ib_speed = IB_SPEED_HDR;
++ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 100000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+ }
diff --git a/patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch b/patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch
new file mode 100644
index 0000000000..3d449d076b
--- /dev/null
+++ b/patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch
@@ -0,0 +1,166 @@
+From: Jian Shen <shenjian15@huawei.com>
+Date: Fri, 19 Apr 2019 11:05:42 +0800
+Subject: net: hns3: dump more information when tx timeout happens
+Patch-mainline: v5.2-rc1
+Git-commit: e511c97d0a26454dc2b4b478a7fd90802fca0b6a
+References: bsc#1104353 FATE#326415 bsc#1134990
+
+Currently we print only a little information when a tx timeout happens.
+In order to help find the cause of the timeout, this patch prints more
+information about the packet statistics, tqp registers and napi state.
+
+Signed-off-by: Jian Shen <shenjian15@huawei.com>
+Signed-off-by: Peng Li <lipeng321@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hnae3.h | 3
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 63 ++++++++++++++--
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 4 -
+ drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 11 ++
+ 4 files changed, 72 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+@@ -392,7 +392,8 @@ struct hnae3_ae_ops {
+ void (*update_stats)(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats);
+ void (*get_stats)(struct hnae3_handle *handle, u64 *data);
+-
++ void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt,
++ u64 *rx_cnt);
+ void (*get_strings)(struct hnae3_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1628,9 +1628,15 @@ static int hns3_nic_change_mtu(struct ne
+ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+ {
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
++ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hns3_enet_ring *tx_ring = NULL;
++ struct napi_struct *napi;
+ int timeout_queue = 0;
+ int hw_head, hw_tail;
++ int fbd_num, fbd_oft;
++ int ebd_num, ebd_oft;
++ int bd_num, bd_err;
++ int ring_en, tc;
+ int i;
+
+ /* Find the stopped queue the same way the stack does */
+@@ -1658,20 +1664,63 @@ static bool hns3_get_tx_timeo_queue_info
+ priv->tx_timeout_count++;
+
+ tx_ring = priv->ring_data[timeout_queue].ring;
++ napi = &tx_ring->tqp_vector->napi;
++
++ netdev_info(ndev,
++ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
++ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
++ tx_ring->next_to_clean, napi->state);
++
++ netdev_info(ndev,
++ "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
++ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
++ tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
++
++ netdev_info(ndev,
++ "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
++ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
++ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
++
++ /* When mac received many pause frames continuous, it's unable to send
++ * packets, which may cause tx timeout
++ */
++ if (h->ae_algo->ops->update_stats &&
++ h->ae_algo->ops->get_mac_pause_stats) {
++ u64 tx_pause_cnt, rx_pause_cnt;
++
++ h->ae_algo->ops->update_stats(h, &ndev->stats);
++ h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt,
++ &rx_pause_cnt);
++ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
++ tx_pause_cnt, rx_pause_cnt);
++ }
+
+ hw_head = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ hw_tail = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
++ fbd_num = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_FBDNUM_REG);
++ fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_OFFSET_REG);
++ ebd_num = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_EBDNUM_REG);
++ ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_EBD_OFFSET_REG);
++ bd_num = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_BD_NUM_REG);
++ bd_err = readl_relaxed(tx_ring->tqp->io_base +
++ HNS3_RING_TX_RING_BD_ERR_REG);
++ ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
++ tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
++
+ netdev_info(ndev,
+- "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
+- priv->tx_timeout_count,
+- timeout_queue,
+- tx_ring->next_to_use,
+- tx_ring->next_to_clean,
+- hw_head,
+- hw_tail,
++ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
++ bd_num, hw_head, hw_tail, bd_err,
+ readl(tx_ring->tqp_vector->mask_addr));
++ netdev_info(ndev,
++ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
++ ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
+
+ return true;
+ }
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -42,8 +42,10 @@ enum hns3_nic_state {
+ #define HNS3_RING_TX_RING_HEAD_REG 0x0005C
+ #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
+ #define HNS3_RING_TX_RING_OFFSET_REG 0x00064
++#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
+ #define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+-
++#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
++#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
+ #define HNS3_RING_PREFETCH_EN_REG 0x0007C
+ #define HNS3_RING_CFG_VF_NUM_REG 0x00080
+ #define HNS3_RING_ASID_REG 0x0008C
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -699,6 +699,16 @@ static void hclge_get_stats(struct hnae3
+ p = hclge_tqps_get_stats(handle, p);
+ }
+
++static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
++ u64 *rx_cnt)
++{
++ struct hclge_vport *vport = hclge_get_vport(handle);
++ struct hclge_dev *hdev = vport->back;
++
++ *tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
++ *rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
++}
++
+ static int hclge_parse_func_status(struct hclge_dev *hdev,
+ struct hclge_func_status_cmd *status)
+ {
+@@ -8532,6 +8542,7 @@ static const struct hnae3_ae_ops hclge_o
+ .set_mtu = hclge_set_mtu,
+ .reset_queue = hclge_reset_tqp,
+ .get_stats = hclge_get_stats,
++ .get_mac_pause_stats = hclge_get_mac_pause_stat,
+ .update_stats = hclge_update_stats,
+ .get_strings = hclge_get_strings,
+ .get_sset_count = hclge_get_sset_count,
diff --git a/patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch b/patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch
new file mode 100644
index 0000000000..3ac384017d
--- /dev/null
+++ b/patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch
@@ -0,0 +1,32 @@
+From: Yunsheng Lin <linyunsheng@huawei.com>
+Date: Tue, 28 May 2019 17:02:52 +0800
+Subject: net: hns3: fix for HNS3_RXD_GRO_SIZE_M macro
+Patch-mainline: Queued in subsystem maintainer repository
+Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+Git-commit: eff858c178fcc513e620bb803b4e3bfb9727856c
+References: bsc#1104353 FATE#326415 bsc#1137201
+
+According to the hardware user manual, the GRO_SIZE field is 14 bits
+wide, but HNS3_RXD_GRO_SIZE_M is currently only 10 bits wide, which may
+cause errors in packets received via hardware GRO.
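+
+The mask arithmetic (illustrative):
+
+	0x3ff  == (1 << 10) - 1	/* 10-bit field, truncates GRO_SIZE */
+	0x3fff == (1 << 14) - 1	/* 14-bit field, per the user manual */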
+
+Fixes: a6d53b97a2e7 ("net: hns3: Adds GRO params to SKB for the stack")
+Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
+Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+@@ -145,7 +145,7 @@ enum hns3_nic_state {
+ #define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
+ #define HNS3_RXD_LKBK_B 15
+ #define HNS3_RXD_GRO_SIZE_S 16
+-#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
++#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
+
+ #define HNS3_TXD_L3T_S 0
+ #define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
diff --git a/series.conf b/series.conf
index 745fd627c3..80ff7d13e3 100644
--- a/series.conf
+++ b/series.conf
@@ -45457,18 +45457,32 @@
patches.suse/msft-hv-1851-PCI-hv-Replace-hv_vp_set-with-hv_vpset.patch
patches.suse/msft-hv-1852-PCI-hv-Refactor-hv_irq_unmask-to-use-cpumask_to_vpse.patch
patches.drivers/iw_cxgb4-Check-for-send-WR-also-while-posting-write-.patch
+ patches.drivers/RDMA-hns-Fix-the-bug-with-updating-rq-head-pointer-w.patch
+ patches.drivers/RDMA-hns-Bugfix-for-the-scene-without-receiver-queue.patch
+ patches.drivers/RDMA-hns-Add-constraint-on-the-setting-of-local-ACK-.patch
+ patches.drivers/RDMA-hns-Modify-the-pbl-ba-page-size-for-hip08.patch
patches.drivers/RDMA-qedr-Fix-out-of-bounds-index-check-in-query-pke.patch
patches.drivers/RDMA-bnxt_re-fix-a-size-calculation.patch
+ patches.drivers/RDMA-hns-RDMA-hns-Assign-rq-head-pointer-when-enable.patch
patches.drivers/IB-rdmavt-Add-wc_flags-and-wc_immdata-to-cq-entry-tr.patch
patches.drivers/IB-hw-Remove-unneeded-semicolons.patch
+ patches.drivers/RDMA-hns-Add-the-process-of-AEQ-overflow-for-hip08.patch
patches.drivers/infiniband-hfi1-drop-crazy-DEBUGFS_SEQ_FILE_CREATE-m.patch
patches.drivers/infiniband-hfi1-no-need-to-check-return-value-of-deb.patch
patches.drivers/IB-core-Declare-local-functions-static.patch
patches.drivers/IB-mlx5-Declare-local-functions-static.patch
patches.drivers/RDMA-iw_cxgb4-Drop-__GFP_NOFAIL.patch
+ patches.drivers/RDMA-hns-Add-SCC-context-allocation-support-for-hip0.patch
+ patches.drivers/RDMA-hns-Add-SCC-context-clr-support-for-hip08.patch
+ patches.drivers/RDMA-hns-Add-timer-allocation-support-for-hip08.patch
patches.drivers/IB-core-Destroy-QP-if-XRC-QP-fails.patch
patches.drivers/RDMA-vmw_pvrdma-Support-upto-64-bit-PFNs.patch
patches.drivers/IB-ipoib-Make-ipoib_intercept_dev_id_attr-static.patch
+ patches.drivers/RDMA-hns-Remove-set-but-not-used-variable-rst.patch
+ patches.drivers/RDMA-hns-Make-some-function-static.patch
+ patches.drivers/RDMA-hns-Fix-the-Oops-during-rmmod-or-insmod-ko-when.patch
+ patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-mail.patch
+ patches.drivers/RDMA-hns-Fix-the-chip-hanging-caused-by-sending-door.patch
patches.drivers/iw_cxgb-kzalloc-the-iwcm-verbs-struct.patch
patches.drivers/IB-mlx5-Do-not-use-hw_access_flags-for-be-and-CPU-da.patch
patches.drivers/RDMA-bnxt_re-Add-chip-context-to-identify-57500-seri.patch
@@ -45490,7 +45504,14 @@
patches.drivers/iw_cxgb4-fix-srqidx-leak-during-connection-abort.patch
patches.drivers/RDMA-mlx5-Fix-memory-leak-in-case-we-fail-to-add-an-.patch
patches.drivers/RDMA-bnxt_re-fix-or-ing-of-data-into-an-uninitialize.patch
+ patches.drivers/RDMA-hns-Limit-minimum-ROCE-CQ-depth-to-64.patch
+ patches.drivers/RDMA-hns-Fix-the-state-of-rereg-mr.patch
+ patches.drivers/RDMA-hns-Set-allocated-memory-to-zero-for-wrid.patch
+ patches.drivers/RDMA-hns-Delete-useful-prints-for-aeq-subtype-event.patch
+ patches.drivers/RDMA-hns-Configure-capacity-of-hns-device.patch
patches.drivers/RDMA-cxgb4-Remove-kref-accounting-for-sync-operation.patch
+ patches.drivers/RDMA-hns-Modify-qp-cq-pd-specification-according-to-.patch
+ patches.drivers/RDMA-hns-Bugfix-for-set-hem-of-SCC.patch
patches.drivers/iw_cxgb4-Make-function-read_tcb-static.patch
patches.drivers/RDMA-iwcm-Fix-string-truncation-error.patch
patches.drivers/IB-rdmavt-Fix-loopback-send-with-invalidate-ordering.patch
@@ -45984,6 +46005,7 @@
patches.drivers/IB-hfi1-Eliminate-opcode-tests-on-mr-deref.patch
patches.drivers/IB-hfi1-Fix-the-allocation-of-RSM-table.patch
patches.drivers/RDMA-hns-Fix-bug-that-caused-srq-creation-to-fail.patch
+ patches.drivers/RDMA-hns-Bugfix-for-SCC-hem-free.patch
patches.suse/btrfs-do-not-allow-trimming-when-a-fs-is-mounted-wit.patch
patches.drm/0001-drm-udl-add-a-release-method-and-delay-modeset-teard.patch
patches.drm/0003-drm-mediatek-Fix-an-error-code-in-mtk_hdmi_dt_parse_.patch
@@ -46132,6 +46154,7 @@
patches.drivers/Input-synaptics-rmi4-write-config-register-values-to.patch
patches.drivers/dmaengine-sh-rcar-dmac-With-cyclic-DMA-residue-0-is-.patch
patches.drivers/IB-rdmavt-Fix-frwr-memory-registration.patch
+ patches.drivers/RDMA-hns-Bugfix-for-mapping-user-db.patch
patches.fixes/selinux-use-kernel-linux-socket.h-for-genheaders-and-mdp
patches.drivers/USB-core-Fix-unterminated-string-returned-by-usb_str.patch
patches.drivers/USB-core-Fix-bug-caused-by-duplicate-interface-PM-us.patch
@@ -46396,6 +46419,7 @@
patches.drivers/net-hns3-add-some-debug-info-for-hclgevf_get_mbx_res.patch
patches.drivers/net-hns3-refine-tx-timeout-count-handle.patch
patches.drivers/net-hns3-fix-loop-condition-of-hns3_get_tx_timeo_que.patch
+ patches.drivers/net-hns3-dump-more-information-when-tx-timeout-happe.patch
patches.drivers/net-hns3-Add-support-for-netif-message-level-setting.patch
patches.drivers/net-hns3-add-support-for-dump-ncl-config-by-debugfs.patch
patches.drivers/net-hns3-Add-handling-of-MAC-tunnel-interruption.patch
@@ -46604,13 +46628,23 @@
patches.drivers/ALSA-hda-realtek-Support-low-power-consumption-ALC256.patch
patches.drivers/dmaengine-axi-dmac-Don-t-check-the-number-of-frames-.patch
patches.drivers/dmaengine-tegra210-dma-free-dma-controller-in-remove.patch
+ patches.drivers/RDMA-hns-Only-assign-the-relatived-fields-of-psn-if-.patch
+ patches.drivers/RDMA-hns-Only-assign-the-fields-of-the-rq-psn-if-IB_.patch
+ patches.drivers/RDMA-hns-Update-the-range-of-raq_psn-field-of-qp-con.patch
+ patches.drivers/RDMA-hns-Only-assgin-some-fields-if-the-relatived-at.patch
+ patches.drivers/RDMA-hns-Hide-error-print-information-with-roce-vf-d.patch
+ patches.drivers/RDMA-hns-Bugfix-for-sending-with-invalidate.patch
+ patches.drivers/RDMA-hns-Delete-unused-variable-in-hns_roce_v2_modif.patch
patches.drivers/RDMA-i40iw-Handle-workqueue-allocation-failure.patch
patches.fixes/overflow-Fix-Wtype-limits-compilation-warnings.patch
+ patches.drivers/RDMA-hns-Limit-scope-of-hns_roce_cmq_send.patch
patches.drivers/IB-hfi1-Fix-two-format-strings.patch
patches.drivers/RDMA-cxbg-Use-correct-sizing-on-buffers-holding-page.patch
patches.drivers/RDMA-rdmavt-Use-correct-sizing-on-buffers-holding-pa.patch
+ patches.drivers/RDMA-hns-Fix-bad-endianess-of-port_pd-variable.patch
patches.drivers/IB-hfi1-Remove-WARN_ON-when-freeing-expected-receive.patch
patches.drivers/RDMA-iw_cxgb4-Always-disconnect-when-QP-is-transitio.patch
+ patches.drivers/RDMA-hns-Support-to-create-1M-srq-queue.patch
patches.drivers/RDMA-cxgb4-Fix-null-pointer-dereference-on-alloc_skb.patch
patches.drivers/RDMA-cxgb4-Fix-spelling-mistake-immedate-immediate.patch
patches.drivers/IB-hfi1-Add-debugfs-to-control-expansion-ROM-write-p.patch
@@ -46760,12 +46794,20 @@
patches.drivers/qed-Set-the-doorbell-address-correctly.patch
patches.drivers/qed-Add-qed-devlink-parameters-table.patch
patches.drivers/qed-Add-iWARP-100g-support.patch
+ patches.drivers/net-hns3-fix-for-HNS3_RXD_GRO_SIZE_M-macro.patch
patches.drivers/qed-Reduce-the-severity-of-ptp-debug-message.patch
patches.drivers/qede-Handle-infinite-driver-spinning-for-Tx-timestam.patch
patches.drivers/qed-fix-spelling-mistake-inculde-include.patch
patches.drivers/qed-Fix-static-checker-warning-8e2ea3ea.patch
patches.drivers/qed-remove-redundant-assignment-to-rc.patch
+ # rdma/rdma for-next
+ patches.drivers/RDMA-qedr-Fix-incorrect-device-rate.patch
+ patches.drivers/RDMA-hns-Update-CQE-specifications.patch
+ patches.drivers/RDMA-hns-Move-spin_lock_irqsave-to-the-correct-place.patch
+ patches.drivers/RDMA-hns-Remove-jiffies-operation-in-disable-interru.patch
+ patches.drivers/RDMA-hns-Bugfix-for-posting-multiple-srq-work-reques.patch
+
# dhowells/linux-fs keys-uefi
patches.suse/0001-KEYS-Allow-unrestricted-boot-time-addition-of-keys-t.patch
patches.suse/0002-efi-Add-EFI-signature-data-types.patch