author    Petr Tesarik <ptesarik@suse.cz>  2019-06-07 15:05:05 +0200
committer Petr Tesarik <ptesarik@suse.cz>  2019-06-07 15:05:05 +0200
commit    c80d15dc4ab685f81a9f58d8152c86f85e570267
tree      7231620586d98ad478eaedb1cf4f9c550a924b19
parent    96baba769965571aa583c8bb5087f1b40417f58b
parent    80170683545af31ab026f8826c25c8deb00bcf16
Merge branch 'SLE15-SP1' into SLE12-SP5
(tags: rpm-4.12.14-100, rpm-4.12.14-100--sle12-sp5-ga, rpm-4.12.14-100--SLE-12-SP5-Server-Beta1, rpm-4.12.14-100--SLE-12-SP5-Server-Beta2, rpm-4.12.14-100--SLE-12-SP5-SAP-Beta1, rpm-4.12.14-100--SLE-12-SP5-SAP-Beta2, rpm-4.12.14-100--SLE-12-SP5-HPC-Beta1, rpm-4.12.14-100--SLE-12-SP5-HPC-Beta2, rpm-4.12.14-100--SLE-12-SP5-Desktop-Beta1, rpm-4.12.14-100--SLE-12-SP5-Desktop-Beta2)
- Delete patches.kabi/*
Conflicts:
patches.kabi/qla2xxx-kABI-fixes-for-v10.00.00.14-k.patch
series.conf
suse-commit: ddfc5320c10e71daa0be2db816360c736070d8ec
59 files changed, 5783 insertions, 2841 deletions
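The bulk of this merge backports HiSilicon hns RoCE driver changes: reset awareness for the mailbox/CMQ/doorbell paths (a new rst_prc_mbox hook plus dis_db and reset_cnt state), SCC context and QPC/CQC timer HEM tables for the hip08 revision 0x21, QP attributes (PSNs, retry counts, timers) moved under their proper attr_mask checks in modify_qp, and a bitmap-based SRQ index queue. The sketch below restates the reset-aware mailbox wrapper added in hns_roce_cmd.c; the struct layout and the send_mbox() helper are simplified placeholders for illustration, not the driver's actual API.

	/*
	 * Illustrative sketch only -- a simplified restatement of the
	 * reset-aware mailbox path from this merge's hns_roce_cmd.c hunk.
	 * Stub types and send_mbox() are placeholders, not driver API.
	 */
	#include <errno.h>
	#include <stddef.h>

	enum { CMD_RST_PRC_OTHERS, CMD_RST_PRC_SUCCESS, CMD_RST_PRC_EBUSY };

	struct hns_roce_dev {
		/* returns one of the CMD_RST_PRC_* codes; NULL on hw v1 */
		int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev);
	};

	static int send_mbox(struct hns_roce_dev *hr_dev)
	{
		(void)hr_dev;
		return 0;	/* stand-in for the event/poll mailbox submission */
	}

	static int cmd_mbox_sketch(struct hns_roce_dev *hr_dev)
	{
		int ret;

		/* Before touching hardware: bail out early if a reset is known. */
		if (hr_dev->rst_prc_mbox) {
			ret = hr_dev->rst_prc_mbox(hr_dev);
			if (ret == CMD_RST_PRC_SUCCESS)
				return 0;	/* reset done; HW state is gone anyway */
			else if (ret == CMD_RST_PRC_EBUSY)
				return -EBUSY;	/* reset in flight; caller retries */
		}

		ret = send_mbox(hr_dev);

		/* After the attempt: a failure during reset is expected, mask it. */
		if (ret == CMD_RST_PRC_EBUSY)
			return -EBUSY;
		if (ret && hr_dev->rst_prc_mbox &&
		    hr_dev->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS)
			return 0;

		return ret;
	}

The design choice mirrored here is deliberately asymmetric: once a hardware reset has completed, command failures are reported as success so teardown paths can proceed even though the device state is already gone, while an in-progress reset surfaces as -EBUSY so init-time callers back off and retry instead of operating on a half-reset device.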
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index 9990dc9eb96a..8325e593d538 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -69,7 +69,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, HNS_ROCE_VLAN_SL_BIT_MASK) << HNS_ROCE_VLAN_SL_SHIFT; - ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn | + ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn | (rdma_ah_get_port_num(ah_attr) << HNS_ROCE_PORT_NUM_SHIFT)); ah->av.gid_index = grh->sgid_index; diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index a0ba19d4a10e..2acf946d02e5 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -176,17 +176,33 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, unsigned long in_modifier, u8 op_modifier, u16 op, unsigned long timeout) { - if (hr_dev->is_reset) - return 0; + int ret; + + if (hr_dev->hw->rst_prc_mbox) { + ret = hr_dev->hw->rst_prc_mbox(hr_dev); + if (ret == CMD_RST_PRC_SUCCESS) + return 0; + else if (ret == CMD_RST_PRC_EBUSY) + return -EBUSY; + } if (hr_dev->cmd.use_events) - return hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); + ret = hns_roce_cmd_mbox_wait(hr_dev, in_param, out_param, + in_modifier, op_modifier, op, + timeout); else - return hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, - in_modifier, op_modifier, op, - timeout); + ret = hns_roce_cmd_mbox_poll(hr_dev, in_param, out_param, + in_modifier, op_modifier, op, + timeout); + + if (ret == CMD_RST_PRC_EBUSY) + return -EBUSY; + + if (ret && (hr_dev->hw->rst_prc_mbox && + hr_dev->hw->rst_prc_mbox(hr_dev) == CMD_RST_PRC_SUCCESS)) + return 0; + + return ret; } EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox); diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h index 927701df5eff..059fd1da493e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.h +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h @@ -75,6 +75,10 @@ enum { HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29, HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a, + /* CQC TIMER commands */ + HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0 = 0x23, + HNS_ROCE_CMD_READ_CQC_TIMER_BT0 = 0x27, + /* MPT commands */ HNS_ROCE_CMD_QUERY_MPT = 0x62, @@ -89,6 +93,10 @@ enum { HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39, HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a, + /* QPC TIMER commands */ + HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0 = 0x33, + HNS_ROCE_CMD_READ_QPC_TIMER_BT0 = 0x37, + /* EQC commands */ HNS_ROCE_CMD_CREATE_AEQC = 0x80, HNS_ROCE_CMD_MODIFY_AEQC = 0x81, @@ -98,6 +106,10 @@ enum { HNS_ROCE_CMD_MODIFY_CEQC = 0x91, HNS_ROCE_CMD_QUERY_CEQC = 0x92, HNS_ROCE_CMD_DESTROY_CEQC = 0x93, + + /* SCC CTX BT commands */ + HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4, + HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5, }; enum { diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 83e307807978..1fd32064b9ee 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -202,6 +202,7 @@ enum { HNS_ROCE_CAP_FLAG_SRQ = BIT(5), HNS_ROCE_CAP_FLAG_MW = BIT(7), HNS_ROCE_CAP_FLAG_FRMR = BIT(8), + HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), }; @@ -216,6 +217,32 @@ enum { HNS_ROCE_DB_PER_PAGE = PAGE_SIZE / 4 }; +enum hns_roce_reset_stage { + HNS_ROCE_STATE_NON_RST, + HNS_ROCE_STATE_RST_BEF_DOWN, + HNS_ROCE_STATE_RST_DOWN, + 
HNS_ROCE_STATE_RST_UNINIT, + HNS_ROCE_STATE_RST_INIT, + HNS_ROCE_STATE_RST_INITED, +}; + +enum hns_roce_instance_state { + HNS_ROCE_STATE_NON_INIT, + HNS_ROCE_STATE_INIT, + HNS_ROCE_STATE_INITED, + HNS_ROCE_STATE_UNINIT, +}; + +enum { + HNS_ROCE_RST_DIRECT_RETURN = 0, +}; + +enum { + CMD_RST_PRC_OTHERS, + CMD_RST_PRC_SUCCESS, + CMD_RST_PRC_EBUSY, +}; + #define HNS_ROCE_CMD_SUCCESS 1 #define HNS_ROCE_PORT_DOWN 0 @@ -445,7 +472,7 @@ struct hns_roce_idx_que { u32 buf_size; struct ib_umem *umem; struct hns_roce_mtt mtt; - u64 *bitmap; + unsigned long *bitmap; }; struct hns_roce_srq { @@ -482,6 +509,8 @@ struct hns_roce_qp_table { struct hns_roce_hem_table qp_table; struct hns_roce_hem_table irrl_table; struct hns_roce_hem_table trrl_table; + struct hns_roce_hem_table sccc_table; + struct mutex scc_mutex; }; struct hns_roce_cq_table { @@ -730,6 +759,8 @@ struct hns_roce_caps { u32 max_extend_sg; int num_qps; /* 256k */ int reserved_qps; + int num_qpc_timer; + int num_cqc_timer; u32 max_srq_sg; int num_srqs; u32 max_wqes; /* 16k */ @@ -769,6 +800,9 @@ struct hns_roce_caps { int irrl_entry_sz; int trrl_entry_sz; int cqc_entry_sz; + int sccc_entry_sz; + int qpc_timer_entry_sz; + int cqc_timer_entry_sz; int srqc_entry_sz; int idx_entry_sz; u32 pbl_ba_pg_sz; @@ -778,9 +812,12 @@ struct hns_roce_caps { int ceqe_depth; enum ib_mtu max_mtu; u32 qpc_bt_num; + u32 qpc_timer_bt_num; u32 srqc_bt_num; u32 cqc_bt_num; + u32 cqc_timer_bt_num; u32 mpt_bt_num; + u32 sccc_bt_num; u32 qpc_ba_pg_sz; u32 qpc_buf_pg_sz; u32 qpc_hop_num; @@ -796,6 +833,15 @@ struct hns_roce_caps { u32 mtt_ba_pg_sz; u32 mtt_buf_pg_sz; u32 mtt_hop_num; + u32 sccc_ba_pg_sz; + u32 sccc_buf_pg_sz; + u32 sccc_hop_num; + u32 qpc_timer_ba_pg_sz; + u32 qpc_timer_buf_pg_sz; + u32 qpc_timer_hop_num; + u32 cqc_timer_ba_pg_sz; + u32 cqc_timer_buf_pg_sz; + u32 cqc_timer_hop_num; u32 cqe_ba_pg_sz; u32 cqe_buf_pg_sz; u32 cqe_hop_num; @@ -835,6 +881,7 @@ struct hns_roce_hw { u64 out_param, u32 in_modifier, u8 op_modifier, u16 op, u16 token, int event); int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout); + int (*rst_prc_mbox)(struct hns_roce_dev *hr_dev); int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr); int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); @@ -862,6 +909,8 @@ struct hns_roce_hw { int attr_mask, enum ib_qp_state cur_state, enum ib_qp_state new_state); int (*destroy_qp)(struct ib_qp *ibqp); + int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp); int (*post_send)(struct ib_qp *ibqp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr); int (*post_recv)(struct ib_qp *qp, const struct ib_recv_wr *recv_wr, @@ -897,6 +946,8 @@ struct hns_roce_dev { spinlock_t bt_cmd_lock; bool active; bool is_reset; + bool dis_db; + unsigned long reset_cnt; struct hns_roce_ib_iboe iboe; struct list_head pgdir_list; @@ -921,6 +972,8 @@ struct hns_roce_dev { struct hns_roce_srq_table srq_table; struct hns_roce_qp_table qp_table; struct hns_roce_eq_table eq_table; + struct hns_roce_hem_table qpc_timer_table; + struct hns_roce_hem_table cqc_timer_table; int cmd_mod; int loop_idc; diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index cae23364cfea..d0eacd8c2575 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -45,6 +45,9 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type) 
(hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) || (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) || (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) || + (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) || + (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) || + (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) || (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) || (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) || (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) || @@ -125,6 +128,30 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, mhop->ba_l0_num = hr_dev->caps.cqc_bt_num; mhop->hop_num = hr_dev->caps.cqc_hop_num; break; + case HEM_TYPE_SCCC: + mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = hr_dev->caps.sccc_bt_num; + mhop->hop_num = hr_dev->caps.sccc_hop_num; + break; + case HEM_TYPE_QPC_TIMER: + mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num; + mhop->hop_num = hr_dev->caps.qpc_timer_hop_num; + break; + case HEM_TYPE_CQC_TIMER: + mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz + + PAGE_SHIFT); + mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz + + PAGE_SHIFT); + mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num; + mhop->hop_num = hr_dev->caps.cqc_timer_hop_num; + break; case HEM_TYPE_SRQC: mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz + PAGE_SHIFT); @@ -175,7 +202,7 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev, return 0; /* - * QPC/MTPT/CQC/SRQC alloc hem for buffer pages. + * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages. * MTT/CQE alloc hem for bt pages. */ bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num); @@ -349,18 +376,19 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev, bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!(time_before(jiffies, end))) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(lock, flags); - return -EBUSY; - } - } else { + end = HW_SYNC_TIMEOUT_MSECS; + while (end) { + if (!readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) break; - } + mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); + end -= HW_SYNC_SLEEP_TIME_INTERVAL; + } + + if (end <= 0) { + dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); + spin_unlock_irqrestore(lock, flags); + return -EBUSY; } bt_cmd_l = (u32)bt_ba; @@ -486,7 +514,7 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev, } /* - * alloc buffer space chunk for QPC/MTPT/CQC/SRQC. + * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC. * alloc bt space chunk for MTT/CQE. */ size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size; @@ -593,6 +621,7 @@ out: mutex_unlock(&table->mutex); return ret; } +EXPORT_SYMBOL_GPL(hns_roce_table_get); static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, @@ -658,7 +687,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev, } /* - * free buffer space chunk for QPC/MTPT/CQC/SRQC. + * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC. * free bt space chunk for MTT/CQE. 
*/ hns_roce_free_hem(hr_dev, table->hem[hem_idx]); @@ -735,6 +764,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev, mutex_unlock(&table->mutex); } +EXPORT_SYMBOL_GPL(hns_roce_table_put); void *hns_roce_table_find(struct hns_roce_dev *hr_dev, struct hns_roce_hem_table *table, @@ -906,6 +936,30 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, num_bt_l0 = hr_dev->caps.cqc_bt_num; hop_num = hr_dev->caps.cqc_hop_num; break; + case HEM_TYPE_SCCC: + buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz + + PAGE_SHIFT); + bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz + + PAGE_SHIFT); + num_bt_l0 = hr_dev->caps.sccc_bt_num; + hop_num = hr_dev->caps.sccc_hop_num; + break; + case HEM_TYPE_QPC_TIMER: + buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz + + PAGE_SHIFT); + bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz + + PAGE_SHIFT); + num_bt_l0 = hr_dev->caps.qpc_timer_bt_num; + hop_num = hr_dev->caps.qpc_timer_hop_num; + break; + case HEM_TYPE_CQC_TIMER: + buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz + + PAGE_SHIFT); + bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz + + PAGE_SHIFT); + num_bt_l0 = hr_dev->caps.cqc_timer_bt_num; + hop_num = hr_dev->caps.cqc_timer_hop_num; + break; case HEM_TYPE_SRQC: buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz + PAGE_SHIFT); @@ -1083,6 +1137,15 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->srq_table.table); hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); + if (hr_dev->caps.qpc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qpc_timer_table); + if (hr_dev->caps.cqc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->cqc_timer_table); + if (hr_dev->caps.sccc_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qp_table.sccc_table); if (hr_dev->caps.trrl_entry_sz) hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.trrl_table); diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h index a650278c6fbd..d9d668992e49 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.h +++ b/drivers/infiniband/hw/hns/hns_roce_hem.h @@ -44,6 +44,9 @@ enum { HEM_TYPE_MTPT, HEM_TYPE_CQC, HEM_TYPE_SRQC, + HEM_TYPE_SCCC, + HEM_TYPE_QPC_TIMER, + HEM_TYPE_CQC_TIMER, /* UNMAP HEM */ HEM_TYPE_MTT, diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index ca05810c92dc..a83a59f06665 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -960,8 +960,7 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) struct hns_roce_free_mr *free_mr; struct hns_roce_v1_priv *priv; struct completion comp; - unsigned long end = - msecs_to_jiffies(HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS) + jiffies; + unsigned long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; priv = (struct hns_roce_v1_priv *)hr_dev->priv; free_mr = &priv->free_mr; @@ -981,10 +980,11 @@ static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); - while (time_before_eq(jiffies, end)) { + while (end) { if (try_wait_for_completion(&comp)) return 0; msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); + end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE; } lp_qp_work->comp_flag = 0; @@ -1098,8 +1098,7 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, struct hns_roce_free_mr *free_mr; struct hns_roce_v1_priv *priv; struct completion comp; - unsigned long end = - 
msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; + unsigned long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; unsigned long start = jiffies; int npages; int ret = 0; @@ -1129,10 +1128,11 @@ static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, queue_work(free_mr->free_mr_wq, &(mr_work->work)); - while (time_before_eq(jiffies, end)) { + while (end) { if (try_wait_for_completion(&comp)) goto free_mr; msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); + end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE; } mr_work->comp_flag = 0; @@ -1780,11 +1780,14 @@ static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr) { + unsigned long flags; u32 *p = NULL; u8 gid_idx = 0; gid_idx = hns_get_gid_index(hr_dev, port, gid_index); + spin_lock_irqsave(&hr_dev->iboe.lock, flags); + p = (u32 *)&gid->raw[0]; roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + (HNS_ROCE_V1_GID_NUM * gid_idx)); @@ -1801,6 +1804,8 @@ static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port, roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + (HNS_ROCE_V1_GID_NUM * gid_idx)); + spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); + return 0; } @@ -2497,10 +2502,10 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies; + end = HW_SYNC_TIMEOUT_MSECS; while (1) { if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!(time_before(jiffies, end))) { + if (end < 0) { dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); @@ -2509,7 +2514,8 @@ static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, } else { break; } - msleep(HW_SYNC_SLEEP_TIME_INTERVAL); + mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); + end -= HW_SYNC_SLEEP_TIME_INTERVAL; } bt_cmd_val[0] = (__le32)bt_ba; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 569b8710f571..9931ac8e1097 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -587,7 +587,7 @@ out: roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M, V2_DB_PARAMETER_SL_S, qp->sl); - hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l); + hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l); qp->sq_next_wqe = ind; qp->next_sge = sge_ind; @@ -712,6 +712,113 @@ out: return ret; } +static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev, + unsigned long instance_stage, + unsigned long reset_stage) +{ + /* When hardware reset has been completed once or more, we should stop + * sending mailbox&cmq&doorbell to hardware. If now in .init_instance() + * function, we should exit with error. If now at HNAE3_INIT_CLIENT + * stage of soft reset process, we should exit with error, and then + * HNAE3_INIT_CLIENT related process can rollback the operation like + * notifing hardware to free resources, HNAE3_INIT_CLIENT related + * process will exit with error to notify NIC driver to reschedule soft + * reset process once again. 
+ */ + hr_dev->is_reset = true; + hr_dev->dis_db = true; + + if (reset_stage == HNS_ROCE_STATE_RST_INIT || + instance_stage == HNS_ROCE_STATE_INIT) + return CMD_RST_PRC_EBUSY; + + return CMD_RST_PRC_SUCCESS; +} + +static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev, + unsigned long instance_stage, + unsigned long reset_stage) +{ + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + /* When hardware reset is detected, we should stop sending mailbox&cmq& + * doorbell to hardware. If now in .init_instance() function, we should + * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset + * process, we should exit with error, and then HNAE3_INIT_CLIENT + * related process can rollback the operation like notifing hardware to + * free resources, HNAE3_INIT_CLIENT related process will exit with + * error to notify NIC driver to reschedule soft reset process once + * again. + */ + hr_dev->dis_db = true; + if (!ops->get_hw_reset_stat(handle)) + hr_dev->is_reset = true; + + if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT || + instance_stage == HNS_ROCE_STATE_INIT) + return CMD_RST_PRC_EBUSY; + + return CMD_RST_PRC_SUCCESS; +} + +static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + /* When software reset is detected at .init_instance() function, we + * should stop sending mailbox&cmq&doorbell to hardware, and exit + * with error. + */ + hr_dev->dis_db = true; + if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) + hr_dev->is_reset = true; + + return CMD_RST_PRC_EBUSY; +} + +static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + unsigned long instance_stage; /* the current instance stage */ + unsigned long reset_stage; /* the current reset stage */ + unsigned long reset_cnt; + bool sw_resetting; + bool hw_resetting; + + if (hr_dev->is_reset) + return CMD_RST_PRC_SUCCESS; + + /* Get information about reset from NIC driver or RoCE driver itself, + * the meaning of the following variables from NIC driver are described + * as below: + * reset_cnt -- The count value of completed hardware reset. + * hw_resetting -- Whether hardware device is resetting now. + * sw_resetting -- Whether NIC's software reset process is running now. 
+ */ + instance_stage = handle->rinfo.instance_state; + reset_stage = handle->rinfo.reset_state; + reset_cnt = ops->ae_dev_reset_cnt(handle); + hw_resetting = ops->get_hw_reset_stat(handle); + sw_resetting = ops->ae_dev_resetting(handle); + + if (reset_cnt != hr_dev->reset_cnt) + return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage, + reset_stage); + else if (hw_resetting) + return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage, + reset_stage); + else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) + return hns_roce_v2_cmd_sw_resetting(hr_dev); + + return 0; +} + static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring) { int ntu = ring->next_to_use; @@ -892,8 +999,8 @@ static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev) return clean; } -static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, - struct hns_roce_cmq_desc *desc, int num) +static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, int num) { struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; @@ -905,9 +1012,6 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, int ret = 0; int ntc; - if (hr_dev->is_reset) - return 0; - spin_lock_bh(&csq->lock); if (num > hns_roce_cmq_space(csq)) { @@ -982,6 +1086,30 @@ static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, return ret; } +static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev, + struct hns_roce_cmq_desc *desc, int num) +{ + int retval; + int ret; + + ret = hns_roce_v2_rst_process_cmd(hr_dev); + if (ret == CMD_RST_PRC_SUCCESS) + return 0; + if (ret == CMD_RST_PRC_EBUSY) + return ret; + + ret = __hns_roce_cmq_send(hr_dev, desc, num); + if (ret) { + retval = hns_roce_v2_rst_process_cmd(hr_dev); + if (retval == CMD_RST_PRC_SUCCESS) + return 0; + else if (retval == CMD_RST_PRC_EBUSY) + return retval; + } + + return ret; +} + static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev) { struct hns_roce_query_version *resp; @@ -1078,6 +1206,44 @@ static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev) hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num, PF_RES_DATA_3_PF_SL_NUM_M, PF_RES_DATA_3_PF_SL_NUM_S); + hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num, + PF_RES_DATA_4_PF_SCCC_BT_NUM_M, + PF_RES_DATA_4_PF_SCCC_BT_NUM_S); + + return 0; +} + +static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev) +{ + struct hns_roce_pf_timer_res_a *req_a; + struct hns_roce_cmq_desc desc[2]; + int ret, i; + + for (i = 0; i < 2; i++) { + hns_roce_cmq_setup_basic_desc(&desc[i], + HNS_ROCE_OPC_QUERY_PF_TIMER_RES, + true); + + if (i == 0) + desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + else + desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT); + } + + ret = hns_roce_cmq_send(hr_dev, desc, 2); + if (ret) + return ret; + + req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data; + + hr_dev->caps.qpc_timer_bt_num = + roce_get_field(req_a->qpc_timer_bt_idx_num, + PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M, + PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S); + hr_dev->caps.cqc_timer_bt_num = + roce_get_field(req_a->cqc_timer_bt_idx_num, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M, + PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S); return 0; } @@ -1193,6 +1359,14 @@ static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev) VF_RES_B_DATA_3_VF_SL_NUM_M, VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM); + + roce_set_field(req_b->vf_sccc_idx_num, + VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M, + 
VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0); + roce_set_field(req_b->vf_sccc_idx_num, + VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M, + VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S, + HNS_ROCE_VF_SCCC_BT_NUM); } } @@ -1205,6 +1379,7 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) u8 qpc_hop_num = hr_dev->caps.qpc_hop_num; u8 cqc_hop_num = hr_dev->caps.cqc_hop_num; u8 mpt_hop_num = hr_dev->caps.mpt_hop_num; + u8 sccc_hop_num = hr_dev->caps.sccc_hop_num; struct hns_roce_cfg_bt_attr *req; struct hns_roce_cmq_desc desc; @@ -1252,6 +1427,20 @@ static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev) CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S, mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num); + roce_set_field(req->vf_sccc_cfg, + CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M, + CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S, + hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(req->vf_sccc_cfg, + CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M, + CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S, + hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET); + roce_set_field(req->vf_sccc_cfg, + CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M, + CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S, + sccc_hop_num == + HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num); + return hns_roce_cmq_send(hr_dev, &desc, 1); } @@ -1289,6 +1478,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) return ret; } + if (hr_dev->pci_dev->revision == 0x21) { + ret = hns_roce_query_pf_timer_resource(hr_dev); + if (ret) { + dev_err(hr_dev->dev, + "Query pf timer resource fail, ret = %d.\n", + ret); + return ret; + } + } + ret = hns_roce_alloc_vf_resource(hr_dev); if (ret) { dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n", @@ -1313,6 +1512,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM; caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM; caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM; + caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM; caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM; caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM; @@ -1359,14 +1559,14 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; caps->srqc_ba_pg_sz = 0; caps->srqc_buf_pg_sz = 0; - caps->srqc_hop_num = HNS_ROCE_HOP_NUM_0; + caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; caps->cqc_ba_pg_sz = 0; caps->cqc_buf_pg_sz = 0; caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; caps->mpt_ba_pg_sz = 0; caps->mpt_buf_pg_sz = 0; caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM; - caps->pbl_ba_pg_sz = 0; + caps->pbl_ba_pg_sz = 2; caps->pbl_buf_pg_sz = 0; caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; caps->mtt_ba_pg_sz = 0; @@ -1408,9 +1608,27 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev) caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR; caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE; - if (hr_dev->pci_dev->revision == 0x21) + if (hr_dev->pci_dev->revision == 0x21) { caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | - HNS_ROCE_CAP_FLAG_SRQ; + HNS_ROCE_CAP_FLAG_SRQ | + HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL; + + caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM; + caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ; + caps->qpc_timer_ba_pg_sz = 0; + caps->qpc_timer_buf_pg_sz = 0; + caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; + caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM; + caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; + caps->cqc_timer_ba_pg_sz = 0; + caps->cqc_timer_buf_pg_sz = 0; + caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; + + caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ; + 
caps->sccc_ba_pg_sz = 0; + caps->sccc_buf_pg_sz = 0; + caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM; + } ret = hns_roce_v2_set_bt(hr_dev); if (ret) @@ -1611,7 +1829,8 @@ static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev, static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) { struct hns_roce_v2_priv *priv = hr_dev->priv; - int ret; + int qpc_count, cqc_count; + int ret, i; /* TSQ includes SQ doorbell and ack doorbell */ ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE); @@ -1626,8 +1845,40 @@ static int hns_roce_v2_init(struct hns_roce_dev *hr_dev) goto err_tpq_init_failed; } + /* Alloc memory for QPC Timer buffer space chunk*/ + for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; + qpc_count++) { + ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table, + qpc_count); + if (ret) { + dev_err(hr_dev->dev, "QPC Timer get failed\n"); + goto err_qpc_timer_failed; + } + } + + /* Alloc memory for CQC Timer buffer space chunk*/ + for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; + cqc_count++) { + ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table, + cqc_count); + if (ret) { + dev_err(hr_dev->dev, "CQC Timer get failed\n"); + goto err_cqc_timer_failed; + } + } + return 0; +err_cqc_timer_failed: + for (i = 0; i < cqc_count; i++) + hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i); + +err_qpc_timer_failed: + for (i = 0; i < qpc_count; i++) + hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i); + + hns_roce_free_link_table(hr_dev, &priv->tpq); + err_tpq_init_failed: hns_roce_free_link_table(hr_dev, &priv->tsq); @@ -1735,6 +1986,9 @@ static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev, status = hns_roce_v2_cmd_complete(hr_dev); if (status != 0x1) { + if (status == CMD_RST_PRC_EBUSY) + return status; + dev_err(dev, "mailbox status 0x%x!\n", status); return -EBUSY; } @@ -1901,7 +2155,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr, V2_MPT_BYTE_4_PD_S, mr->pd); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0); - roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1); + roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1); roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S, (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); @@ -1941,6 +2195,9 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev, struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf; int ret = 0; + roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M, + V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID); + if (flags & IB_MR_REREG_PD) { roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M, V2_MPT_BYTE_4_PD_S, pdn); @@ -2086,15 +2343,10 @@ static void *get_srq_wqe(struct hns_roce_srq *srq, int n) static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index) { - u32 bitmap_num; - int bit_num; - /* always called with interrupts disabled. 
*/ spin_lock(&srq->lock); - bitmap_num = wqe_index / (sizeof(u64) * 8); - bit_num = wqe_index % (sizeof(u64) * 8); - srq->idx_que.bitmap[bitmap_num] |= (1ULL << bit_num); + bitmap_clear(srq->idx_que.bitmap, wqe_index, 1); srq->tail++; spin_unlock(&srq->lock); @@ -2245,6 +2497,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { + struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); u32 notification_flag; u32 doorbell[2]; @@ -2270,7 +2523,7 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq, roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S, notification_flag); - hns_roce_write64_k(doorbell, hr_cq->cq_db_l); + hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l); return 0; } @@ -2663,17 +2916,33 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev, case HEM_TYPE_SRQC: op = HNS_ROCE_CMD_WRITE_SRQC_BT0; break; + case HEM_TYPE_SCCC: + op = HNS_ROCE_CMD_WRITE_SCCC_BT0; + break; + case HEM_TYPE_QPC_TIMER: + op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0; + break; + case HEM_TYPE_CQC_TIMER: + op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0; + break; default: dev_warn(dev, "Table %d not to be written by mailbox!\n", table->type); return 0; } + + if (table->type == HEM_TYPE_SCCC && step_idx) + return 0; + op += step_idx; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + if (table->type == HEM_TYPE_SCCC) + obj = mhop.l0_idx; + if (check_whether_last_step(hop_num, step_idx)) { hem = table->hem[hem_idx]; for (hns_roce_hem_first(hem, &iter); @@ -2722,6 +2991,10 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, case HEM_TYPE_CQC: op = HNS_ROCE_CMD_DESTROY_CQC_BT0; break; + case HEM_TYPE_SCCC: + case HEM_TYPE_QPC_TIMER: + case HEM_TYPE_CQC_TIMER: + break; case HEM_TYPE_SRQC: op = HNS_ROCE_CMD_DESTROY_SRQC_BT0; break; @@ -2730,6 +3003,12 @@ static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev, table->type); return 0; } + + if (table->type == HEM_TYPE_SCCC || + table->type == HEM_TYPE_QPC_TIMER || + table->type == HEM_TYPE_CQC_TIMER) + return 0; + op += step_idx; mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); @@ -2892,12 +3171,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0); roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0); - if (attr_mask & IB_QP_QKEY) { - context->qkey_xrcd = attr->qkey; - qpc_mask->qkey_xrcd = 0; - hr_qp->qkey = attr->qkey; - } - if (hr_qp->rdb_en) { roce_set_bit(context->byte_68_rq_db, V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1); @@ -3109,7 +3382,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp, 0); hr_qp->access_flags = attr->qp_access_flags; - hr_qp->pkey_index = attr->pkey_index; roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn); roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M, @@ -3233,11 +3505,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp, V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0); } - if (attr_mask & IB_QP_QKEY) { - context->qkey_xrcd = attr->qkey; - qpc_mask->qkey_xrcd = 0; - } - roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn); roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M, @@ -3357,13 +3624,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M, 
V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0); - roce_set_field(context->byte_80_rnr_rx_cqn, - V2_QPC_BYTE_80_MIN_RNR_TIME_M, - V2_QPC_BYTE_80_MIN_RNR_TIME_S, attr->min_rnr_timer); - roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, - V2_QPC_BYTE_80_MIN_RNR_TIME_M, - V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0); - page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT); context->rq_cur_blk_addr = (u32)(mtts[hr_qp->rq.offset / page_size] >> PAGE_ADDR_SHIFT); @@ -3391,13 +3651,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M, V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0); - roce_set_field(context->byte_108_rx_reqepsn, - V2_QPC_BYTE_108_RX_REQ_EPSN_M, - V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn); - roce_set_field(qpc_mask->byte_108_rx_reqepsn, - V2_QPC_BYTE_108_RX_REQ_EPSN_M, - V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0); - roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4); roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M, @@ -3436,15 +3689,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0); } - if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && - attr->max_dest_rd_atomic) { - roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, - V2_QPC_BYTE_140_RR_MAX_S, - fls(attr->max_dest_rd_atomic - 1)); - roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, - V2_QPC_BYTE_140_RR_MAX_S, 0); - } - if (attr_mask & IB_QP_DEST_QPN) { roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num); @@ -3505,11 +3749,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, context->rq_rnr_timer = 0; qpc_mask->rq_rnr_timer = 0; - roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M, - V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1); - roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M, - V2_QPC_BYTE_152_RAQ_PSN_S, 0); - roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M, V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0); roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M, @@ -3607,13 +3846,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, V2_QPC_BYTE_240_RX_ACK_MSN_M, V2_QPC_BYTE_240_RX_ACK_MSN_S, 0); - roce_set_field(context->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RX_ACK_EPSN_M, - V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RX_ACK_EPSN_M, - V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0); - roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M, V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0); @@ -3627,27 +3859,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, V2_QPC_BYTE_240_IRRL_TAIL_REAL_M, V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0); - roce_set_field(context->byte_220_retry_psn_msn, - V2_QPC_BYTE_220_RETRY_MSG_PSN_M, - V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_220_retry_psn_msn, - V2_QPC_BYTE_220_RETRY_MSG_PSN_M, - V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0); - - roce_set_field(context->byte_224_retry_msg, - V2_QPC_BYTE_224_RETRY_MSG_PSN_M, - V2_QPC_BYTE_224_RETRY_MSG_PSN_S, attr->sq_psn >> 16); - roce_set_field(qpc_mask->byte_224_retry_msg, - V2_QPC_BYTE_224_RETRY_MSG_PSN_M, - V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0); - - roce_set_field(context->byte_224_retry_msg, - V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, - V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_224_retry_msg, - V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, - 
V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0); - roce_set_field(qpc_mask->byte_220_retry_psn_msn, V2_QPC_BYTE_220_RETRY_MSG_MSN_M, V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0); @@ -3658,60 +3869,14 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M, V2_QPC_BYTE_212_CHECK_FLG_S, 0); - roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M, - V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt); - roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_CNT_M, - V2_QPC_BYTE_212_RETRY_CNT_S, 0); - - roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M, - V2_QPC_BYTE_212_RETRY_NUM_INIT_S, attr->retry_cnt); - roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_RETRY_NUM_INIT_M, - V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0); - - roce_set_field(context->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RNR_NUM_INIT_M, - V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry); - roce_set_field(qpc_mask->byte_244_rnr_rxack, - V2_QPC_BYTE_244_RNR_NUM_INIT_M, - V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0); - - roce_set_field(context->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M, - V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry); - roce_set_field(qpc_mask->byte_244_rnr_rxack, V2_QPC_BYTE_244_RNR_CNT_M, - V2_QPC_BYTE_244_RNR_CNT_S, 0); - roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M, V2_QPC_BYTE_212_LSN_S, 0x100); roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M, V2_QPC_BYTE_212_LSN_S, 0); - if (attr_mask & IB_QP_TIMEOUT) { - roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_AT_M, - V2_QPC_BYTE_28_AT_S, attr->timeout); - roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_AT_M, - V2_QPC_BYTE_28_AT_S, 0); - } - - roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M, - V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_SQ_CUR_PSN_M, - V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0); - roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M, V2_QPC_BYTE_196_IRRL_HEAD_S, 0); - roce_set_field(context->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M, - V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn); - roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_SQ_MAX_PSN_M, - V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0); - if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { - roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M, - V2_QPC_BYTE_208_SR_MAX_S, - fls(attr->max_rd_atomic - 1)); - roce_set_field(qpc_mask->byte_208_irrl, - V2_QPC_BYTE_208_SR_MAX_M, - V2_QPC_BYTE_208_SR_MAX_S, 0); - } return 0; } @@ -3789,20 +3954,22 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, roce_set_field(qpc_mask->byte_160_sq_ci_pi, V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M, V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0); - roce_set_field(context->byte_84_rq_ci_pi, + + if (!ibqp->srq) { + roce_set_field(context->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head); - roce_set_field(qpc_mask->byte_84_rq_ci_pi, + roce_set_field(qpc_mask->byte_84_rq_ci_pi, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M, V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0); + } } if (attr_mask & IB_QP_AV) { const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); const struct ib_gid_attr *gid_attr = NULL; - u8 src_mac[ETH_ALEN]; int is_roce_protocol; u16 vlan = 0xffff; u8 ib_port; @@ -3817,7 +3984,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, if (is_roce_protocol) { gid_attr = attr->ah_attr.grh.sgid_attr; vlan = rdma_vlan_dev_vlan_id(gid_attr->ndev); - 
memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN); } if (is_vlan_dev(gid_attr->ndev)) { @@ -3902,9 +4068,152 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); } + if (attr_mask & IB_QP_TIMEOUT) { + if (attr->timeout < 31) { + roce_set_field(context->byte_28_at_fl, + V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, + attr->timeout); + roce_set_field(qpc_mask->byte_28_at_fl, + V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, + 0); + } else { + dev_warn(dev, "Local ACK timeout shall be 0 to 30.\n"); + } + } + + if (attr_mask & IB_QP_RETRY_CNT) { + roce_set_field(context->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_NUM_INIT_M, + V2_QPC_BYTE_212_RETRY_NUM_INIT_S, + attr->retry_cnt); + roce_set_field(qpc_mask->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_NUM_INIT_M, + V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0); + + roce_set_field(context->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_CNT_M, + V2_QPC_BYTE_212_RETRY_CNT_S, + attr->retry_cnt); + roce_set_field(qpc_mask->byte_212_lsn, + V2_QPC_BYTE_212_RETRY_CNT_M, + V2_QPC_BYTE_212_RETRY_CNT_S, 0); + } + + if (attr_mask & IB_QP_RNR_RETRY) { + roce_set_field(context->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_NUM_INIT_M, + V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry); + roce_set_field(qpc_mask->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_NUM_INIT_M, + V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0); + + roce_set_field(context->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_CNT_M, + V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry); + roce_set_field(qpc_mask->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RNR_CNT_M, + V2_QPC_BYTE_244_RNR_CNT_S, 0); + } + + if (attr_mask & IB_QP_SQ_PSN) { + roce_set_field(context->byte_172_sq_psn, + V2_QPC_BYTE_172_SQ_CUR_PSN_M, + V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_172_sq_psn, + V2_QPC_BYTE_172_SQ_CUR_PSN_M, + V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0); + + roce_set_field(context->byte_196_sq_psn, + V2_QPC_BYTE_196_SQ_MAX_PSN_M, + V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_196_sq_psn, + V2_QPC_BYTE_196_SQ_MAX_PSN_M, + V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0); + + roce_set_field(context->byte_220_retry_psn_msn, + V2_QPC_BYTE_220_RETRY_MSG_PSN_M, + V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_220_retry_psn_msn, + V2_QPC_BYTE_220_RETRY_MSG_PSN_M, + V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0); + + roce_set_field(context->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_PSN_S, + attr->sq_psn >> 16); + roce_set_field(qpc_mask->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0); + + roce_set_field(context->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, + attr->sq_psn); + roce_set_field(qpc_mask->byte_224_retry_msg, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M, + V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0); + + roce_set_field(context->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RX_ACK_EPSN_M, + V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn); + roce_set_field(qpc_mask->byte_244_rnr_rxack, + V2_QPC_BYTE_244_RX_ACK_EPSN_M, + V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0); + } + + if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) && + attr->max_dest_rd_atomic) { + roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, + V2_QPC_BYTE_140_RR_MAX_S, + fls(attr->max_dest_rd_atomic - 1)); + roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M, + V2_QPC_BYTE_140_RR_MAX_S, 0); + } + + if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { + 
roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M, + V2_QPC_BYTE_208_SR_MAX_S, + fls(attr->max_rd_atomic - 1)); + roce_set_field(qpc_mask->byte_208_irrl, + V2_QPC_BYTE_208_SR_MAX_M, + V2_QPC_BYTE_208_SR_MAX_S, 0); + } + if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask); + if (attr_mask & IB_QP_MIN_RNR_TIMER) { + roce_set_field(context->byte_80_rnr_rx_cqn, + V2_QPC_BYTE_80_MIN_RNR_TIME_M, + V2_QPC_BYTE_80_MIN_RNR_TIME_S, + attr->min_rnr_timer); + roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, + V2_QPC_BYTE_80_MIN_RNR_TIME_M, + V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0); + } + + /* RC&UC required attr */ + if (attr_mask & IB_QP_RQ_PSN) { + roce_set_field(context->byte_108_rx_reqepsn, + V2_QPC_BYTE_108_RX_REQ_EPSN_M, + V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn); + roce_set_field(qpc_mask->byte_108_rx_reqepsn, + V2_QPC_BYTE_108_RX_REQ_EPSN_M, + V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0); + + roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M, + V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1); + roce_set_field(qpc_mask->byte_152_raq, + V2_QPC_BYTE_152_RAQ_PSN_M, + V2_QPC_BYTE_152_RAQ_PSN_S, 0); + } + + if (attr_mask & IB_QP_QKEY) { + context->qkey_xrcd = attr->qkey; + qpc_mask->qkey_xrcd = 0; + hr_qp->qkey = attr->qkey; + } + roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S, ibqp->srq ? 1 : 0); roce_set_bit(qpc_mask->byte_108_rx_reqepsn, @@ -4224,6 +4533,59 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp) return 0; } +static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, + struct hns_roce_qp *hr_qp) +{ + struct hns_roce_sccc_clr_done *resp; + struct hns_roce_sccc_clr *clr; + struct hns_roce_cmq_desc desc; + int ret, i; + + mutex_lock(&hr_dev->qp_table.scc_mutex); + + /* set scc ctx clear done flag */ + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) { + dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret); + goto out; + } + + /* clear scc context */ + hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false); + clr = (struct hns_roce_sccc_clr *)desc.data; + clr->qpn = cpu_to_le32(hr_qp->qpn); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) { + dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret); + goto out; + } + + /* query scc context clear is done or not */ + resp = (struct hns_roce_sccc_clr_done *)desc.data; + for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) { + hns_roce_cmq_setup_basic_desc(&desc, + HNS_ROCE_OPC_QUERY_SCCC, true); + ret = hns_roce_cmq_send(hr_dev, &desc, 1); + if (ret) { + dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret); + goto out; + } + + if (resp->clr_done) + goto out; + + msleep(20); + } + + dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n"); + ret = -ETIMEDOUT; + +out: + mutex_unlock(&hr_dev->qp_table.scc_mutex); + return ret; +} + static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) { struct hns_roce_dev *hr_dev = to_hr_dev(cq->device); @@ -4281,7 +4643,8 @@ static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn) if (hr_qp->ibqp.uobject) { if (hr_qp->sdb_en == 1) { hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); - hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); + if (hr_qp->rdb_en == 1) + hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); } else { dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n"); return; @@ -4319,64 +4682,19 @@ static void 
hns_roce_irq_work_handle(struct work_struct *work) dev_warn(dev, "Send queue drained.\n"); break; case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - dev_err(dev, "Local work queue catastrophic error.\n"); + dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n", + qpn, irq_work->sub_type); hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); - switch (irq_work->sub_type) { - case HNS_ROCE_LWQCE_QPC_ERROR: - dev_err(dev, "QP %d, QPC error.\n", qpn); - break; - case HNS_ROCE_LWQCE_MTU_ERROR: - dev_err(dev, "QP %d, MTU error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: - dev_err(dev, "QP %d, WQE BA addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: - dev_err(dev, "QP %d, WQE addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: - dev_err(dev, "QP %d, WQE shift error.\n", qpn); - break; - default: - dev_err(dev, "Unhandled sub_event type %d.\n", - irq_work->sub_type); - break; - } break; case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_err(dev, "Invalid request local work queue error.\n"); + dev_err(dev, "Invalid request local work queue 0x%x error.\n", + qpn); hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); break; case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - dev_err(dev, "Local access violation work queue error.\n"); + dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n", + qpn, irq_work->sub_type); hns_roce_set_qps_to_err(irq_work->hr_dev, qpn); - switch (irq_work->sub_type) { - case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: - dev_err(dev, "QP %d, R_key violation.\n", qpn); - break; - case HNS_ROCE_LAVWQE_LENGTH_ERROR: - dev_err(dev, "QP %d, length error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_VA_ERROR: - dev_err(dev, "QP %d, VA error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_PD_ERROR: - dev_err(dev, "QP %d, PD error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_RW_ACC_ERROR: - dev_err(dev, "QP %d, rw acc error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: - dev_err(dev, "QP %d, key state error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: - dev_err(dev, "QP %d, MR operation error.\n", qpn); - break; - default: - dev_err(dev, "Unhandled sub_event type %d.\n", - irq_work->sub_type); - break; - } break; case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: dev_warn(dev, "SRQ limit reach.\n"); @@ -4427,6 +4745,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev, static void set_eq_cons_index_v2(struct hns_roce_eq *eq) { + struct hns_roce_dev *hr_dev = eq->hr_dev; u32 doorbell[2]; doorbell[0] = 0; @@ -4453,7 +4772,7 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq) HNS_ROCE_V2_EQ_DB_PARA_S, (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M)); - hns_roce_write64_k(doorbell, eq->doorbell); + hns_roce_write64(hr_dev, doorbell, eq->doorbell); } static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry) @@ -4692,11 +5011,22 @@ static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id) int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG); if (roce_get_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) { + struct pci_dev *pdev = hr_dev->pci_dev; + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + const struct hnae3_ae_ops *ops = ae_dev->ops; + dev_err(dev, "AEQ overflow!\n"); roce_set_bit(int_st, HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S, 1); roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st); + /* Set reset level for reset_event() */ + if (ops->set_default_reset_request) + ops->set_default_reset_request(ae_dev, + HNAE3_FUNC_RESET); + if (ops->reset_event) + 
ops->reset_event(pdev, NULL); + roce_set_bit(int_en, HNS_ROCE_V2_VF_ABN_INT_EN_S, 1); roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en); @@ -5599,7 +5929,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, return 0; } -int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) +static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) { struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); struct hns_roce_srq *srq = to_hr_srq(ibsrq); @@ -5636,18 +5966,19 @@ out: return ret; } -static int find_empty_entry(struct hns_roce_idx_que *idx_que) +static int find_empty_entry(struct hns_roce_idx_que *idx_que, + unsigned long size) { - int bit_num; - int i; + int wqe_idx; - /* bitmap[i] is set zero if all bits are allocated */ - for (i = 0; idx_que->bitmap[i] == 0; ++i) - ; - bit_num = ffs(idx_que->bitmap[i]); - idx_que->bitmap[i] &= ~(1ULL << (bit_num - 1)); + if (unlikely(bitmap_full(idx_que->bitmap, size))) + return -ENOSPC; + + wqe_idx = find_first_zero_bit(idx_que->bitmap, size); + + bitmap_set(idx_que->bitmap, wqe_idx, 1); - return i * sizeof(u64) * 8 + (bit_num - 1); + return wqe_idx; } static void fill_idx_queue(struct hns_roce_idx_que *idx_que, @@ -5664,6 +5995,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { + struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); struct hns_roce_srq *srq = to_hr_srq(ibsrq); struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_v2_db srq_db; @@ -5692,7 +6024,13 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, break; } - wqe_idx = find_empty_entry(&srq->idx_que); + wqe_idx = find_empty_entry(&srq->idx_que, srq->max); + if (wqe_idx < 0) { + ret = -ENOMEM; + *bad_wr = wr; + break; + } + fill_idx_queue(&srq->idx_que, ind, wqe_idx); wqe = get_srq_wqe(srq, wqe_idx); dseg = (struct hns_roce_v2_wqe_data_seg *)wqe; @@ -5725,7 +6063,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, srq_db.byte_4 = HNS_ROCE_V2_SRQ_DB << 24 | srq->srqn; srq_db.parameter = srq->head; - hns_roce_write64_k((__le32 *)&srq_db, srq->db_reg_l); + hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l); } @@ -5742,6 +6080,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .hw_exit = hns_roce_v2_exit, .post_mbox = hns_roce_v2_post_mbox, .chk_mbox = hns_roce_v2_chk_mbox, + .rst_prc_mbox = hns_roce_v2_rst_process_cmd, .set_gid = hns_roce_v2_set_gid, .set_mac = hns_roce_v2_set_mac, .write_mtpt = hns_roce_v2_write_mtpt, @@ -5754,6 +6093,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { .modify_qp = hns_roce_v2_modify_qp, .query_qp = hns_roce_v2_query_qp, .destroy_qp = hns_roce_v2_destroy_qp, + .qp_flow_control_init = hns_roce_v2_qp_flow_control_init, .modify_cq = hns_roce_v2_modify_cq, .post_send = hns_roce_v2_post_send, .post_recv = hns_roce_v2_post_recv, @@ -5782,15 +6122,9 @@ MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl); static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, struct hnae3_handle *handle) { - const struct pci_device_id *id; + struct hns_roce_v2_priv *priv = hr_dev->priv; int i; - id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev); - if (!id) { - dev_err(hr_dev->dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = &hns_roce_hw_v2; hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; hr_dev->odb_offset = hr_dev->sdb_offset; @@ -5812,10 +6146,13 @@ static int hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev, hr_dev->cmd_mod = 1; hr_dev->loop_idc = 0; + hr_dev->reset_cnt = 
handle->ae_algo->ops->ae_dev_reset_cnt(handle); + priv->handle = handle; + return 0; } -static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) +static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) { struct hns_roce_dev *hr_dev; int ret; @@ -5832,7 +6169,6 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) hr_dev->pci_dev = handle->pdev; hr_dev->dev = &handle->pdev->dev; - handle->priv = hr_dev; ret = hns_roce_hw_v2_get_cfg(hr_dev, handle); if (ret) { @@ -5846,6 +6182,8 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) goto error_failed_get_cfg; } + handle->priv = hr_dev; + return 0; error_failed_get_cfg: @@ -5857,7 +6195,7 @@ error_failed_kzalloc: return ret; } -static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, +static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, bool reset) { struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; @@ -5865,24 +6203,84 @@ static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, if (!hr_dev) return; + handle->priv = NULL; hns_roce_exit(hr_dev); kfree(hr_dev->priv); ib_dealloc_device(&hr_dev->ib_dev); } +static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) +{ + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + const struct pci_device_id *id; + struct device *dev = &handle->pdev->dev; + int ret; + + handle->rinfo.instance_state = HNS_ROCE_STATE_INIT; + + if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) { + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; + goto reset_chk_err; + } + + id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev); + if (!id) + return 0; + + ret = __hns_roce_hw_v2_init_instance(handle); + if (ret) { + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; + dev_err(dev, "RoCE instance init failed! 
ret = %d\n", ret); + if (ops->ae_dev_resetting(handle) || + ops->get_hw_reset_stat(handle)) + goto reset_chk_err; + else + return ret; + } + + handle->rinfo.instance_state = HNS_ROCE_STATE_INITED; + + + return 0; + +reset_chk_err: + dev_err(dev, "Device is busy in resetting state.\n" + "please retry later.\n"); + + return -EBUSY; +} + +static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle, + bool reset) +{ + if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) + return; + + handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT; + + __hns_roce_hw_v2_uninit_instance(handle, reset); + + handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; +} static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) { - struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; + struct hns_roce_dev *hr_dev; struct ib_event event; - if (!hr_dev) { - dev_err(&handle->pdev->dev, - "Input parameter handle->priv is NULL!\n"); - return -EINVAL; + if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) { + set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); + return 0; } + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN; + clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); + + hr_dev = (struct hns_roce_dev *)handle->priv; + if (!hr_dev) + return 0; + hr_dev->active = false; - hr_dev->is_reset = true; + hr_dev->dis_db = true; event.event = IB_EVENT_DEVICE_FATAL; event.device = &hr_dev->ib_dev; @@ -5894,17 +6292,29 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) { + struct device *dev = &handle->pdev->dev; int ret; - ret = hns_roce_hw_v2_init_instance(handle); + if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN, + &handle->rinfo.state)) { + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; + return 0; + } + + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT; + + dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n"); + ret = __hns_roce_hw_v2_init_instance(handle); if (ret) { /* when reset notify type is HNAE3_INIT_CLIENT In reset notify * callback function, RoCE Engine reinitialize. If RoCE reinit * failed, we should inform NIC driver. 
*/ handle->priv = NULL; - dev_err(&handle->pdev->dev, - "In reset process RoCE reinit failed %d.\n", ret); + dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret); + } else { + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; + dev_info(dev, "Reset done, RoCE client reinit finished.\n"); } return ret; @@ -5912,8 +6322,14 @@ static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle) static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle) { + if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) + return 0; + + handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; + dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); msleep(100); - hns_roce_hw_v2_uninit_instance(handle, false); + __hns_roce_hw_v2_uninit_instance(handle, false); + return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index b72d0443c835..37d4292a2396 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -36,6 +36,7 @@ #include <linux/bitops.h> #define HNS_ROCE_VF_QPC_BT_NUM 256 +#define HNS_ROCE_VF_SCCC_BT_NUM 64 #define HNS_ROCE_VF_SRQC_BT_NUM 64 #define HNS_ROCE_VF_CQC_BT_NUM 64 #define HNS_ROCE_VF_MPT_BT_NUM 64 @@ -44,14 +45,16 @@ #define HNS_ROCE_VF_SGID_NUM 32 #define HNS_ROCE_VF_SL_NUM 8 -#define HNS_ROCE_V2_MAX_QP_NUM 0x2000 +#define HNS_ROCE_V2_MAX_QP_NUM 0x100000 +#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_SRQ 0x100000 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 0x100 -#define HNS_ROCE_V2_MAX_CQ_NUM 0x8000 +#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 +#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 -#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000 +#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 #define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff @@ -64,7 +67,7 @@ #define HNS_ROCE_V2_COMP_VEC_NUM 63 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 -#define HNS_ROCE_V2_MAX_MTPT_NUM 0x8000 +#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 @@ -83,6 +86,9 @@ #define HNS_ROCE_V2_MTPT_ENTRY_SZ 64 #define HNS_ROCE_V2_MTT_ENTRY_SZ 64 #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32 +#define HNS_ROCE_V2_SCCC_ENTRY_SZ 32 +#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096 +#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096 #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 #define HNS_ROCE_INVALID_LKEY 0x100 @@ -90,7 +96,10 @@ #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 +#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 + #define HNS_ROCE_CONTEXT_HOP_NUM 1 +#define HNS_ROCE_SCCC_HOP_NUM 1 #define HNS_ROCE_MTT_HOP_NUM 1 #define HNS_ROCE_CQE_HOP_NUM 1 #define HNS_ROCE_SRQWQE_HOP_NUM 1 @@ -120,6 +129,8 @@ #define HNS_ROCE_CMQ_EN_B 16 #define HNS_ROCE_CMQ_ENABLE BIT(HNS_ROCE_CMQ_EN_B) +#define HNS_ROCE_CMQ_SCC_CLR_DONE_CNT 5 + #define check_whether_last_step(hop_num, step_idx) \ ((step_idx == 0 && hop_num == HNS_ROCE_HOP_NUM_0) || \ (step_idx == 1 && hop_num == 1) || \ @@ -224,11 +235,15 @@ enum hns_roce_opcode_type { HNS_ROCE_OPC_ALLOC_VF_RES = 0x8401, HNS_ROCE_OPC_CFG_EXT_LLM = 0x8403, HNS_ROCE_OPC_CFG_TMOUT_LLM = 0x8404, + HNS_ROCE_OPC_QUERY_PF_TIMER_RES = 0x8406, 
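/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * The _S/_M define pairs added in this header follow the driver's usual
 * shift-plus-GENMASK() encoding of hardware fields, so a field is read
 * and written with one fixed pattern (hypothetical helper shapes):
 *
 *	val = (le32_to_cpu(word) & FIELD_M) >> FIELD_S;
 *	word = cpu_to_le32((le32_to_cpu(word) & ~FIELD_M) |
 *			   ((val << FIELD_S) & FIELD_M));
 *
 * Seen this way, the V2_QPC_BYTE_152_RAQ_PSN change further down is
 * simply a corrected field description: a 24-bit PSN at bit 0, i.e.
 * shift 0 with GENMASK(23, 0), instead of the wrong GENMASK(31, 8) at
 * shift 8.
 */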
HNS_ROCE_OPC_CFG_SGID_TB = 0x8500, HNS_ROCE_OPC_CFG_SMAC_TB = 0x8501, HNS_ROCE_OPC_POST_MB = 0x8504, HNS_ROCE_OPC_QUERY_MB_ST = 0x8505, HNS_ROCE_OPC_CFG_BT_ATTR = 0x8506, + HNS_ROCE_OPC_CLR_SCCC = 0x8509, + HNS_ROCE_OPC_QUERY_SCCC = 0x850a, + HNS_ROCE_OPC_RESET_SCCC = 0x850b, HNS_SWITCH_PARAMETER_CFG = 0x1033, }; @@ -704,8 +719,8 @@ struct hns_roce_v2_qp_context { #define V2_QPC_BYTE_148_RAQ_SYNDROME_S 24 #define V2_QPC_BYTE_148_RAQ_SYNDROME_M GENMASK(31, 24) -#define V2_QPC_BYTE_152_RAQ_PSN_S 8 -#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(31, 8) +#define V2_QPC_BYTE_152_RAQ_PSN_S 0 +#define V2_QPC_BYTE_152_RAQ_PSN_M GENMASK(23, 0) #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S 24 #define V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M GENMASK(31, 24) @@ -1300,7 +1315,8 @@ struct hns_roce_pf_res_b { __le32 smac_idx_num; __le32 sgid_idx_num; __le32 qid_idx_sl_num; - __le32 rsv[2]; + __le32 sccc_bt_idx_num; + __le32 rsv; }; #define PF_RES_DATA_1_PF_SMAC_IDX_S 0 @@ -1321,6 +1337,31 @@ struct hns_roce_pf_res_b { #define PF_RES_DATA_3_PF_SL_NUM_S 16 #define PF_RES_DATA_3_PF_SL_NUM_M GENMASK(26, 16) +#define PF_RES_DATA_4_PF_SCCC_BT_IDX_S 0 +#define PF_RES_DATA_4_PF_SCCC_BT_IDX_M GENMASK(8, 0) + +#define PF_RES_DATA_4_PF_SCCC_BT_NUM_S 9 +#define PF_RES_DATA_4_PF_SCCC_BT_NUM_M GENMASK(17, 9) + +struct hns_roce_pf_timer_res_a { + __le32 rsv0; + __le32 qpc_timer_bt_idx_num; + __le32 cqc_timer_bt_idx_num; + __le32 rsv[3]; +}; + +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_S 0 +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_IDX_M GENMASK(11, 0) + +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S 16 +#define PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M GENMASK(28, 16) + +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_S 0 +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_IDX_M GENMASK(10, 0) + +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S 16 +#define PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M GENMASK(27, 16) + struct hns_roce_vf_res_a { __le32 vf_id; __le32 vf_qpc_bt_idx_num; @@ -1365,7 +1406,8 @@ struct hns_roce_vf_res_b { __le32 vf_smac_idx_num; __le32 vf_sgid_idx_num; __le32 vf_qid_idx_sl_num; - __le32 rsv[2]; + __le32 vf_sccc_idx_num; + __le32 rsv1; }; #define VF_RES_B_DATA_0_VF_ID_S 0 @@ -1389,6 +1431,12 @@ struct hns_roce_vf_res_b { #define VF_RES_B_DATA_3_VF_SL_NUM_S 16 #define VF_RES_B_DATA_3_VF_SL_NUM_M GENMASK(19, 16) +#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S 0 +#define VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M GENMASK(8, 0) + +#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S 9 +#define VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M GENMASK(17, 9) + struct hns_roce_vf_switch { __le32 rocee_sel; __le32 fun_id; @@ -1424,7 +1472,8 @@ struct hns_roce_cfg_bt_attr { __le32 vf_srqc_cfg; __le32 vf_cqc_cfg; __le32 vf_mpt_cfg; - __le32 rsv[2]; + __le32 vf_sccc_cfg; + __le32 rsv; }; #define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0 @@ -1463,6 +1512,15 @@ struct hns_roce_cfg_bt_attr { #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S 8 #define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8) +#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S 0 +#define CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M GENMASK(3, 0) + +#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S 4 +#define CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M GENMASK(7, 4) + +#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S 8 +#define CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M GENMASK(9, 8) + struct hns_roce_cfg_sgid_tb { __le32 table_idx_rsv; __le32 vf_sgid_l; @@ -1546,6 +1604,7 @@ struct hns_roce_link_table_entry { #define HNS_ROCE_LINK_TABLE_NXT_PTR_M GENMASK(31, 20) struct hns_roce_v2_priv { + struct hnae3_handle *handle; struct hns_roce_v2_cmq cmq; struct hns_roce_link_table tsq; 
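/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * Caching the hnae3_handle pointer in hns_roce_v2_priv (added just
 * above) lets the RoCE side query the NIC driver's reset state without
 * any lookup; the new hns_roce_write64() defined below relies on
 * exactly that, roughly:
 *
 *	ops = priv->handle->ae_algo->ops;
 *	if (!hr_dev->dis_db && !ops->get_hw_reset_stat(priv->handle))
 *		hns_roce_write64_k(val, dest);
 *
 * i.e. doorbell writes are silently dropped while the function is
 * resetting, instead of being posted to hardware that is no longer
 * listening.
 */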
struct hns_roce_link_table tpq; @@ -1730,4 +1789,25 @@ struct hns_roce_wqe_atomic_seg { __le64 cmp_data; }; +struct hns_roce_sccc_clr { + __le32 qpn; + __le32 rsv[5]; +}; + +struct hns_roce_sccc_clr_done { + __le32 clr_done; + __le32 rsv[5]; +}; + +static inline void hns_roce_write64(struct hns_roce_dev *hr_dev, __le32 val[2], + void __iomem *dest) +{ + struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv; + struct hnae3_handle *handle = priv->handle; + const struct hnae3_ae_ops *ops = handle->ae_algo->ops; + + if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle)) + hns_roce_write64_k(val, dest); +} + #endif diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 65ba43cee810..f2f68eac4233 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -78,18 +78,13 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context) { struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); u8 port = attr->port_num - 1; - unsigned long flags; int ret; if (port >= hr_dev->caps.num_ports) return -EINVAL; - spin_lock_irqsave(&hr_dev->iboe.lock, flags); - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr); - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); - return ret; } @@ -98,18 +93,13 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) struct hns_roce_dev *hr_dev = to_hr_dev(attr->device); struct ib_gid_attr zattr = { }; u8 port = attr->port_num - 1; - unsigned long flags; int ret; if (port >= hr_dev->caps.num_ports) return -EINVAL; - spin_lock_irqsave(&hr_dev->iboe.lock, flags); - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &zgid, &zattr); - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); - return ret; } @@ -226,6 +216,11 @@ static int hns_roce_query_device(struct ib_device *ib_dev, props->max_srq_sge = hr_dev->caps.max_srq_sges; } + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) { + props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; + props->max_fast_reg_page_list_len = HNS_ROCE_FRMR_MAX_PA; + } + return 0; } @@ -704,8 +699,62 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev) } } + if (hr_dev->caps.sccc_entry_sz) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->qp_table.sccc_table, + HEM_TYPE_SCCC, + hr_dev->caps.sccc_entry_sz, + hr_dev->caps.num_qps, 1); + if (ret) { + dev_err(dev, + "Failed to init SCC context memory, aborting.\n"); + goto err_unmap_idx; + } + } + + if (hr_dev->caps.qpc_timer_entry_sz) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->qpc_timer_table, + HEM_TYPE_QPC_TIMER, + hr_dev->caps.qpc_timer_entry_sz, + hr_dev->caps.num_qpc_timer, 1); + if (ret) { + dev_err(dev, + "Failed to init QPC timer memory, aborting.\n"); + goto err_unmap_ctx; + } + } + + if (hr_dev->caps.cqc_timer_entry_sz) { + ret = hns_roce_init_hem_table(hr_dev, + &hr_dev->cqc_timer_table, + HEM_TYPE_CQC_TIMER, + hr_dev->caps.cqc_timer_entry_sz, + hr_dev->caps.num_cqc_timer, 1); + if (ret) { + dev_err(dev, + "Failed to init CQC timer memory, aborting.\n"); + goto err_unmap_qpc_timer; + } + } + return 0; +err_unmap_qpc_timer: + if (hr_dev->caps.qpc_timer_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qpc_timer_table); + +err_unmap_ctx: + if (hr_dev->caps.sccc_entry_sz) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->qp_table.sccc_table); + +err_unmap_idx: + if (hr_dev->caps.num_idx_segs) + hns_roce_cleanup_hem_table(hr_dev, + &hr_dev->mr_table.mtt_idx_table); + err_unmap_srqwqe: if 
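/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * The HEM-table setup above (SCC context, then QPC timer, then CQC
 * timer) and the unwind labels around this point follow the kernel's
 * standard "init forward, clean up in reverse" goto ladder; its
 * skeleton, with hypothetical names:
 *
 *	ret = init_a();
 *	if (ret)
 *		return ret;
 *	ret = init_b();
 *	if (ret)
 *		goto err_a;
 *	ret = init_c();
 *	if (ret)
 *		goto err_b;
 *	return 0;
 *
 *	err_b:
 *		cleanup_b();
 *	err_a:
 *		cleanup_a();
 *		return ret;
 *
 * Because each table is optional (guarded by its *_entry_sz or segment
 * count capability), every error label re-checks the same condition
 * before tearing its table down.
 */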
(hr_dev->caps.num_srqwqe_segs) hns_roce_cleanup_hem_table(hr_dev, diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 52d2b299b3be..6a3bbf87d1aa 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -209,13 +209,23 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, } } + if (hr_dev->caps.sccc_entry_sz) { + /* Alloc memory for SCC CTX */ + ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table, + hr_qp->qpn); + if (ret) { + dev_err(dev, "SCC CTX table get failed\n"); + goto err_put_trrl; + } + } + spin_lock_irq(&qp_table->lock); ret = radix_tree_insert(&hr_dev->qp_table_tree, hr_qp->qpn & (hr_dev->caps.num_qps - 1), hr_qp); spin_unlock_irq(&qp_table->lock); if (ret) { dev_err(dev, "QPC radix_tree_insert failed\n"); - goto err_put_trrl; + goto err_put_sccc; } atomic_set(&hr_qp->refcount, 1); @@ -223,6 +233,11 @@ static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn, return 0; +err_put_sccc: + if (hr_dev->caps.sccc_entry_sz) + hns_roce_table_put(hr_dev, &qp_table->sccc_table, + hr_qp->qpn); + err_put_trrl: if (hr_dev->caps.trrl_entry_sz) hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn); @@ -517,7 +532,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev, static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) { - if (attr->qp_type == IB_QPT_XRC_TGT) + if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr) return 0; return 1; @@ -526,7 +541,8 @@ static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr) static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr) { if (attr->qp_type == IB_QPT_XRC_INI || - attr->qp_type == IB_QPT_XRC_TGT || attr->srq) + attr->qp_type == IB_QPT_XRC_TGT || attr->srq || + !attr->cap.max_recv_wr) return 0; return 1; @@ -675,6 +691,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, dev_err(dev, "rq record doorbell map failed!\n"); goto err_sq_dbmap; } + + /* indicate kernel supports rq record db */ + resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; + hr_qp->rdb_en = 1; } } else { if (init_attr->create_flags & @@ -741,10 +761,10 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, goto err_mtt; } - hr_qp->sq.wrid = kmalloc_array(hr_qp->sq.wqe_cnt, sizeof(u64), - GFP_KERNEL); - hr_qp->rq.wrid = kmalloc_array(hr_qp->rq.wqe_cnt, sizeof(u64), - GFP_KERNEL); + hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), + GFP_KERNEL); + hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), + GFP_KERNEL); if (!hr_qp->sq.wrid || !hr_qp->rq.wrid) { ret = -ENOMEM; goto err_wrid; @@ -783,17 +803,19 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, else hr_qp->doorbell_qpn = cpu_to_le64(hr_qp->qpn); - if (ib_pd->uobject && (udata->outlen >= sizeof(resp)) && - (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)) { - - /* indicate kernel supports rq record db */ - resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB; - ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); + if (udata) { + ret = ib_copy_to_udata(udata, &resp, + min(udata->outlen, sizeof(resp))); if (ret) goto err_qp; + } - hr_qp->rdb_en = 1; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) { + ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp); + if (ret) + goto err_qp; } + hr_qp->event = hns_roce_ib_qp_event; return 0; @@ -969,7 +991,9 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) { if 
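/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * Note the response-copy change in hns_roce_create_qp_common() above:
 * copying min(udata->outlen, sizeof(resp)) bytes is the usual RDMA
 * uverbs idiom for ABI growth -- an old userspace that allocated a
 * smaller response struct keeps working, while a new one receives
 * every field. In reduced form:
 *
 *	if (udata) {
 *		ret = ib_copy_to_udata(udata, &resp,
 *				       min(udata->outlen, sizeof(resp)));
 *		if (ret)
 *			goto err_qp;
 *	}
 *
 * The kernel never writes more than userspace declared room for.
 */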
(hr_qp->sdb_en == 1) { hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr); - hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); + + if (hr_qp->rdb_en == 1) + hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr); } else { dev_warn(dev, "flush cqe is not supported in userspace!\n"); goto out; @@ -1133,6 +1157,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) int reserved_from_bot; int ret; + mutex_init(&qp_table->scc_mutex); spin_lock_init(&qp_table->lock); INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC); diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index eca7bf5fa3dc..8920d35acc19 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -79,9 +79,9 @@ static int hns_roce_hw2sw_srq(struct hns_roce_dev *dev, HNS_ROCE_CMD_TIMEOUT_MSECS); } -int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, u16 xrcd, - struct hns_roce_mtt *hr_mtt, u64 db_rec_addr, - struct hns_roce_srq *srq) +static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn, + u16 xrcd, struct hns_roce_mtt *hr_mtt, + u64 db_rec_addr, struct hns_roce_srq *srq) { struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; struct hns_roce_cmd_mailbox *mailbox; @@ -160,7 +160,8 @@ err_out: return ret; } -void hns_roce_srq_free(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq) +static void hns_roce_srq_free(struct hns_roce_dev *hr_dev, + struct hns_roce_srq *srq) { struct hns_roce_srq_table *srq_table = &hr_dev->srq_table; int ret; @@ -187,28 +188,19 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, { struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); struct hns_roce_idx_que *idx_que = &srq->idx_que; - u32 bitmap_num; - int i; - bitmap_num = HNS_ROCE_ALOGN_UP(srq->max, 8 * sizeof(u64)); - - idx_que->bitmap = kcalloc(1, bitmap_num / 8, GFP_KERNEL); + idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL); if (!idx_que->bitmap) return -ENOMEM; - bitmap_num = bitmap_num / (8 * sizeof(u64)); - idx_que->buf_size = srq->idx_que.buf_size; if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2, &idx_que->idx_buf, page_shift)) { - kfree(idx_que->bitmap); + bitmap_free(idx_que->bitmap); return -ENOMEM; } - for (i = 0; i < bitmap_num; i++) - idx_que->bitmap[i] = ~(0UL); - return 0; } @@ -414,7 +406,7 @@ err_idx_mtt: err_create_idx: hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf); - kfree(srq->idx_que.bitmap); + bitmap_free(srq->idx_que.bitmap); err_srq_mtt: hns_roce_mtt_cleanup(hr_dev, &srq->mtt); diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 29ea779826f4..30fe3f6e0c7d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -158,54 +158,47 @@ int qedr_query_device(struct ib_device *ibdev, return 0; } -#define QEDR_SPEED_SDR (1) -#define QEDR_SPEED_DDR (2) -#define QEDR_SPEED_QDR (4) -#define QEDR_SPEED_FDR10 (8) -#define QEDR_SPEED_FDR (16) -#define QEDR_SPEED_EDR (32) - static inline void get_link_speed_and_width(int speed, u8 *ib_speed, u8 *ib_width) { switch (speed) { case 1000: - *ib_speed = QEDR_SPEED_SDR; + *ib_speed = IB_SPEED_SDR; *ib_width = IB_WIDTH_1X; break; case 10000: - *ib_speed = QEDR_SPEED_QDR; + *ib_speed = IB_SPEED_QDR; *ib_width = IB_WIDTH_1X; break; case 20000: - *ib_speed = QEDR_SPEED_DDR; + *ib_speed = IB_SPEED_DDR; *ib_width = IB_WIDTH_4X; break; case 25000: - *ib_speed = QEDR_SPEED_EDR; + *ib_speed = IB_SPEED_EDR; *ib_width = 
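/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * The surrounding qedr switch drops the driver-private QEDR_SPEED_*
 * values for the standard IB_SPEED_* enums, choosing the (speed, width)
 * pair whose per-lane rate times lane count matches the Ethernet link
 * rate, approximately:
 *
 *	10000  -> IB_SPEED_QDR, IB_WIDTH_1X   (~10G x 1)
 *	40000  -> IB_SPEED_QDR, IB_WIDTH_4X   (~10G x 4)
 *	50000  -> IB_SPEED_HDR, IB_WIDTH_1X   (~50G x 1)
 *	100000 -> IB_SPEED_EDR, IB_WIDTH_4X   (~25G x 4)
 *
 * The 50G case is the behavioral fix here: the old QDR x4 mapping
 * advertised roughly 40G for a 50G link, while HDR x1 describes a true
 * 50G lane.
 */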
IB_WIDTH_1X; break; case 40000: - *ib_speed = QEDR_SPEED_QDR; + *ib_speed = IB_SPEED_QDR; *ib_width = IB_WIDTH_4X; break; case 50000: - *ib_speed = QEDR_SPEED_QDR; - *ib_width = IB_WIDTH_4X; + *ib_speed = IB_SPEED_HDR; + *ib_width = IB_WIDTH_1X; break; case 100000: - *ib_speed = QEDR_SPEED_EDR; + *ib_speed = IB_SPEED_EDR; *ib_width = IB_WIDTH_4X; break; default: /* Unsupported */ - *ib_speed = QEDR_SPEED_SDR; + *ib_speed = IB_SPEED_SDR; *ib_width = IB_WIDTH_1X; } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 299b277bc7ae..83e19c6b974e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -43,6 +43,8 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */ + HLCGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */ + HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */ }; @@ -62,6 +64,8 @@ enum hclge_mbx_vlan_cfg_subcode { HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */ + HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */ + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */ }; #define HCLGE_MBX_MAX_MSG_SIZE 16 @@ -80,12 +84,15 @@ struct hclgevf_mbx_resp_status { struct hclge_mbx_vf_to_pf_cmd { u8 rsv; u8 mbx_src_vfid; /* Auto filled by IMP */ - u8 rsv1[2]; + u8 mbx_need_resp; + u8 rsv1[1]; u8 msg_len; u8 rsv2[3]; u8 msg[HCLGE_MBX_MAX_MSG_SIZE]; }; +#define HCLGE_MBX_NEED_RESP_BIT BIT(0) + struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; @@ -107,7 +114,7 @@ struct hclgevf_mbx_arq_ring { struct hclgevf_dev *hdev; u32 head; u32 tail; - u32 count; + atomic_t count; u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE]; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c index 17ab4f4af6ad..fa8b8506b120 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c @@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client, return inited; } -static int hnae3_match_n_instantiate(struct hnae3_client *client, - struct hnae3_ae_dev *ae_dev, bool is_reg) +static int hnae3_init_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) { int ret; @@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client, return 0; } - /* now, (un-)instantiate client by calling lower layer */ - if (is_reg) { - ret = ae_dev->ops->init_client_instance(client, ae_dev); - if (ret) - dev_err(&ae_dev->pdev->dev, - "fail to instantiate client, ret = %d\n", ret); + ret = ae_dev->ops->init_client_instance(client, ae_dev); + if (ret) + dev_err(&ae_dev->pdev->dev, + "fail to instantiate client, ret = %d\n", ret); - return ret; - } + return ret; +} + +static void hnae3_uninit_client_instance(struct hnae3_client *client, + struct hnae3_ae_dev *ae_dev) +{ + /* check if this client matches the type of ae_dev */ + if (!(hnae3_client_match(client->type, ae_dev->dev_type) && + hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) + return; if (hnae3_get_client_init_flag(client, ae_dev)) { ae_dev->ops->uninit_client_instance(client, 
ae_dev); hnae3_set_client_init_flag(client, ae_dev, 0); } - - return 0; } int hnae3_register_client(struct hnae3_client *client) @@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client) /* if the client could not be initialized on current port, for * any error reasons, move on to next available port */ - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed for port, ret = %d\n", @@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client) mutex_lock(&hnae3_common_lock); /* un-initialize the client on every matched port */ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); } list_del(&client->node); @@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", @@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo) * un-initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); @@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev) * initialize the figure out client instance */ list_for_each_entry(client, &hnae3_client_list, node) { - ret = hnae3_match_n_instantiate(client, ae_dev, true); + ret = hnae3_init_client_instance(client, ae_dev); if (ret) dev_err(&ae_dev->pdev->dev, "match and instantiation failed, ret = %d\n", @@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev) continue; list_for_each_entry(client, &hnae3_client_list, node) - hnae3_match_n_instantiate(client, ae_dev, false); + hnae3_uninit_client_instance(client, ae_dev); ae_algo->ops->uninit_ae_dev(ae_dev); hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 38b430f11fc1..dce68d3d7907 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -147,6 +147,13 @@ enum hnae3_flr_state { HNAE3_FLR_DONE, }; +enum hnae3_port_base_vlan_state { + HNAE3_PORT_BASE_VLAN_DISABLE, + HNAE3_PORT_BASE_VLAN_ENABLE, + HNAE3_PORT_BASE_VLAN_MODIFY, + HNAE3_PORT_BASE_VLAN_NOCHANGE, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -385,7 +392,8 @@ struct hnae3_ae_ops { void (*update_stats)(struct hnae3_handle *handle, struct net_device_stats *net_stats); void (*get_stats)(struct hnae3_handle *handle, u64 *data); - + void (*get_mac_pause_stats)(struct hnae3_handle *handle, u64 *tx_cnt, + u64 *rx_cnt); void (*get_strings)(struct hnae3_handle *handle, u32 stringset, u8 *data); int (*get_sset_count)(struct hnae3_handle *handle, int stringset); @@ -578,8 +586,13 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ + enum hnae3_port_base_vlan_state port_base_vlan_state; + u8 netdev_flags; struct dentry *hnae3_dbgfs; + + /* Network interface message level enabled bits */ + u32 msg_enable; }; #define 
hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 0de543faa5b1..fc4917ac44be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -239,6 +239,10 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "queue info [number]\n"); dev_info(&h->pdev->dev, "queue map\n"); dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n"); + + if (!hns3_is_phys_func(h->pdev)) + return; + dev_info(&h->pdev->dev, "dump fd tcam\n"); dev_info(&h->pdev->dev, "dump tc\n"); dev_info(&h->pdev->dev, "dump tm map [q_num]\n"); @@ -247,6 +251,9 @@ static void hns3_dbg_help(struct hnae3_handle *h) dev_info(&h->pdev->dev, "dump qos pri map\n"); dev_info(&h->pdev->dev, "dump qos buf cfg\n"); dev_info(&h->pdev->dev, "dump mng tbl\n"); + dev_info(&h->pdev->dev, "dump reset info\n"); + dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n"); + dev_info(&h->pdev->dev, "dump mac tnl status\n"); memset(printf_buf, 0, HNS3_DBG_BUF_LEN); strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]", @@ -341,6 +348,8 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer, ret = hns3_dbg_bd_info(handle, cmd_buf); else if (handle->ae_algo->ops->dbg_run_cmd) ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf); + else + ret = -EOPNOTSUPP; if (ret) hns3_dbg_help(handle); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 65bd07a0835b..7477809c35aa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -35,6 +35,13 @@ static const char hns3_driver_string[] = static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation."; static struct hnae3_client client; +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, " Network interface message level setting"); + +#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ + NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) + /* hns3_pci_tbl - PCI Device ID Table * * Last entry must be all 0s @@ -67,7 +74,7 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector) { struct hns3_enet_tqp_vector *tqp_vector = vector; - napi_schedule(&tqp_vector->napi); + napi_schedule_irqoff(&tqp_vector->napi); return IRQ_HANDLED; } @@ -730,95 +737,6 @@ static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto, return 0; } -static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) -{ - union l3_hdr_info l3; - union l4_hdr_info l4; - unsigned char *l2_hdr; - u8 l4_proto = ol4_proto; - u32 ol2_len; - u32 ol3_len; - u32 ol4_len; - u32 l2_len; - u32 l3_len; - - l3.hdr = skb_network_header(skb); - l4.hdr = skb_transport_header(skb); - - /* compute L2 header size for normal packet, defined in 2 Bytes */ - l2_len = l3.hdr - skb->data; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); - - /* tunnel packet*/ - if (skb->encapsulation) { - /* compute OL2 header size, defined in 2 Bytes */ - ol2_len = l2_len; - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L2LEN_S, ol2_len >> 1); - - /* compute OL3 header size, defined in 4 Bytes */ - ol3_len = l4.hdr - l3.hdr; - hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, - ol3_len >> 2); - - /* MAC in UDP, MAC in GRE (0x6558)*/ - if ((ol4_proto == IPPROTO_UDP) || 
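/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * A reading aid for the TX descriptor code being reshuffled below: both
 * the removed hns3_set_l2l3l4_len() and its replacements program header
 * lengths in hardware units rather than bytes -- L2 lengths in 2-byte
 * units (len >> 1) and L3/L4 lengths in 4-byte units (len >> 2). For a
 * plain TCP/IPv4 frame that works out to:
 *
 *	l2_len = l3.hdr - skb->data;	14-byte Ethernet header -> 7
 *	l3_len = l4.hdr - l3.hdr;	20-byte IPv4 header     -> 5
 *	l4_len = l4.tcp->doff;		doff is already in 4-byte words
 *
 * The refactor only changes which helper computes the outer vs. inner
 * values, not this encoding.
 */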
(ol4_proto == IPPROTO_GRE)) { - /* switch MAC header ptr from outer to inner header.*/ - l2_hdr = skb_inner_mac_header(skb); - - /* compute OL4 header size, defined in 4 Bytes. */ - ol4_len = l2_hdr - l4.hdr; - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_L4LEN_S, ol4_len >> 2); - - /* switch IP header ptr from outer to inner header */ - l3.hdr = skb_inner_network_header(skb); - - /* compute inner l2 header size, defined in 2 Bytes. */ - l2_len = l3.hdr - l2_hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, - l2_len >> 1); - } else { - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } - - /* switch L4 header pointer from outer to inner */ - l4.hdr = skb_inner_transport_header(skb); - - l4_proto = il4_proto; - } - - /* compute inner(/normal) L3 header size, defined in 4 Bytes */ - l3_len = l4.hdr - l3.hdr; - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); - - /* compute inner(/normal) L4 header size, defined in 4 Bytes */ - switch (l4_proto) { - case IPPROTO_TCP: - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, - l4.tcp->doff); - break; - case IPPROTO_SCTP: - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, - (sizeof(struct sctphdr) >> 2)); - break; - case IPPROTO_UDP: - hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, - (sizeof(struct udphdr) >> 2)); - break; - default: - /* skb packet types not supported by hardware, - * txbd len fild doesn't be filled. - */ - return; - } -} - /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL * and it is udp packet, which has a dest port as the IANA assigned. * the hardware is expected to do the checksum offload, but the @@ -840,46 +758,71 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb) return true; } -static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, - u8 il4_proto, u32 *type_cs_vlan_tso, - u32 *ol_type_vlan_len_msec) +static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u32 *ol_type_vlan_len_msec) { + u32 l2_len, l3_len, l4_len; + unsigned char *il2_hdr; union l3_hdr_info l3; - u32 l4_proto = ol4_proto; + union l4_hdr_info l4; l3.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); - /* define OL3 type and tunnel type(OL4).*/ - if (skb->encapsulation) { - /* define outer network header type.*/ - if (skb->protocol == htons(ETH_P_IP)) { - if (skb_is_gso(skb)) - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_CSUM); - else - hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV4_NO_CSUM); - - } else if (skb->protocol == htons(ETH_P_IPV6)) { - hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, - HNS3_OL3T_IPV6); - } + /* compute OL2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - skb->data; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1); - /* define tunnel type(OL4).*/ - switch (l4_proto) { - case IPPROTO_UDP: + /* compute OL3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2); + + il2_hdr = skb_inner_mac_header(skb); + /* compute OL4 header size, defined in 4 Bytes. 
*/ + l4_len = il2_hdr - l4.hdr; + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2); + + /* define outer network header type */ + if (skb->protocol == htons(ETH_P_IP)) { + if (skb_is_gso(skb)) hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_MAC_IN_UDP); - break; - case IPPROTO_GRE: + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_CSUM); + else hns3_set_field(*ol_type_vlan_len_msec, - HNS3_TXD_TUNTYPE_S, - HNS3_TUN_NVGRE); - break; - default: + HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV4_NO_CSUM); + + } else if (skb->protocol == htons(ETH_P_IPV6)) { + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S, + HNS3_OL3T_IPV6); + } + + if (ol4_proto == IPPROTO_UDP) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_MAC_IN_UDP); + else if (ol4_proto == IPPROTO_GRE) + hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S, + HNS3_TUN_NVGRE); +} + +static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto, + u8 il4_proto, u32 *type_cs_vlan_tso, + u32 *ol_type_vlan_len_msec) +{ + unsigned char *l2_hdr = skb->data; + u32 l4_proto = ol4_proto; + union l4_hdr_info l4; + union l3_hdr_info l3; + u32 l2_len, l3_len; + + l4.hdr = skb_transport_header(skb); + l3.hdr = skb_network_header(skb); + + /* handle encapsulation skb */ + if (skb->encapsulation) { + /* If this is a not UDP/GRE encapsulation skb */ + if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) { /* drop the skb tunnel packet if hardware don't support, * because hardware can't calculate csum when TSO. */ @@ -893,7 +836,12 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, return 0; } + hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec); + + /* switch to inner header */ + l2_hdr = skb_inner_mac_header(skb); l3.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); l4_proto = il4_proto; } @@ -911,11 +859,22 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, HNS3_L3T_IPV6); } + /* compute inner(/normal) L2 header size, defined in 2 Bytes */ + l2_len = l3.hdr - l2_hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1); + + /* compute inner(/normal) L3 header size, defined in 4 Bytes */ + l3_len = l4.hdr - l3.hdr; + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2); + + /* compute inner(/normal) L4 header size, defined in 4 Bytes */ switch (l4_proto) { case IPPROTO_TCP: hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, HNS3_L4T_TCP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + l4.tcp->doff); break; case IPPROTO_UDP: if (hns3_tunnel_csum_bug(skb)) @@ -924,11 +883,15 @@ static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto, hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, HNS3_L4T_UDP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct udphdr) >> 2)); break; case IPPROTO_SCTP: hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1); hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S, HNS3_L4T_SCTP); + hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S, + (sizeof(struct sctphdr) >> 2)); break; default: /* drop the skb tunnel packet if hardware don't support, @@ -963,6 +926,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, { #define HNS3_TX_VLAN_PRIO_SHIFT 13 + struct hnae3_handle *handle = tx_ring->tqp->handle; + + /* Since HW limitation, if port based insert VLAN enabled, only one VLAN + * header is allowed in skb, 
otherwise it will cause RAS error. + */ + if (unlikely(skb_vlan_tagged_multi(skb) && + handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_ENABLE)) + return -EINVAL; + if (skb->protocol == htons(ETH_P_8021Q) && !(tx_ring->tqp->handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { @@ -984,8 +957,16 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb, * and use inner_vtag in one tag case. */ if (skb->protocol == htons(ETH_P_8021Q)) { - hns3_set_field(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1); - *out_vtag = vlan_tag; + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE){ + hns3_set_field(*out_vlan_flag, + HNS3_TXD_OVLAN_B, 1); + *out_vtag = vlan_tag; + } else { + hns3_set_field(*inner_vlan_flag, + HNS3_TXD_VLAN_B, 1); + *inner_vtag = vlan_tag; + } } else { hns3_set_field(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1); *inner_vtag = vlan_tag; @@ -1012,7 +993,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; struct hns3_desc *desc = &ring->desc[ring->next_to_use]; struct device *dev = ring_to_dev(ring); - u16 bdtp_fe_sc_vld_ra_ri = 0; struct skb_frag_struct *frag; unsigned int frag_buf_num; int k, sizeoflast; @@ -1042,12 +1022,10 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto); if (unlikely(ret)) return ret; - hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); - ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto, - &type_cs_vlan_tso, - &ol_type_vlan_len_msec); + + ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto, + &type_cs_vlan_tso, + &ol_type_vlan_len_msec); if (unlikely(ret)) return ret; @@ -1073,19 +1051,37 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); } - if (unlikely(dma_mapping_error(ring->dev, dma))) { + if (unlikely(dma_mapping_error(dev, dma))) { ring->stats.sw_err_cnt++; return -ENOMEM; } desc_cb->length = size; + if (likely(size <= HNS3_MAX_BD_SIZE)) { + u16 bdtp_fe_sc_vld_ra_ri = 0; + + desc_cb->priv = priv; + desc_cb->dma = dma; + desc_cb->type = type; + desc->addr = cpu_to_le64(dma); + desc->tx.send_size = cpu_to_le16(size); + hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end); + desc->tx.bdtp_fe_sc_vld_ra_ri = + cpu_to_le16(bdtp_fe_sc_vld_ra_ri); + + ring_ptr_move_fw(ring, next_to_use); + return 0; + } + frag_buf_num = hns3_tx_bd_count(size); sizeoflast = size & HNS3_TX_LAST_SIZE_M; sizeoflast = sizeoflast ? 
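/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * The split loop around this point cuts one oversized fragment into
 * HNS3_MAX_BD_SIZE chunks plus a remainder, and the statement this note
 * interrupts computes that remainder:
 *
 *	sizeoflast = size & HNS3_TX_LAST_SIZE_M;
 *	if (!sizeoflast)
 *		sizeoflast = HNS3_MAX_BD_SIZE;
 *
 * i.e. the low-order bits give the tail size, and an exact multiple is
 * rounded up so the final BD is a full one rather than empty: a frag of
 * two and a half buffers becomes BDs of (max, max, half), while exactly
 * two buffers becomes (max, max).
 */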
sizeoflast : HNS3_MAX_BD_SIZE; /* When frag size is bigger than hardware limit, split this frag */ for (k = 0; k < frag_buf_num; k++) { + u16 bdtp_fe_sc_vld_ra_ri = 0; + /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */ desc_cb->priv = priv; desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k; @@ -1112,64 +1108,92 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv, return 0; } -static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) +static int hns3_nic_bd_num(struct sk_buff *skb) { - struct sk_buff *skb = *out_skb; - struct sk_buff *new_skb = NULL; - struct skb_frag_struct *frag; - int bdnum_for_frag; - int frag_num; - int buf_num; - int size; - int i; + int size = skb_headlen(skb); + int i, bd_num; - size = skb_headlen(skb); - buf_num = hns3_tx_bd_count(size); + /* if the total len is within the max bd limit */ + if (likely(skb->len <= HNS3_MAX_BD_SIZE)) + return skb_shinfo(skb)->nr_frags + 1; + + bd_num = hns3_tx_bd_count(size); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + int frag_bd_num; - frag_num = skb_shinfo(skb)->nr_frags; - for (i = 0; i < frag_num; i++) { - frag = &skb_shinfo(skb)->frags[i]; size = skb_frag_size(frag); - bdnum_for_frag = hns3_tx_bd_count(size); - if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) + frag_bd_num = hns3_tx_bd_count(size); + + if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG)) return -ENOMEM; - buf_num += bdnum_for_frag; + bd_num += frag_bd_num; } - if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = hns3_tx_bd_count(skb->len); - if (ring_space(ring) < buf_num) - return -EBUSY; - /* manual split the send packet */ - new_skb = skb_copy(skb, GFP_ATOMIC); - if (!new_skb) - return -ENOMEM; - dev_kfree_skb_any(skb); - *out_skb = new_skb; - } + return bd_num; +} - if (unlikely(ring_space(ring) < buf_num)) - return -EBUSY; +static unsigned int hns3_gso_hdr_len(struct sk_buff *skb) +{ + if (!skb->encapsulation) + return skb_transport_offset(skb) + tcp_hdrlen(skb); - *bnum = buf_num; - return 0; + return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); } -static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, - struct hns3_enet_ring *ring) +/* HW needs every continuous group of 8 buffers to carry more data than the + * MSS; we simplify this by ensuring that skb_headlen plus the first + * continuous 7 frags are larger than the gso header len plus one mss, and + * that every later run of 7 continuous frags is larger than the MSS, except + * for the last 7 frags. + */ +static bool hns3_skb_need_linearized(struct sk_buff *skb) +{ + int bd_limit = HNS3_MAX_BD_PER_FRAG - 1; + unsigned int tot_len = 0; + int i; + + for (i = 0; i < bd_limit; i++) + tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); + + /* ensure headlen + the first 7 frags are greater than mss + header + * and the first 7 frags alone are greater than mss.
+ */ + if (((tot_len + skb_headlen(skb)) < (skb_shinfo(skb)->gso_size + + hns3_gso_hdr_len(skb))) || (tot_len < skb_shinfo(skb)->gso_size)) + return true; + + /* ensure the remaining continuous 7 buffer is greater than mss */ + for (i = 0; i < (skb_shinfo(skb)->nr_frags - bd_limit - 1); i++) { + tot_len -= skb_frag_size(&skb_shinfo(skb)->frags[i]); + tot_len += skb_frag_size(&skb_shinfo(skb)->frags[i + bd_limit]); + + if (tot_len < skb_shinfo(skb)->gso_size) + return true; + } + + return false; +} + +static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, + struct sk_buff **out_skb) { struct sk_buff *skb = *out_skb; - struct sk_buff *new_skb = NULL; - int buf_num; + int bd_num; - /* No. of segments (plus a header) */ - buf_num = skb_shinfo(skb)->nr_frags + 1; + bd_num = hns3_nic_bd_num(skb); + if (bd_num < 0) + return bd_num; - if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { - buf_num = hns3_tx_bd_count(skb->len); - if (ring_space(ring) < buf_num) + if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) { + struct sk_buff *new_skb; + + if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb)) + goto out; + + bd_num = hns3_tx_bd_count(skb->len); + if (unlikely(ring_space(ring) < bd_num)) return -EBUSY; /* manual split the send packet */ new_skb = skb_copy(skb, GFP_ATOMIC); @@ -1177,14 +1201,17 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum, return -ENOMEM; dev_kfree_skb_any(skb); *out_skb = new_skb; + + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_copy++; + u64_stats_update_end(&ring->syncp); } - if (unlikely(ring_space(ring) < buf_num)) +out: + if (unlikely(ring_space(ring) < bd_num)) return -EBUSY; - *bnum = buf_num; - - return 0; + return bd_num; } static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) @@ -1197,6 +1224,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) if (ring->next_to_use == next_to_use_orig) break; + /* rollback one */ + ring_ptr_move_bw(ring, next_to_use); + /* unmap the descriptor dma address */ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB) dma_unmap_single(dev, @@ -1210,9 +1240,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) DMA_TO_DEVICE); ring->desc_cb[ring->next_to_use].length = 0; - - /* rollback one */ - ring_ptr_move_bw(ring, next_to_use); + ring->desc_cb[ring->next_to_use].dma = 0; } } @@ -1225,7 +1253,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) struct netdev_queue *dev_queue; struct skb_frag_struct *frag; int next_to_use_head; - int next_to_use_frag; int buf_num; int seg_num; int size; @@ -1235,22 +1262,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) /* Prefetch the data used later */ prefetch(skb->data); - switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) { - case -EBUSY: - u64_stats_update_begin(&ring->syncp); - ring->stats.tx_busy++; - u64_stats_update_end(&ring->syncp); + buf_num = hns3_nic_maybe_stop_tx(ring, &skb); + if (unlikely(buf_num <= 0)) { + if (buf_num == -EBUSY) { + u64_stats_update_begin(&ring->syncp); + ring->stats.tx_busy++; + u64_stats_update_end(&ring->syncp); + goto out_net_tx_busy; + } else if (buf_num == -ENOMEM) { + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + } - goto out_net_tx_busy; - case -ENOMEM: - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - netdev_err(netdev, "no memory to xmit!\n"); + if 
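/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * The reworked hns3_nic_maybe_stop_tx() above now returns the BD count
 * (or a negative errno) instead of filling an out-parameter, and its
 * decision flow is:
 *
 *	bd_num = hns3_nic_bd_num(skb);
 *	if (bd_num > HNS3_MAX_BD_PER_FRAG) {
 *		if (skb_is_gso(skb) && !hns3_skb_need_linearized(skb))
 *			goto out;	(send as-is, no copy)
 *		...skb_copy() fallback, counted in stats.tx_copy...
 *	}
 *
 * A GSO skb that already satisfies the 8-BD window rule is sent without
 * linearizing even though it spans more than HNS3_MAX_BD_PER_FRAG
 * descriptors, and the new tx_copy counter makes the remaining copy
 * fallbacks visible in the ring statistics.
 */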
(net_ratelimit()) + netdev_err(netdev, "xmit error: %d!\n", buf_num); goto out_err_tx_ok; - default: - break; } /* No. of segments (plus a header) */ @@ -1263,9 +1291,8 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) ret = hns3_fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0, DESC_TYPE_SKB); if (unlikely(ret)) - goto head_fill_err; + goto fill_err; - next_to_use_frag = ring->next_to_use; /* Fill the fragments */ for (i = 1; i < seg_num; i++) { frag = &skb_shinfo(skb)->frags[i - 1]; @@ -1276,7 +1303,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) DESC_TYPE_PAGE); if (unlikely(ret)) - goto frag_fill_err; + goto fill_err; } /* Complete translate all packets */ @@ -1289,10 +1316,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_OK; -frag_fill_err: - hns3_clear_desc(ring, next_to_use_frag); - -head_fill_err: +fill_err: hns3_clear_desc(ring, next_to_use_head); out_err_tx_ok: @@ -1355,13 +1379,6 @@ static int hns3_nic_set_features(struct net_device *netdev, bool enable; int ret; - if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) { - if (features & (NETIF_F_TSO | NETIF_F_TSO6)) - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - else - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; - } - if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { enable = !!(features & NETIF_F_GRO_HW); ret = h->ae_algo->ops->set_gro_en(h, enable); @@ -1574,6 +1591,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) struct hnae3_handle *h = hns3_get_handle(netdev); int ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; @@ -1590,13 +1610,19 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) { struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = hns3_get_handle(ndev); struct hns3_enet_ring *tx_ring = NULL; + struct napi_struct *napi; int timeout_queue = 0; int hw_head, hw_tail; + int fbd_num, fbd_oft; + int ebd_num, ebd_oft; + int bd_num, bd_err; + int ring_en, tc; int i; /* Find the stopped queue the same way the stack does */ - for (i = 0; i < ndev->real_num_tx_queues; i++) { + for (i = 0; i < ndev->num_tx_queues; i++) { struct netdev_queue *q; unsigned long trans_start; @@ -1617,21 +1643,66 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev) return false; } + priv->tx_timeout_count++; + tx_ring = priv->ring_data[timeout_queue].ring; + napi = &tx_ring->tqp_vector->napi; + + netdev_info(ndev, + "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n", + priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, + tx_ring->next_to_clean, napi->state); + + netdev_info(ndev, + "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n", + tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, + tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt); + + netdev_info(ndev, + "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n", + tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt, + tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); + + /* When the MAC receives many continuous pause frames, it is unable to + * send packets, which may cause a tx timeout + */ + if (h->ae_algo->ops->update_stats && + h->ae_algo->ops->get_mac_pause_stats) { + u64 tx_pause_cnt, rx_pause_cnt; + + h->ae_algo->ops->update_stats(h, &ndev->stats); +
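/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * Reporting MAC pause counters next to the ring registers helps
 * classify a TX timeout: an otherwise healthy ring (HEAD == TAIL, no
 * BD errors) combined with a climbing rx_pause_cnt points at a pause
 * storm from the link partner rather than a driver or hardware fault.
 * A minimal consumer of such an op could look like this, where
 * last_rx_pause and PAUSE_STORM_THRESHOLD are hypothetical:
 *
 *	u64 tx_pause, rx_pause;
 *
 *	ops->get_mac_pause_stats(handle, &tx_pause, &rx_pause);
 *	if (rx_pause - last_rx_pause > PAUSE_STORM_THRESHOLD)
 *		netdev_warn(ndev, "pause storm suspected\n");
 */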
h->ae_algo->ops->get_mac_pause_stats(h, &tx_pause_cnt, + &rx_pause_cnt); + netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n", + tx_pause_cnt, rx_pause_cnt); + } hw_head = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG); hw_tail = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + fbd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG); + fbd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_OFFSET_REG); + ebd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBDNUM_REG); + ebd_oft = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_EBD_OFFSET_REG); + bd_num = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG); + bd_err = readl_relaxed(tx_ring->tqp->io_base + + HNS3_RING_TX_RING_BD_ERR_REG); + ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG); + tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG); + netdev_info(ndev, - "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n", - priv->tx_timeout_count, - timeout_queue, - tx_ring->next_to_use, - tx_ring->next_to_clean, - hw_head, - hw_tail, + "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n", + bd_num, hw_head, hw_tail, bd_err, readl(tx_ring->tqp_vector->mask_addr)); + netdev_info(ndev, + "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n", + ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft); return true; } @@ -1644,8 +1715,6 @@ static void hns3_nic_net_timeout(struct net_device *ndev) if (!hns3_get_tx_timeo_queue_info(ndev)) return; - priv->tx_timeout_count++; - /* request the reset, and let the hclge to determine * which reset level should be done */ @@ -1670,7 +1739,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan, }; -static bool hns3_is_phys_func(struct pci_dev *pdev) +bool hns3_is_phys_func(struct pci_dev *pdev) { u32 dev_id = pdev->device; @@ -2118,17 +2187,30 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) ring->desc[i].rx.bd_base_info = 0; } -static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, - int *pkts) +static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head, + int *bytes, int *pkts) { - struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + int ntc = ring->next_to_clean; + struct hns3_desc_cb *desc_cb; - (*pkts) += (desc_cb->type == DESC_TYPE_SKB); - (*bytes) += desc_cb->length; - /* desc_cb will be cleaned, after hnae3_free_buffer_detach*/ - hns3_free_buffer_detach(ring, ring->next_to_clean); + while (head != ntc) { + desc_cb = &ring->desc_cb[ntc]; + (*pkts) += (desc_cb->type == DESC_TYPE_SKB); + (*bytes) += desc_cb->length; + /* desc_cb will be cleaned, after hnae3_free_buffer_detach */ + hns3_free_buffer_detach(ring, ntc); - ring_ptr_move_fw(ring, next_to_clean); + if (++ntc == ring->desc_num) + ntc = 0; + + /* Issue prefetch for next Tx descriptor */ + prefetch(&ring->desc_cb[ntc]); + } + + /* This smp_store_release() pairs with smp_load_acquire() in + * ring_space called by hns3_nic_net_xmit. 
+ */ + smp_store_release(&ring->next_to_clean, ntc); } static int is_valid_clean_head(struct hns3_enet_ring *ring, int h) @@ -2168,11 +2250,7 @@ void hns3_clean_tx_ring(struct hns3_enet_ring *ring) bytes = 0; pkts = 0; - while (head != ring->next_to_clean) { - hns3_nic_reclaim_one_desc(ring, &bytes, &pkts); - /* Issue prefetch for next Tx descriptor */ - prefetch(&ring->desc_cb[ring->next_to_clean]); - } + hns3_nic_reclaim_desc(ring, head, &bytes, &pkts); ring->tqp_vector->tx_group.total_bytes += bytes; ring->tqp_vector->tx_group.total_packets += pkts; @@ -2234,6 +2312,10 @@ hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) break; } hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); + + u64_stats_update_begin(&ring->syncp); + ring->stats.non_reuse_pg++; + u64_stats_update_end(&ring->syncp); } ring_ptr_move_fw(ring, next_to_use); @@ -2247,64 +2329,78 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) { - struct hns3_desc *desc; - u32 truesize; - int size; - int last_offset; - bool twobufs; - - twobufs = ((PAGE_SIZE < 8192) && - hnae3_buf_size(ring) == HNS3_BUFFER_SIZE_2048); - - desc = &ring->desc[ring->next_to_clean]; - size = le16_to_cpu(desc->rx.size); - - truesize = hnae3_buf_size(ring); - - if (!twobufs) - last_offset = hnae3_page_size(ring) - hnae3_buf_size(ring); + struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; + int size = le16_to_cpu(desc->rx.size); + u32 truesize = hnae3_buf_size(ring); skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len, size - pull_len, truesize); - /* Avoid re-using remote pages,flag default unreuse */ - if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id())) - return; - - if (twobufs) { - /* If we are only owner of page we can reuse it */ - if (likely(page_count(desc_cb->priv) == 1)) { - /* Flip page offset to other buffer */ - desc_cb->page_offset ^= truesize; - - desc_cb->reuse_flag = 1; - /* bump ref count on page before it is given*/ - get_page(desc_cb->priv); - } + /* Avoid re-using remote pages, or the stack is still using the page + * when page_offset rollback to zero, flag default unreuse + */ + if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()) || + (!desc_cb->page_offset && page_count(desc_cb->priv) > 1)) return; - } /* Move offset up to the next cache line */ desc_cb->page_offset += truesize; - if (desc_cb->page_offset <= last_offset) { + if (desc_cb->page_offset + truesize <= hnae3_page_size(ring)) { desc_cb->reuse_flag = 1; /* Bump ref count on page before it is given*/ get_page(desc_cb->priv); + } else if (page_count(desc_cb->priv) == 1) { + desc_cb->reuse_flag = 1; + desc_cb->page_offset = 0; + get_page(desc_cb->priv); } } +static int hns3_gro_complete(struct sk_buff *skb) +{ + __be16 type = skb->protocol; + struct tcphdr *th; + int depth = 0; + + while (type == htons(ETH_P_8021Q)) { + struct vlan_hdr *vh; + + if ((depth + VLAN_HLEN) > skb_headlen(skb)) + return -EFAULT; + + vh = (struct vlan_hdr *)(skb->data + depth); + type = vh->h_vlan_encapsulated_proto; + depth += VLAN_HLEN; + } + + if (type == htons(ETH_P_IP)) { + depth += sizeof(struct iphdr); + } else if (type == htons(ETH_P_IPV6)) { + depth += sizeof(struct ipv6hdr); + } else { + netdev_err(skb->dev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", + be16_to_cpu(type), depth); + return -EFAULT; + } + + th = (struct tcphdr *)(skb->data + depth); + skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; + if (th->cwr) + 
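/*
 * [Editor's aside: illustrative sketch only, not part of this patch.]
 * hns3_gro_complete() above takes over the work the removed
 * tcp_gro_complete() call used to do for firmware-coalesced packets:
 * it walks any number of stacked VLAN headers to locate the L3 header,
 *
 *	while (type == htons(ETH_P_8021Q)) {
 *		vh = (struct vlan_hdr *)(skb->data + depth);
 *		type = vh->h_vlan_encapsulated_proto;
 *		depth += VLAN_HLEN;
 *	}
 *
 * copies the hardware segment count into gso_segs, and -- in the check
 * continuing right after this note -- marks the skb SKB_GSO_TCP_ECN
 * when the TCP CWR flag is set, so ECN marking survives aggregation.
 */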
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + return 0; +} + static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, - struct hns3_desc *desc) + u32 l234info, u32 bd_base_info, u32 ol_info) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int l3_type, l4_type; - u32 bd_base_info; int ol4_type; - u32 l234info; - - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - l234info = le32_to_cpu(desc->rx.l234_info); skb->ip_summed = CHECKSUM_NONE; @@ -2313,12 +2409,6 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (!(netdev->features & NETIF_F_RXCSUM)) return; - /* We MUST enable hardware checksum before enabling hardware GRO */ - if (skb_shinfo(skb)->gso_size) { - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - } - /* check if hardware has done checksum */ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B))) return; @@ -2333,7 +2423,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, return; } - ol4_type = hnae3_get_field(l234info, HNS3_RXD_OL4ID_M, + ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S); switch (ol4_type) { case HNS3_OL4_TYPE_MAC_IN_UDP: @@ -2371,6 +2461,7 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, struct hns3_desc *desc, u32 l234info, u16 *vlan_tag) { + struct hnae3_handle *handle = ring->tqp->handle; struct pci_dev *pdev = ring->tqp->handle->pdev; if (pdev->revision == 0x20) { @@ -2383,15 +2474,36 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, #define HNS3_STRP_OUTER_VLAN 0x1 #define HNS3_STRP_INNER_VLAN 0x2 +#define HNS3_STRP_BOTH 0x3 + /* Hardware always inserts the stripped VLAN tag into the RX descriptor + * when it removes the tag from the packet, so the driver needs to + * determine which tag to report to the stack.
+ */ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M, HNS3_RXD_STRP_TAGP_S)) { case HNS3_STRP_OUTER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); return true; case HNS3_STRP_INNER_VLAN: + if (handle->port_base_vlan_state != + HNAE3_PORT_BASE_VLAN_DISABLE) + return false; + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); return true; + case HNS3_STRP_BOTH: + if (handle->port_base_vlan_state == + HNAE3_PORT_BASE_VLAN_DISABLE) + *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); + else + *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); + + return true; default: return false; } @@ -2513,8 +2625,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, return 0; } -static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, - u32 bd_base_info) +static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, + struct sk_buff *skb, u32 l234info, + u32 bd_base_info, u32 ol_info) { u16 gro_count; u32 l3_type; @@ -2522,12 +2635,11 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M, HNS3_RXD_GRO_COUNT_S); /* if there is no HW GRO, do not set gro params */ - if (!gro_count) - return; + if (!gro_count) { + hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info); + return 0; + } - /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count - * to skb_shinfo(skb)->gso_segs - */ NAPI_GRO_CB(skb)->count = gro_count; l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, @@ -2537,47 +2649,121 @@ static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, else if (l3_type == HNS3_L3_TYPE_IPV6) skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; else - return; + return -EFAULT; skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, HNS3_RXD_GRO_SIZE_M, HNS3_RXD_GRO_SIZE_S); - if (skb_shinfo(skb)->gso_size) - tcp_gro_complete(skb); + + return hns3_gro_complete(skb); } static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, - struct sk_buff *skb) + struct sk_buff *skb, u32 rss_hash) { struct hnae3_handle *handle = ring->tqp->handle; enum pkt_hash_types rss_type; - struct hns3_desc *desc; - int last_bd; - - /* When driver handle the rss type, ring->next_to_clean indicates the - * first descriptor of next packet, need -1 here. - */ - last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num; - desc = &ring->desc[last_bd]; - if (le32_to_cpu(desc->rx.rss_hash)) + if (rss_hash) rss_type = handle->kinfo.rss_type; else rss_type = PKT_HASH_TYPE_NONE; - skb_set_hash(skb, le32_to_cpu(desc->rx.rss_hash), rss_type); + skb_set_hash(skb, rss_hash, rss_type); } -static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb) +static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; enum hns3_pkt_l2t_type l2_frame_type; + u32 bd_base_info, l234info, ol_info; + struct hns3_desc *desc; + unsigned int len; + int pre_ntc, ret; + + /* bdinfo handled below is only valid on the last BD of the + * current packet, and ring->next_to_clean indicates the first + * descriptor of next packet, so need - 1 below. + */ + pre_ntc = ring->next_to_clean ? 
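+	/* e.g. with desc_num = 512 (an illustrative value): next_to_clean
+	 * of 0 wraps back to index 511, and next_to_clean of 200 selects
+	 * index 199. The conditional avoids the modulo of the equivalent
+	 * (ntc - 1 + desc_num) % desc_num form.
+	 */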
(ring->next_to_clean - 1) : + (ring->desc_num - 1); + desc = &ring->desc[pre_ntc]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + l234info = le32_to_cpu(desc->rx.l234_info); + ol_info = le32_to_cpu(desc->rx.ol_info); + + /* Based on hw strategy, the tag offloaded will be stored at + * ot_vlan_tag in two layer tag case, and stored at vlan_tag + * in one layer tag case. + */ + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + u16 vlan_tag; + + if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + vlan_tag); + } + + if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { + u64_stats_update_begin(&ring->syncp); + ring->stats.non_vld_descs++; + u64_stats_update_end(&ring->syncp); + + return -EINVAL; + } + + if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | + BIT(HNS3_RXD_L2E_B))))) { + u64_stats_update_begin(&ring->syncp); + if (l234info & BIT(HNS3_RXD_L2E_B)) + ring->stats.l2_err++; + else + ring->stats.err_pkt_len++; + u64_stats_update_end(&ring->syncp); + + return -EFAULT; + } + + len = skb->len; + + /* Do update ip stack process */ + skb->protocol = eth_type_trans(skb, netdev); + + /* This is needed in order to enable forwarding support */ + ret = hns3_set_gro_and_checksum(ring, skb, l234info, + bd_base_info, ol_info); + if (unlikely(ret)) { + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_err_cnt++; + u64_stats_update_end(&ring->syncp); + return ret; + } + + l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, + HNS3_RXD_DMAC_S); + + u64_stats_update_begin(&ring->syncp); + ring->stats.rx_pkts++; + ring->stats.rx_bytes += len; + + if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) + ring->stats.rx_multicast++; + + u64_stats_update_end(&ring->syncp); + + ring->tqp_vector->rx_group.total_bytes += len; + + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); + return 0; +} + +static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, + struct sk_buff **out_skb) +{ struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; u32 bd_base_info; - u32 l234info; int length; int ret; @@ -2637,64 +2823,13 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ALIGN(ring->pull_len, sizeof(long))); } - l234info = le32_to_cpu(desc->rx.l234_info); - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - - /* Based on hw strategy, the tag offloaded will be stored at - * ot_vlan_tag in two layer tag case, and stored at vlan_tag - * in one layer tag case. 
- */ - if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { - u16 vlan_tag; - - if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag)) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - } - - if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B)))) { - u64_stats_update_begin(&ring->syncp); - ring->stats.non_vld_descs++; - u64_stats_update_end(&ring->syncp); - - dev_kfree_skb_any(skb); - return -EINVAL; - } - - if (unlikely((!desc->rx.pkt_len) || - (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | - BIT(HNS3_RXD_L2E_B))))) { - u64_stats_update_begin(&ring->syncp); - if (l234info & BIT(HNS3_RXD_L2E_B)) - ring->stats.l2_err++; - else - ring->stats.err_pkt_len++; - u64_stats_update_end(&ring->syncp); - + ret = hns3_handle_bdinfo(ring, skb); + if (unlikely(ret)) { dev_kfree_skb_any(skb); - return -EFAULT; + return ret; } - - l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M, - HNS3_RXD_DMAC_S); - u64_stats_update_begin(&ring->syncp); - if (l2_frame_type == HNS3_L2_TYPE_MULTICAST) - ring->stats.rx_multicast++; - - ring->stats.rx_pkts++; - ring->stats.rx_bytes += skb->len; - u64_stats_update_end(&ring->syncp); - - ring->tqp_vector->rx_group.total_bytes += skb->len; - - /* This is needed in order to enable forwarding support */ - hns3_set_gro_param(skb, l234info, bd_base_info); - - hns3_rx_checksum(ring, skb, desc); *out_skb = skb; - hns3_set_rx_skb_rss_type(ring, skb); return 0; } @@ -2704,9 +2839,8 @@ int hns3_clean_rx_ring( void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) { #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 - struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int recv_pkts, recv_bds, clean_count, err; - int unused_count = hns3_desc_unused(ring) - ring->pending_buf; + int unused_count = hns3_desc_unused(ring); struct sk_buff *skb = ring->skb; int num; @@ -2715,6 +2849,7 @@ int hns3_clean_rx_ring( recv_pkts = 0, recv_bds = 0, clean_count = 0; num -= unused_count; + unused_count -= ring->pending_buf; while (recv_pkts < budget && recv_bds < num) { /* Reuse or realloc buffers */ @@ -2741,8 +2876,6 @@ int hns3_clean_rx_ring( continue; } - /* Do update ip stack process */ - skb->protocol = eth_type_trans(skb, netdev); rx_fn(ring, skb); recv_bds += ring->pending_buf; clean_count += ring->pending_buf; @@ -2892,7 +3025,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) struct hns3_enet_tqp_vector *tqp_vector = container_of(napi, struct hns3_enet_tqp_vector, napi); bool clean_complete = true; - int rx_budget; + int rx_budget = budget; if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { napi_complete(napi); @@ -2906,7 +3039,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) hns3_clean_tx_ring(ring); /* make sure rx ring budget not smaller than 1 */ - rx_budget = max(budget / tqp_vector->num_tqps, 1); + if (tqp_vector->num_tqps > 1) + rx_budget = max(budget / tqp_vector->num_tqps, 1); hns3_for_each_ring(ring, tqp_vector->rx_group) { int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget, @@ -3315,6 +3449,7 @@ err: } devm_kfree(&pdev->dev, priv->ring_data); + priv->ring_data = NULL; return ret; } @@ -3323,12 +3458,16 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) struct hnae3_handle *h = priv->ae_handle; int i; + if (!priv->ring_data) + return; + for (i = 0; i < h->kinfo.num_tqps; i++) { devm_kfree(priv->dev, priv->ring_data[i].ring); devm_kfree(priv->dev, priv->ring_data[i + h->kinfo.num_tqps].ring); } devm_kfree(priv->dev, priv->ring_data); + priv->ring_data = NULL; } static int 
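/* The hunk below moves the desc_cb array from kcalloc() to
 * devm_kcalloc(): the allocation becomes device-managed, so it is
 * released automatically when the device is unbound, and the error
 * and fini paths switch to devm_kfree() to match.
 */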
hns3_alloc_ring_memory(struct hns3_enet_ring *ring) @@ -3338,8 +3477,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) if (ring->desc_num <= 0 || ring->buf_size <= 0) return -EINVAL; - ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]), - GFP_KERNEL); + ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, + sizeof(ring->desc_cb[0]), GFP_KERNEL); if (!ring->desc_cb) { ret = -ENOMEM; goto out; @@ -3360,7 +3499,7 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) out_with_desc: hns3_free_desc(ring); out_with_desc_cb: - kfree(ring->desc_cb); + devm_kfree(ring_to_dev(ring), ring->desc_cb); ring->desc_cb = NULL; out: return ret; @@ -3369,7 +3508,7 @@ out: static void hns3_fini_ring(struct hns3_enet_ring *ring) { hns3_free_desc(ring); - kfree(ring->desc_cb); + devm_kfree(ring_to_dev(ring), ring->desc_cb); ring->desc_cb = NULL; ring->next_to_clean = 0; ring->next_to_use = 0; @@ -3556,17 +3695,6 @@ static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list) h->ae_algo->ops->del_all_fd_entries(h, clear_list); } -static void hns3_nic_set_priv_ops(struct net_device *netdev) -{ - struct hns3_nic_priv *priv = netdev_priv(netdev); - - if ((netdev->features & NETIF_F_TSO) || - (netdev->features & NETIF_F_TSO6)) - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso; - else - priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; -} - static int hns3_client_start(struct hnae3_handle *handle) { if (!handle->ae_algo->ops->client_start) @@ -3583,6 +3711,21 @@ static void hns3_client_stop(struct hnae3_handle *handle) handle->ae_algo->ops->client_stop(handle); } +static void hns3_info_show(struct hns3_nic_priv *priv) +{ + struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; + + dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr); + dev_info(priv->dev, "Task queue pairs numbers: %d\n", kinfo->num_tqps); + dev_info(priv->dev, "RSS size: %d\n", kinfo->rss_size); + dev_info(priv->dev, "Allocated RSS size: %d\n", kinfo->req_rss_size); + dev_info(priv->dev, "RX buffer length: %d\n", kinfo->rx_buf_len); + dev_info(priv->dev, "Desc num per TX queue: %d\n", kinfo->num_tx_desc); + dev_info(priv->dev, "Desc num per RX queue: %d\n", kinfo->num_rx_desc); + dev_info(priv->dev, "Total number of enabled TCs: %d\n", kinfo->num_tc); + dev_info(priv->dev, "Max mtu size: %d\n", priv->netdev->max_mtu); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -3604,6 +3747,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->tx_timeout_count = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); + handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); + handle->kinfo.netdev = netdev; handle->priv = (void *)priv; @@ -3616,7 +3761,6 @@ static int hns3_client_init(struct hnae3_handle *handle) netdev->netdev_ops = &hns3_nic_netdev_ops; SET_NETDEV_DEV(netdev, &pdev->dev); hns3_ethtool_set_ops(netdev); - hns3_nic_set_priv_ops(netdev); /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); @@ -3670,6 +3814,9 @@ static int hns3_client_init(struct hnae3_handle *handle) set_bit(HNS3_NIC_STATE_INITED, &priv->state); + if (netif_msg_drv(handle)) + hns3_info_show(priv); + return ret; out_client_start: @@ -3696,13 +3843,13 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - hns3_client_stop(handle); - hns3_remove_hw_addr(netdev); if (netdev->reg_state != 
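	/* reg_state guards against unregistering a netdev that never
	 * completed register_netdev(). The reordering below, which calls
	 * hns3_client_stop() only after unregister_netdev(), presumably
	 * ensures the client is stopped once the stack can no longer
	 * submit new work to the device.
	 */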
NETREG_UNINITIALIZED) unregister_netdev(netdev); + hns3_client_stop(handle); + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { netdev_warn(netdev, "already uninitialized\n"); goto out_netdev_free; @@ -3728,8 +3875,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_dbg_uninit(handle); - priv->ring_data = NULL; - out_netdev_free: free_netdev(netdev); } @@ -3744,11 +3889,13 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup) if (linkup) { netif_carrier_on(netdev); netif_tx_wake_all_queues(netdev); - netdev_info(netdev, "link up\n"); + if (netif_msg_link(handle)) + netdev_info(netdev, "link up\n"); } else { netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - netdev_info(netdev, "link down\n"); + if (netif_msg_link(handle)) + netdev_info(netdev, "link down\n"); } } @@ -3772,12 +3919,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev) struct netdev_hw_addr *ha, *tmp; int ret = 0; + netif_addr_lock_bh(ndev); /* go through and sync uc_addr entries to the device */ list = &ndev->uc; list_for_each_entry_safe(ha, tmp, &list->list, list) { ret = hns3_nic_uc_sync(ndev, ha->addr); if (ret) - return ret; + goto out; } /* go through and sync mc_addr entries to the device */ @@ -3785,9 +3933,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev) list_for_each_entry_safe(ha, tmp, &list->list, list) { ret = hns3_nic_mc_sync(ndev, ha->addr); if (ret) - return ret; + goto out; } +out: + netif_addr_unlock_bh(ndev); return ret; } @@ -3798,6 +3948,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev) hns3_nic_uc_unsync(netdev, netdev->dev_addr); + netif_addr_lock_bh(netdev); /* go through and unsync uc_addr entries to the device */ list = &netdev->uc; list_for_each_entry_safe(ha, tmp, &list->list, list) @@ -3808,6 +3959,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev) list_for_each_entry_safe(ha, tmp, &list->list, list) if (ha->refcount > 1) hns3_nic_mc_unsync(netdev, ha->addr); + + netif_addr_unlock_bh(netdev); } static void hns3_clear_tx_ring(struct hns3_enet_ring *ring) @@ -3849,6 +4002,13 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring) ring_ptr_move_fw(ring, next_to_use); } + /* Free the pending skb in rx ring */ + if (ring->skb) { + dev_kfree_skb_any(ring->skb); + ring->skb = NULL; + ring->pending_buf = 0; + } + return 0; } @@ -4047,18 +4207,24 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_uninit_vector; + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! 
ret=%d\n", ret); + goto err_uninit_ring; + } + set_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; +err_uninit_ring: + hns3_uninit_all_ring(priv); err_uninit_vector: hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; err_dealloc_vector: hns3_nic_dealloc_vector_data(priv); err_put_ring: hns3_put_ring_config(priv); - priv->ring_data = NULL; return ret; } @@ -4100,7 +4266,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; - if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { netdev_warn(netdev, "already uninitialized\n"); return 0; } @@ -4120,9 +4286,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) netdev_err(netdev, "uninit ring error\n"); hns3_put_ring_config(priv); - priv->ring_data = NULL; - - clear_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 75669cd0c311..408efd55ba48 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -42,8 +42,10 @@ enum hns3_nic_state { #define HNS3_RING_TX_RING_HEAD_REG 0x0005C #define HNS3_RING_TX_RING_FBDNUM_REG 0x00060 #define HNS3_RING_TX_RING_OFFSET_REG 0x00064 +#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068 #define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C - +#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070 +#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074 #define HNS3_RING_PREFETCH_EN_REG 0x0007C #define HNS3_RING_CFG_VF_NUM_REG 0x00080 #define HNS3_RING_ASID_REG 0x0008C @@ -143,7 +145,7 @@ enum hns3_nic_state { #define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) #define HNS3_RXD_LKBK_B 15 #define HNS3_RXD_GRO_SIZE_S 16 -#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S) +#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S) #define HNS3_TXD_L3T_S 0 #define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) @@ -374,6 +376,7 @@ struct ring_stats { u64 tx_err_cnt; u64 restart_queue; u64 tx_busy; + u64 tx_copy; }; struct { u64 rx_pkts; @@ -386,6 +389,7 @@ struct ring_stats { u64 l2_err; u64 l3l4_csum_err; u64 rx_multicast; + u64 non_reuse_pg; }; }; }; @@ -397,7 +401,6 @@ struct hns3_enet_ring { struct hns3_enet_ring *next; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_queue *tqp; - char ring_name[HNS3_RING_NAME_LEN]; struct device *dev; /* will be used for DMA mapping of descriptors */ /* statistic */ @@ -407,9 +410,6 @@ struct hns3_enet_ring { dma_addr_t desc_dma_addr; u32 buf_size; /* size for hnae_desc->addr, preset by AE */ u16 desc_num; /* total number of desc */ - u16 max_desc_num_per_pkt; - u16 max_raw_data_sz_per_desc; - u16 max_pkt_size; int next_to_use; /* idx of next spare desc */ /* idx of lastest sent desc, the ring is empty when equal to @@ -423,9 +423,6 @@ struct hns3_enet_ring { u32 flag; /* ring attribute */ - int numa_node; - cpumask_t affinity_mask; - int pending_buf; struct sk_buff *skb; struct sk_buff *tail_skb; @@ -442,11 +439,6 @@ struct hns3_nic_ring_data { void (*fini_process)(struct hns3_nic_ring_data *); }; -struct hns3_nic_ops { - int (*maybe_stop_tx)(struct sk_buff **out_skb, - int *bnum, struct hns3_enet_ring *ring); -}; - enum hns3_flow_level_range { HNS3_FLOW_LOW = 0, HNS3_FLOW_MID = 1, @@ -536,7 +528,6 @@ struct hns3_nic_priv { u32 port_id; struct net_device *netdev; struct device *dev; - struct hns3_nic_ops ops; /** * the cb for nic to manage 
the ring buffer, the first half of the @@ -577,18 +568,16 @@ union l4_hdr_info { unsigned char *hdr; }; -/* the distance between [begin, end) in a ring buffer - * note: there is a unuse slot between the begin and the end - */ -static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end) -{ - return (end - begin + ring->desc_num) % ring->desc_num; -} - static inline int ring_space(struct hns3_enet_ring *ring) { - return ring->desc_num - - ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1; + /* This smp_load_acquire() pairs with smp_store_release() in + * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring. + */ + int begin = smp_load_acquire(&ring->next_to_clean); + int end = READ_ONCE(ring->next_to_use); + + return ((end >= begin) ? (ring->desc_num - end + begin) : + (begin - end)) - 1; } static inline int is_ring_empty(struct hns3_enet_ring *ring) @@ -633,7 +622,7 @@ static inline bool hns3_nic_resetting(struct net_device *netdev) #define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \ (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG) -#define ring_to_dev(ring) (&(ring)->tqp->handle->pdev->dev) +#define ring_to_dev(ring) ((ring)->dev) #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \ DMA_TO_DEVICE : DMA_FROM_DEVICE) @@ -666,6 +655,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv); int hns3_uninit_all_ring(struct hns3_nic_priv *priv); int hns3_nic_reset_all_ring(struct hnae3_handle *h); netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev); +bool hns3_is_phys_func(struct pci_dev *pdev); int hns3_clean_rx_ring( struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 359d4731fb2d..f10f7df81ae6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -29,6 +29,7 @@ static const struct hns3_stats hns3_txq_stats[] = { HNS3_TQP_STAT("errors", tx_err_cnt), HNS3_TQP_STAT("wake", restart_queue), HNS3_TQP_STAT("busy", tx_busy), + HNS3_TQP_STAT("copy", tx_copy), }; #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats) @@ -48,6 +49,7 @@ static const struct hns3_stats hns3_rxq_stats[] = { HNS3_TQP_STAT("l2_err", l2_err), HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err), HNS3_TQP_STAT("multicast", rx_multicast), + HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg), }; #define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats) @@ -483,6 +485,11 @@ static void hns3_get_stats(struct net_device *netdev, struct hnae3_handle *h = hns3_get_handle(netdev); u64 *p = data; + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting, could not get stats\n"); + return; + } + if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) { netdev_err(netdev, "could not get any statistics\n"); return; @@ -648,6 +655,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev, static int hns3_set_link_ksettings(struct net_device *netdev, const struct ethtool_link_ksettings *cmd) { + /* Chip doesn't support this mode. 
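+	 * (1000M half duplex is not a valid configuration for this MAC,
+	 * so it is rejected here before any phydev or ae_algo handling.)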
*/ + if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF) + return -EINVAL; + /* Only support ksettings_set for netdev with phy attached for now */ if (netdev->phydev) return phy_ethtool_ksettings_set(netdev->phydev, cmd); @@ -1101,6 +1112,20 @@ static int hns3_set_phys_id(struct net_device *netdev, return h->ae_algo->ops->set_led_id(h, state); } +static u32 hns3_get_msglevel(struct net_device *netdev) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + return h->msg_enable; +} + +static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level) +{ + struct hnae3_handle *h = hns3_get_handle(netdev); + + h->msg_enable = msg_level; +} + static const struct ethtool_ops hns3vf_ethtool_ops = { .get_drvinfo = hns3_get_drvinfo, .get_ringparam = hns3_get_ringparam, @@ -1121,6 +1146,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, .get_link = hns3_get_link, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1150,6 +1177,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_regs_len = hns3_get_regs_len, .get_regs = hns3_get_regs, .set_phys_id = hns3_set_phys_id, + .get_msglevel = hns3_get_msglevel, + .set_msglevel = hns3_set_msglevel, }; void hns3_ethtool_set_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 64687b0d1c5f..c838cdd99105 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -356,7 +356,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) int ret; spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock_bh(&hdev->hw.cmq.crq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; @@ -365,7 +365,7 @@ int hclge_cmd_init(struct hclge_dev *hdev) hclge_cmd_init_regs(&hdev->hw); - spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); @@ -374,21 +374,26 @@ int hclge_cmd_init(struct hclge_dev *hdev) * reset may happen when lower level reset is being processed. 
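	 * Both the pending-reset bail-out and a failed firmware version
	 * query below now exit through err_cmd_init, which sets
	 * HCLGE_STATE_CMD_DISABLE again before returning.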
*/ if ((hclge_is_reset_pending(hdev))) { - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); - return -EBUSY; + ret = -EBUSY; + goto err_cmd_init; } ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, "firmware version query failed %d\n", ret); - return ret; + goto err_cmd_init; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; + +err_cmd_init: + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + return ret; } static void hclge_cmd_uninit_regs(struct hclge_hw *hw) @@ -412,7 +417,7 @@ static void hclge_destroy_queue(struct hclge_cmq_ring *ring) spin_unlock(&ring->lock); } -void hclge_destroy_cmd_queue(struct hclge_hw *hw) +static void hclge_destroy_cmd_queue(struct hclge_hw *hw) { hclge_destroy_queue(&hw->cmq.csq); hclge_destroy_queue(&hw->cmq.crq); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 3714733c96d9..d01f93eee845 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -109,6 +109,9 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, + HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310, + HCLGE_OPC_MAC_TNL_INT_EN = 0x0311, + HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312, HCLGE_OPC_SERDES_LOOPBACK = 0x0315, /* PFC/Pause commands */ @@ -237,6 +240,9 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* NCL config command */ + HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, + /* SFP command */ HCLGE_OPC_SFP_GET_SPEED = 0x7104, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 1192cf6f2321..a9ffb57c4607 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -901,6 +901,109 @@ static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) } } +static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev) +{ + dev_info(&hdev->pdev->dev, "PF reset count: %d\n", + hdev->rst_stats.pf_rst_cnt); + dev_info(&hdev->pdev->dev, "FLR reset count: %d\n", + hdev->rst_stats.flr_rst_cnt); + dev_info(&hdev->pdev->dev, "CORE reset count: %d\n", + hdev->rst_stats.core_rst_cnt); + dev_info(&hdev->pdev->dev, "GLOBAL reset count: %d\n", + hdev->rst_stats.global_rst_cnt); + dev_info(&hdev->pdev->dev, "IMP reset count: %d\n", + hdev->rst_stats.imp_rst_cnt); + dev_info(&hdev->pdev->dev, "reset done count: %d\n", + hdev->rst_stats.reset_done_cnt); + dev_info(&hdev->pdev->dev, "HW reset done count: %d\n", + hdev->rst_stats.hw_reset_done_cnt); + dev_info(&hdev->pdev->dev, "reset count: %d\n", + hdev->rst_stats.reset_cnt); +} + +/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file + * @hdev: pointer to struct hclge_dev + * @cmd_buf: string that contains offset and length + */ +static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev, char *cmd_buf) +{ +#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096 +#define HCLGE_MAX_NCL_CONFIG_LENGTH (20 + 24 * 4) +#define HCLGE_CMD_DATA_NUM 6 + + struct hclge_desc desc[5]; + u32 byte_offset; + int bd_num = 5; + int offset; + int length; + int data0; + int ret; + int i; + int j; + + ret = sscanf(cmd_buf, "%x %x", &offset, &length); + if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET || + length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) { + 
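+		/* reject input unless sscanf() parsed exactly two hex values,
+		 * the offset lies inside the 4 KiB NCL_CONFIG window, and
+		 * offset + length does not run past the end of that window
+		 */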
dev_err(&hdev->pdev->dev, "Invalid offset or length.\n"); + return; + } + if (offset < 0 || length <= 0) { + dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n"); + return; + } + + dev_info(&hdev->pdev->dev, "offset | data\n"); + + while (length > 0) { + data0 = offset; + if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH) + data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16; + else + data0 |= length << 16; + ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num, + HCLGE_OPC_QUERY_NCL_CONFIG); + if (ret) + return; + + byte_offset = offset; + for (i = 0; i < bd_num; i++) { + for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) { + if (i == 0 && j == 0) + continue; + + dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n", + byte_offset, + le32_to_cpu(desc[i].data[j])); + byte_offset += sizeof(u32); + length -= sizeof(u32); + if (length <= 0) + return; + } + } + offset += HCLGE_MAX_NCL_CONFIG_LENGTH; + } +} + +/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt + * @hdev: pointer to struct hclge_dev + */ +static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev) +{ +#define HCLGE_BILLION_NANO_SECONDS 1000000000 + + struct hclge_mac_tnl_stats stats; + unsigned long rem_nsec; + + dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n"); + + while (kfifo_get(&hdev->mac_tnl_log, &stats)) { + rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS); + dev_info(&hdev->pdev->dev, "[%07lu.%03lu]status = 0x%x\n", + (unsigned long)stats.time, rem_nsec / 1000, + stats.status); + } +} + int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -924,6 +1027,13 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) hclge_dbg_dump_mng_table(hdev); } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { hclge_dbg_dump_reg_cmd(hdev, cmd_buf); + } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) { + hclge_dbg_dump_rst_info(hdev); + } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) { + hclge_dbg_dump_ncl_config(hdev, + &cmd_buf[sizeof("dump ncl_config")]); + } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) { + hclge_dbg_dump_mac_tnl_status(hdev); } else { dev_info(&hdev->pdev->dev, "unknown command\n"); return -EINVAL; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 1f52d11f77b5..4ac80634c984 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,287 +4,468 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = 
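+	/* Each table entry now pairs the log message with a recommended
+	 * recovery action. A minimal sketch of how a consumer could fold
+	 * the asserted bits into a reset request bitmap -- the walker and
+	 * bitmap names here are illustrative, not the driver's exact API:
+	 *
+	 *	while (err->msg) {
+	 *		if (status & err->int_msk)
+	 *			set_bit(err->reset_level, reset_requests);
+	 *		err++;
+	 *	}
+	 */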
HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = 
"tqp_int_ctrl_even_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err" }, - { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { - { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_igu_int[] = { - { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { - { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, - { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, - { .int_msk = BIT(3), .msg = "tx_buf_overflow" }, - { .int_msk = BIT(4), .msg = "tx_buf_underrun" }, - { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow" }, + { .int_msk = BIT(0), .msg = "rx_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(3), .msg = "tx_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(4), .msg = "tx_buf_underrun", + .reset_level = HNAE3_CORE_RESET }, + { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_CORE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, + { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err" }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err" }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err" }, - { 
.int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err" }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err" }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err" }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err" }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err" }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err" }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err" }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err" }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err" }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err" }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err" }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err" }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err" }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err" }, - { .int_msk = BIT(27), - .msg = "flow_director_ad_mem0_ecc_mbit_err" }, - { .int_msk = BIT(28), - .msg = "flow_director_ad_mem1_ecc_mbit_err" }, - { .int_msk = BIT(29), - .msg = "rx_vlan_tag_memory_ecc_mbit_err" }, - { .int_msk = BIT(30), - .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", + 
.reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" }, - { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, + { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET }, + { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err" }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err" }, + { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_tm_sch_rint[] = { - { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" }, - { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" }, - { .int_msk = BIT(12), - .msg = "tm_sch_port_shap_offset_fifo_wr_err" }, - { .int_msk = BIT(13), - .msg = "tm_sch_port_shap_offset_fifo_rd_err" }, - { .int_msk = BIT(14), - .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" }, - { .int_msk = BIT(15), - .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" }, - { .int_msk = BIT(16), - .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" }, - { .int_msk = 
BIT(17), - .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" }, - { .int_msk = BIT(18), - .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" }, - { .int_msk = BIT(19), - .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" }, - { .int_msk = BIT(20), - .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" }, - { .int_msk = BIT(21), - .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" }, - { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" }, + { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = 
HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { - { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err" }, - { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" }, - { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" }, - { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" }, - { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" }, - { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" }, - { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" }, - { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" }, - { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" }, - { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" }, - { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" }, - { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" }, - { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" }, - { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" }, - { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" }, - { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" }, - { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" }, - { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" }, + { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET }, + { .int_msk = 
BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
-	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" },
-	{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" },
+	{ .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
-	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" },
-	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" },
-	{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" },
-	{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" },
-	{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" },
-	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" },
-	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" },
-	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" },
-	{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err" },
-	{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err" },
-	{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err" },
-	{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err" },
-	{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err" },
-	{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err" },
+	{ .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
-	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" },
-	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err" },
-	{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" },
-	{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" },
-	{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" },
-	{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" },
-	{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" },
-	{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" },
-	{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" },
-	{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" },
-	{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" },
-	{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" },
-	{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" },
-	{ .int_msk = BIT(26), .msg = "rd_bus_err" },
-	{ .int_msk = BIT(27), .msg = "wr_bus_err" },
-	{ .int_msk = BIT(28), .msg = "reg_search_miss" },
-	{ .int_msk = BIT(29), .msg = "rx_q_search_miss" },
-	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" },
-	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" },
+	{ .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(26), .msg = "rd_bus_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(27), .msg = "wr_bus_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(28), .msg = "reg_search_miss",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(29), .msg = "rx_q_search_miss",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
-	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" },
-	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" },
-	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" },
-	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" },
+	{ .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
+		.reset_level = HNAE3_CORE_RESET },
+	{ .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
+		.reset_level = HNAE3_CORE_RESET },
+	{ .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
+		.reset_level = HNAE3_CORE_RESET },
+	{ .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
+		.reset_level = HNAE3_CORE_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
-	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe" },
-	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" },
-	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" },
-	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" },
-	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" },
-	{ .int_msk = BIT(5), .msg = "buf_wait_timeout" },
+	{ .int_msk = BIT(0), .msg = "over_8bd_no_fe",
+		.reset_level = HNAE3_FUNC_RESET },
+	{ .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
+		.reset_level = HNAE3_FUNC_RESET },
+	{ .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
+		.reset_level = HNAE3_FUNC_RESET },
+	{ .int_msk = BIT(5), .msg = "buf_wait_timeout",
+		.reset_level = HNAE3_NONE_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
-	{ .int_msk = BIT(0), .msg = "buf_sum_err" },
-	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err" },
-	{ .int_msk = BIT(2), .msg = "ppp_mbid_err" },
-	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" },
-	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err" },
-	{ .int_msk = BIT(5), .msg = "cks_edit_position_err" },
-	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err" },
-	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err" },
-	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err" },
-	{ .int_msk = BIT(9), .msg = "vlan_num_in_err" },
+	{ .int_msk = BIT(0), .msg = "buf_sum_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(1), .msg = "ppp_mb_num_err",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(2), .msg = "ppp_mbid_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "cks_edit_position_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "cks_edit_condition_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "vlan_num_ot_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "vlan_num_in_err",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
#define HCLGE_SSU_MEM_ECC_ERR(x) \
-	{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err" }
+	{ .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
+	  .reset_level = HNAE3_GLOBAL_RESET }
 
 static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
 	HCLGE_SSU_MEM_ECC_ERR(0),
@@ -323,62 +504,106 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
-	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
-	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" },
-	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" },
-	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" },
-	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" },
-	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" },
-	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" },
-	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" },
-	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" },
-	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" },
-	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" },
-	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" },
-	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" },
+	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
-	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int" },
-	{ .int_msk = BIT(1), .msg = "ig_host_inf_int" },
-	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int" },
-	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" },
-	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int" },
-	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" },
-	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" },
-	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" },
-	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" },
-	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int" },
-	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" },
-	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" },
-	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" },
-	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" },
-	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" },
-	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int" },
-	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" },
-	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" },
-	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" },
-	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" },
-	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" },
-	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" },
-	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" },
-	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" },
+	{ .int_msk = BIT(0), .msg = "ig_mac_inf_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "ig_host_inf_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "ig_roc_buf_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
-	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" },
-	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" },
-	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" },
-	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" },
+	{ .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
 static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
-	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" },
-	{ .int_msk = BIT(9), .msg = "low_water_line_err_port" },
-	{ .int_msk = BIT(10), .msg = "hi_water_line_err_port" },
+	{ .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
+	{ .int_msk = BIT(9), .msg = "low_water_line_err_port",
+		.reset_level = HNAE3_NONE_RESET },
+	{ .int_msk = BIT(10), .msg = "hi_water_line_err_port",
+		.reset_level = HNAE3_GLOBAL_RESET },
 	{ /* sentinel */ }
 };
 
@@ -406,16 +631,29 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
 	{ /* sentinel */ }
 };
 
-static void hclge_log_error(struct device *dev, char *reg,
-			    const struct hclge_hw_error *err,
-			    u32 err_sts)
+static enum hnae3_reset_type hclge_log_error(struct device *dev, char *reg,
+					     const struct hclge_hw_error *err,
+					     u32 err_sts)
 {
+	enum hnae3_reset_type reset_level = HNAE3_FUNC_RESET;
+	bool need_reset = false;
+
 	while (err->msg) {
-		if (err->int_msk & err_sts)
+		if (err->int_msk & err_sts) {
 			dev_warn(dev, "%s %s found [error status=0x%x]\n",
 				 reg, err->msg, err_sts);
+			if (err->reset_level != HNAE3_NONE_RESET &&
+			    err->reset_level >= reset_level) {
+				reset_level = err->reset_level;
+				need_reset = true;
+			}
+		}
 		err++;
 	}
+	if (need_reset)
+		return reset_level;
+	else
+		return HNAE3_NONE_RESET;
 }
 
 /* hclge_cmd_query_error: read the error information
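The reworked hclge_log_error() above does double duty: it still prints one warning per set status bit, but it now also walks the sentinel-terminated table and returns the most severe reset_level seen, which the callers feed into the reset machinery instead of hard-coding a level per register. A minimal standalone C sketch of that table-walk pattern follows; the struct, enum ordering, and values here are illustrative stand-ins, not the driver's definitions.

#include <stdio.h>

enum reset_type { NONE_RESET, FUNC_RESET, GLOBAL_RESET };

struct hw_error {
	unsigned int int_msk;
	const char *msg;
	enum reset_type reset_level;
};

/* Scan a sentinel-terminated table, report every bit set in the
 * status word, and return the worst reset level encountered.
 */
static enum reset_type log_errors(const struct hw_error *err, unsigned int sts)
{
	enum reset_type level = NONE_RESET;

	for (; err->msg; err++) {
		if (!(err->int_msk & sts))
			continue;
		printf("%s [status=0x%x]\n", err->msg, sts);
		if (err->reset_level > level)	/* keep the most severe vote */
			level = err->reset_level;
	}
	return level;
}

int main(void)
{
	static const struct hw_error tbl[] = {
		{ 1u << 0, "ecc_1bit_err", NONE_RESET },
		{ 1u << 1, "ecc_mbit_err", GLOBAL_RESET },
		{ 0, NULL, NONE_RESET }	/* sentinel */
	};

	/* both bits set: logs twice, returns GLOBAL_RESET */
	return log_errors(tbl, 0x3) == GLOBAL_RESET ? 0 : 1;
}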
@@ -454,6 +692,16 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev,
 	return ret;
 }
 
+static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
+{
+	struct hclge_desc desc;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
+	desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
 static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
 {
 	struct device *dev = &hdev->pdev->dev;
@@ -673,6 +921,21 @@ static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
 	return ret;
 }
 
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
+{
+	struct hclge_desc desc;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
+	if (en)
+		desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
+	else
+		desc.data[0] = 0;
+
+	desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
+
+	return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
 static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
 					     bool en)
 {
@@ -826,6 +1089,7 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
 				      int num)
 {
 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+	enum hnae3_reset_type reset_level;
 	struct device *dev = &hdev->pdev->dev;
 	__le32 *desc_data;
 	u32 status;
@@ -845,78 +1109,94 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
 	/* log HNS common errors */
 	status = le32_to_cpu(desc[0].data[0]);
 	if (status) {
-		hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
-				&hclge_imp_tcm_ecc_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+		reset_level = hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+					      &hclge_imp_tcm_ecc_int[0],
+					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	status = le32_to_cpu(desc[0].data[1]);
 	if (status) {
-		hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
-				&hclge_cmdq_nic_mem_ecc_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+		reset_level = hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
					      &hclge_cmdq_nic_mem_ecc_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) {
 		dev_warn(dev, "imp_rd_data_poison_err found\n");
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_NONE_RESET);
 	}
 
 	status = le32_to_cpu(desc[0].data[3]);
 	if (status) {
-		hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
-				&hclge_tqp_int_ecc_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level = hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
					      &hclge_tqp_int_ecc_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	status = le32_to_cpu(desc[0].data[4]);
 	if (status) {
-		hclge_log_error(dev, "MSIX_ECC_INT_STS",
-				&hclge_msix_sram_ecc_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level = hclge_log_error(dev, "MSIX_ECC_INT_STS",
					      &hclge_msix_sram_ecc_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	/* log SSU(Storage Switch Unit) errors */
 	desc_data = (__le32 *)&desc[2];
 	status = le32_to_cpu(*(desc_data + 2));
 	if (status) {
-		hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
-				&hclge_ssu_mem_ecc_err_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level = hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
					      &hclge_ssu_mem_ecc_err_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
 	if (status) {
 		dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
 			 status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
 	}
 
 	status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
 	if (status) {
-		hclge_log_error(dev, "SSU_COMMON_ERR_INT",
-				&hclge_ssu_com_err_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+		reset_level = hclge_log_error(dev, "SSU_COMMON_ERR_INT",
					      &hclge_ssu_com_err_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	/* log IGU(Ingress Unit) errors */
 	desc_data = (__le32 *)&desc[3];
 	status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
-	if (status)
-		hclge_log_error(dev, "IGU_INT_STS",
-				&hclge_igu_int[0], status);
+	if (status) {
+		reset_level = hclge_log_error(dev, "IGU_INT_STS",
					      &hclge_igu_int[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+	}
 
 	/* log PPP(Programmable Packet Process) errors */
 	desc_data = (__le32 *)&desc[4];
 	status = le32_to_cpu(*(desc_data + 1));
-	if (status)
-		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
-				&hclge_ppp_mpf_abnormal_int_st1[0], status);
+	if (status) {
+		reset_level =
+			hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
					&hclge_ppp_mpf_abnormal_int_st1[0],
					status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+	}
 
 	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
-	if (status)
-		hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
-				&hclge_ppp_mpf_abnormal_int_st3[0], status);
+	if (status) {
+		reset_level =
+			hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
					&hclge_ppp_mpf_abnormal_int_st3[0],
					status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+	}
 
 	/* log PPU(RCB) errors */
 	desc_data = (__le32 *)&desc[5];
@@ -924,55 +1204,60 @@ static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
 	if (status) {
 		dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n",
 			 "rpu_rx_pkt_ecc_mbit_err");
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
 	}
 
 	status = le32_to_cpu(*(desc_data + 2));
 	if (status) {
-		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
-				&hclge_ppu_mpf_abnormal_int_st2[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level =
+			hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
					&hclge_ppu_mpf_abnormal_int_st2[0],
					status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
 	if (status) {
-		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
-				&hclge_ppu_mpf_abnormal_int_st3[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level =
+			hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
					&hclge_ppu_mpf_abnormal_int_st3[0],
					status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	/* log TM(Traffic Manager) errors */
 	desc_data = (__le32 *)&desc[6];
 	status = le32_to_cpu(*desc_data);
 	if (status) {
-		hclge_log_error(dev, "TM_SCH_RINT",
-				&hclge_tm_sch_rint[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level = hclge_log_error(dev, "TM_SCH_RINT",
					      &hclge_tm_sch_rint[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	/* log QCN(Quantized Congestion Control) errors */
 	desc_data = (__le32 *)&desc[7];
 	status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
 	if (status) {
-		hclge_log_error(dev, "QCN_FIFO_RINT",
-				&hclge_qcn_fifo_rint[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level = hclge_log_error(dev, "QCN_FIFO_RINT",
					      &hclge_qcn_fifo_rint[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
 	if (status) {
-		hclge_log_error(dev, "QCN_ECC_RINT",
-				&hclge_qcn_ecc_rint[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level = hclge_log_error(dev, "QCN_ECC_RINT",
					      &hclge_qcn_ecc_rint[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	/* log NCSI errors */
 	desc_data = (__le32 *)&desc[9];
 	status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
 	if (status) {
-		hclge_log_error(dev, "NCSI_ECC_INT_RPT",
-				&hclge_ncsi_err_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET);
+		reset_level = hclge_log_error(dev, "NCSI_ECC_INT_RPT",
					      &hclge_ncsi_err_int[0], status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	/* clear all main PF RAS errors */
@@ -1000,6 +1285,7 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
 {
 	struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
 	struct device *dev = &hdev->pdev->dev;
+	enum hnae3_reset_type reset_level;
 	__le32 *desc_data;
 	u32 status;
 	int ret;
@@ -1018,38 +1304,47 @@ static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
 	/* log SSU(Storage Switch Unit) errors */
 	status = le32_to_cpu(desc[0].data[0]);
 	if (status) {
-		hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
-				&hclge_ssu_port_based_err_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+		reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
					      &hclge_ssu_port_based_err_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	status = le32_to_cpu(desc[0].data[1]);
 	if (status) {
-		hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
-				&hclge_ssu_fifo_overflow_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+		reset_level = hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
					      &hclge_ssu_fifo_overflow_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	status = le32_to_cpu(desc[0].data[2]);
 	if (status) {
-		hclge_log_error(dev, "SSU_ETS_TCG_INT",
-				&hclge_ssu_ets_tcg_int[0], status);
-		HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
+		reset_level = hclge_log_error(dev, "SSU_ETS_TCG_INT",
					      &hclge_ssu_ets_tcg_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
 	}
 
 	/* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
 	desc_data = (__le32 *)&desc[1];
 	status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
-	if (status)
-		hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
-				&hclge_igu_egu_tnl_int[0], status);
+	if (status) {
+		reset_level = hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
					      &hclge_igu_egu_tnl_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+	}
 
 	/* log PPU(RCB) errors */
 	desc_data = (__le32 *)&desc[3];
 	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
-	if (status)
-		hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
-				&hclge_ppu_pf_abnormal_int[0], status);
+	if (status) {
+		reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
					      &hclge_ppu_pf_abnormal_int[0],
					      status);
+		HCLGE_SET_DEFAULT_RESET_REQUEST(reset_level);
+	}
 
 	/* clear all PF RAS errors */
 	hclge_cmd_reuse_desc(&desc[0], false);
@@ -1341,16 +1636,15 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
 int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 			       unsigned long *reset_requests)
 {
+	struct hclge_mac_tnl_stats mac_tnl_stats;
 	struct device *dev = &hdev->pdev->dev;
 	u32 mpf_bd_num, pf_bd_num, bd_num;
+	enum hnae3_reset_type reset_level;
 	struct hclge_desc desc_bd;
 	struct hclge_desc *desc;
 	__le32 *desc_data;
-	int ret = 0;
 	u32 status;
-
-	/* set default handling */
-	set_bit(HNAE3_FUNC_RESET, reset_requests);
+	int ret;
 
 	/* query the number of bds for the MSIx int status */
 	hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM,
@@ -1359,8 +1653,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 	if (ret) {
 		dev_err(dev, "fail(%d) to query msix int status bd num\n",
 			ret);
-		/* reset everything for now */
-		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
 		return ret;
 	}
 
@@ -1381,8 +1673,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 	if (ret) {
 		dev_err(dev, "query all mpf msix int cmd failed (%d)\n",
 			ret);
-		/* reset everything for now */
-		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
 		goto msi_error;
 	}
 
@@ -1390,9 +1680,10 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 	desc_data = (__le32 *)&desc[1];
 	status = le32_to_cpu(*desc_data);
 	if (status) {
-		hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
-				&hclge_mac_afifo_tnl_int[0], status);
-		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+		reset_level = hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
					      &hclge_mac_afifo_tnl_int[0],
					      status);
+		set_bit(reset_level, reset_requests);
 	}
 
 	/* log PPU(RCB) MPF errors */
 	desc_data = (__le32 *)&desc[5];
 	status = le32_to_cpu(*(desc_data + 2)) &
 			HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
 	if (status) {
-		hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
-				&hclge_ppu_mpf_abnormal_int_st2[0], status);
-		set_bit(HNAE3_CORE_RESET, reset_requests);
+		reset_level =
+			hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
					&hclge_ppu_mpf_abnormal_int_st2[0],
					status);
+		set_bit(reset_level, reset_requests);
 	}
 
 	/* clear all main PF MSIx errors */
@@ -1413,8 +1706,6 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 	if (ret) {
 		dev_err(dev, "clear all mpf msix int cmd failed (%d)\n",
 			ret);
-		/* reset everything for now */
-		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
 		goto msi_error;
 	}
 
@@ -1428,32 +1719,37 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 	if (ret) {
 		dev_err(dev, "query all pf msix int cmd failed (%d)\n",
 			ret);
-		/* reset everything for now */
-		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
 		goto msi_error;
 	}
 
 	/* log SSU PF errors */
 	status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
 	if (status) {
-		hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
-				&hclge_ssu_port_based_pf_int[0], status);
-		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+		reset_level = hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
					      &hclge_ssu_port_based_pf_int[0],
					      status);
+		set_bit(reset_level, reset_requests);
 	}
 
 	/* read and log PPP PF errors */
 	desc_data = (__le32 *)&desc[2];
 	status = le32_to_cpu(*desc_data);
-	if (status)
-		hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
-				&hclge_ppp_pf_abnormal_int[0], status);
+	if (status) {
+		reset_level = hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
					      &hclge_ppp_pf_abnormal_int[0],
					      status);
+		set_bit(reset_level, reset_requests);
+	}
 
 	/* log PPU(RCB) PF errors */
 	desc_data = (__le32 *)&desc[3];
 	status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
-	if (status)
-		hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
-				&hclge_ppu_pf_abnormal_int[0], status);
+	if (status) {
+		reset_level = hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
					      &hclge_ppu_pf_abnormal_int[0],
					      status);
+		set_bit(reset_level, reset_requests);
+	}
 
 	/* clear all PF MSIx errors */
 	hclge_cmd_reuse_desc(&desc[0], false);
@@ -1463,8 +1759,31 @@ int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
 	if (ret) {
 		dev_err(dev, "clear all pf msix int cmd failed (%d)\n",
 			ret);
-		/* reset everything for now */
-		set_bit(HNAE3_GLOBAL_RESET, reset_requests);
+	}
+
+	/* query and clear mac tnl interruptions */
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
+				   true);
+	ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+	if (ret) {
+		dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
+		goto msi_error;
+	}
+
+	status = le32_to_cpu(desc->data[0]);
+	if (status) {
+		/* When mac tnl interrupt occurs, we record current time and
+		 * register status here in a fifo, then clear the status. So
+		 * that if link status changes suddenly at some time, we can
+		 * query them by debugfs.
+		 */
+		mac_tnl_stats.time = local_clock();
+		mac_tnl_stats.status = status;
+		kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
+		ret = hclge_clear_mac_tnl_int(hdev);
+		if (ret)
+			dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
+		set_bit(HNAE3_NONE_RESET, reset_requests);
 	}
 
 msi_error:
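The hunk above only shows the producer side of the mac_tnl_log kfifo: each MAC tunnel interrupt pushes a timestamped status record and then clears the hardware status, so kfifo_put() silently drops new records once the fixed-size fifo is full. The comment says the records are meant to be read back through debugfs; that consumer is not part of this diff. A minimal sketch of what such a consumer could look like, with a hypothetical helper name:

/* Hypothetical debugfs-side consumer for hdev->mac_tnl_log; not part
 * of this patch. kfifo_get() pops one hclge_mac_tnl_stats record per
 * call and returns 0 when the fifo is empty.
 */
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
{
	struct hclge_mac_tnl_stats stats;

	while (kfifo_get(&hdev->mac_tnl_log, &stats))
		dev_info(&hdev->pdev->dev, "[%llu ns] status: 0x%x\n",
			 (unsigned long long)stats.time, stats.status);
}

Note the design trade-off: draining with kfifo_get() makes the dump destructive (each record is reported once), which keeps the bounded fifo from pinning stale entries.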
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index fc068280d391..9645590c9294 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -47,6 +47,9 @@
 #define HCLGE_NCSI_ERR_INT_TYPE	0x9
 #define HCLGE_MAC_COMMON_ERR_INT_EN	0x107FF
 #define HCLGE_MAC_COMMON_ERR_INT_EN_MASK	0x107FF
+#define HCLGE_MAC_TNL_INT_EN	GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK	GENMASK(7, 0)
+#define HCLGE_MAC_TNL_INT_CLR	GENMASK(7, 0)
 #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN	GENMASK(31, 0)
 #define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK	GENMASK(31, 0)
 #define HCLGE_PPU_MPF_ABNORMAL_INT1_EN	GENMASK(31, 0)
@@ -112,8 +115,10 @@ struct hclge_hw_blk {
 struct hclge_hw_error {
 	u32 int_msk;
 	const char *msg;
+	enum hnae3_reset_type reset_level;
 };
 
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
 int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state);
 pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
 int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bb7ff3b6eccd..1c35c66afb88 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12,6 +12,7 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
 #include <net/rtnetlink.h>
 #include "hclge_cmd.h"
 #include "hclge_dcb.h"
@@ -31,6 +32,7 @@ static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
 			       u16 *allocated_size, bool is_alloc);
@@ -697,6 +699,16 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
 		p = hclge_tqps_get_stats(handle, p);
 }
 
+static void hclge_get_mac_pause_stat(struct hnae3_handle *handle, u64 *tx_cnt,
+				     u64 *rx_cnt)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+
+	*tx_cnt = hdev->hw_stats.mac_stats.mac_tx_mac_pause_num;
+	*rx_cnt = hdev->hw_stats.mac_stats.mac_rx_mac_pause_num;
+}
+
 static int hclge_parse_func_status(struct hclge_dev *hdev,
 				   struct hclge_func_status_cmd *status)
 {
@@ -1015,6 +1027,23 @@ static int hclge_get_cap(struct hclge_dev *hdev)
 	return ret;
 }
 
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC	64
+#define HCLGE_MIN_RX_DESC	64
+
+	if (!is_kdump_kernel())
+		return;
+
+	dev_info(&hdev->pdev->dev,
+		 "Running kdump kernel. Using minimal resources\n");
+
+	/* minimal queue pairs equals to the number of vports */
+	hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
 static int hclge_configure(struct hclge_dev *hdev)
 {
 	struct hclge_cfg cfg;
@@ -1074,6 +1103,8 @@ static int hclge_configure(struct hclge_dev *hdev)
 
 	hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
 
+	hclge_init_kdump_kernel_config(hdev);
+
 	return ret;
 }
 
@@ -1337,6 +1368,8 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
 		vport->back = hdev;
 		vport->vport_id = i;
 		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+		vport->rxvlan_cfg.rx_vlan_offload_en = true;
 		INIT_LIST_HEAD(&vport->vlan_list);
 		INIT_LIST_HEAD(&vport->uc_mac_list);
 		INIT_LIST_HEAD(&vport->mc_mac_list);
@@ -1399,7 +1432,7 @@ static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
 	return ret;
 }
 
-static int hclge_get_tc_num(struct hclge_dev *hdev)
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
 {
 	int i, cnt = 0;
 
@@ -1409,17 +1442,6 @@ static int hclge_get_tc_num(struct hclge_dev *hdev)
 	return cnt;
 }
 
-static int hclge_get_pfc_enalbe_num(struct hclge_dev *hdev)
-{
-	int i, cnt = 0;
-
-	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
-		if (hdev->hw_tc_map & BIT(i) &&
-		    hdev->tm_info.hw_pfc_map & BIT(i))
-			cnt++;
-	return cnt;
-}
-
 /* Get the number of pfc enabled TCs, which have private buffer */
 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
 				  struct hclge_pkt_buf_alloc *buf_alloc)
@@ -1483,14 +1505,12 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
 			       struct hclge_pkt_buf_alloc *buf_alloc,
 			       u32 rx_all)
 {
-	u32 shared_buf_min, shared_buf_tc, shared_std;
-	int tc_num, pfc_enable_num;
+	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
+	u32 tc_num = hclge_get_tc_num(hdev);
 	u32 shared_buf, aligned_mps;
 	u32 rx_priv;
 	int i;
 
-	tc_num = hclge_get_tc_num(hdev);
-	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
 	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
 
 	if (hnae3_dev_dcb_supported(hdev))
@@ -1499,9 +1519,7 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
 		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF +
 					hdev->dv_buf_size;
 
-	shared_buf_tc = pfc_enable_num * aligned_mps +
-			(tc_num - pfc_enable_num) * aligned_mps / 2 +
-			aligned_mps;
+	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
 	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
 			     HCLGE_BUF_SIZE_UNIT);
 
@@ -1518,19 +1536,26 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
 	} else {
 		buf_alloc->s_buf.self.high = aligned_mps +
 				HCLGE_NON_DCB_ADDITIONAL_BUF;
-		buf_alloc->s_buf.self.low =
-			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+		buf_alloc->s_buf.self.low = aligned_mps;
+	}
+
+	if (hnae3_dev_dcb_supported(hdev)) {
+		if (tc_num)
+			hi_thrd = (shared_buf - hdev->dv_buf_size) / tc_num;
+		else
+			hi_thrd = shared_buf - hdev->dv_buf_size;
+
+		hi_thrd = max_t(u32, hi_thrd, 2 * aligned_mps);
+		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
+		lo_thrd = hi_thrd - aligned_mps / 2;
+	} else {
+		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
+		lo_thrd = aligned_mps;
 	}
 
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-		if ((hdev->hw_tc_map & BIT(i)) &&
-		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
-			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
-			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
-		} else {
-			buf_alloc->s_buf.tc_thrd[i].low = 0;
-			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
-		}
+		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
+		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
 	}
 
 	return true;
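The rewritten threshold logic above drops the per-TC PFC special-casing: every TC now gets the same hi/lo water marks, derived by dividing the shared buffer (minus dv_buf_size) across the TCs, clamping to at least twice the aligned MPS, and rounding down to the buffer unit. A worked example, outside the driver; the concrete numbers (buffer unit 256, shared_buf 16384, dv_buf_size 8192) are assumptions for illustration only:

#include <stdio.h>

#define BUF_SIZE_UNIT	256u	/* assumed value of HCLGE_BUF_SIZE_UNIT */

static unsigned int roundup_u(unsigned int v, unsigned int a)
{
	return (v + a - 1) / a * a;
}

int main(void)
{
	unsigned int mps = 1500, tc_num = 4;
	unsigned int shared_buf = 16384, dv_buf_size = 8192;	/* assumed */
	unsigned int aligned_mps = roundup_u(mps, BUF_SIZE_UNIT); /* 1536 */
	unsigned int hi = (shared_buf - dv_buf_size) / tc_num;	  /* 2048 */

	if (hi < 2 * aligned_mps)		/* clamp to >= 2 * MPS */
		hi = 2 * aligned_mps;		/* 3072 */
	hi = hi / BUF_SIZE_UNIT * BUF_SIZE_UNIT; /* round down: 3072 */

	printf("hi_thrd=%u lo_thrd=%u\n", hi, hi - aligned_mps / 2);
	return 0;	/* prints: hi_thrd=3072 lo_thrd=2304 */
}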
@@ -2143,7 +2168,8 @@ static int hclge_mac_init(struct hclge_dev *hdev)
 
 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
 {
-	if (!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+	if (!test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) &&
+	    !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
 		schedule_work(&hdev->mbx_service_task);
 }
 
@@ -2222,6 +2248,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
 		for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
 			handle = &hdev->vport[i].nic;
 			client->ops->link_status_change(handle, state);
+			hclge_config_mac_tnl_int(hdev, state);
 			rhandle = &hdev->vport[i].roce;
 			if (rclient && rclient->ops->link_status_change)
 				rclient->ops->link_status_change(rhandle,
@@ -2344,6 +2371,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 		set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		*clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+		hdev->rst_stats.imp_rst_cnt++;
 		return HCLGE_VECTOR0_EVENT_RST;
 	}
 
@@ -2352,6 +2380,7 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+		hdev->rst_stats.global_rst_cnt++;
 		return HCLGE_VECTOR0_EVENT_RST;
 	}
 
@@ -2360,12 +2389,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
 		*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
+		hdev->rst_stats.core_rst_cnt++;
 		return HCLGE_VECTOR0_EVENT_RST;
 	}
 
 	/* check for vector0 msix event source */
-	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK)
+	if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+		dev_dbg(&hdev->pdev->dev, "received event 0x%x\n",
+			msix_src_reg);
 		return HCLGE_VECTOR0_EVENT_ERR;
+	}
 
 	/* check for vector0 mailbox(=CMDQ RX) event source */
 	if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
@@ -2374,6 +2407,9 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
 		return HCLGE_VECTOR0_EVENT_MBX;
 	}
 
+	/* print other vector0 event source */
+	dev_dbg(&hdev->pdev->dev, "cmdq_src_reg:0x%x, msix_src_reg:0x%x\n",
+		cmdq_src_reg, msix_src_reg);
 	return HCLGE_VECTOR0_EVENT_OTHER;
 }
 
@@ -2657,7 +2693,7 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
 			return ret;
 		}
 
-		if (!reset)
+		if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
 			continue;
 
 		/* Inform VF to process the reset.
@@ -2694,9 +2730,18 @@ int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
 
 static void hclge_do_reset(struct hclge_dev *hdev)
 {
+	struct hnae3_handle *handle = &hdev->vport[0].nic;
 	struct pci_dev *pdev = hdev->pdev;
 	u32 val;
 
+	if (hclge_get_hw_reset_stat(handle)) {
+		dev_info(&pdev->dev, "Hardware reset not finish\n");
+		dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
+			 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
+			 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+		return;
+	}
+
 	switch (hdev->reset_type) {
 	case HNAE3_GLOBAL_RESET:
 		val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
@@ -2775,6 +2820,10 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
 		clear_bit(HNAE3_FLR_RESET, addr);
 	}
 
+	if (hdev->reset_type != HNAE3_NONE_RESET &&
+	    rst_level < hdev->reset_type)
+		return HNAE3_NONE_RESET;
+
 	return rst_level;
 }
 
@@ -2844,6 +2893,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
 		 * after hclge_cmd_init is called.
 		 */
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+		hdev->rst_stats.pf_rst_cnt++;
 		break;
 	case HNAE3_FLR_RESET:
 		/* There is no mechanism for PF to know if VF has stopped IO
@@ -2852,6 +2902,7 @@ static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
 		msleep(100);
 		set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
 		set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+		hdev->rst_stats.flr_rst_cnt++;
 		break;
 	case HNAE3_IMP_RESET:
 		reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
@@ -2932,7 +2983,7 @@ static void hclge_reset(struct hclge_dev *hdev)
 	 * know if device is undergoing reset
 	 */
 	ae_dev->reset_type = hdev->reset_type;
-	hdev->reset_count++;
+	hdev->rst_stats.reset_cnt++;
 	/* perform reset of the stack & ae device for a client */
 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
 	if (ret)
@@ -2958,6 +3009,8 @@ static void hclge_reset(struct hclge_dev *hdev)
 		goto err_reset;
 	}
 
+	hdev->rst_stats.hw_reset_done_cnt++;
+
 	ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
 	if (ret)
 		goto err_reset;
@@ -3001,7 +3054,9 @@ static void hclge_reset(struct hclge_dev *hdev)
 
 	hdev->last_reset_time = jiffies;
 	hdev->reset_fail_cnt = 0;
+	hdev->rst_stats.reset_done_cnt++;
 	ae_dev->reset_type = HNAE3_NONE_RESET;
+	del_timer(&hdev->reset_timer);
 
 	return;
 
@@ -5194,7 +5249,7 @@ static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
 	struct hclge_vport *vport = hclge_get_vport(handle);
 	struct hclge_dev *hdev = vport->back;
 
-	return hdev->reset_count;
+	return hdev->rst_stats.hw_reset_done_cnt;
 }
 
 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
@@ -5282,8 +5337,8 @@ static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
 #define HCLGE_SERDES_RETRY_MS	10
 #define HCLGE_SERDES_RETRY_NUM	100
 
-#define HCLGE_MAC_LINK_STATUS_MS   20
-#define HCLGE_MAC_LINK_STATUS_NUM  10
+#define HCLGE_MAC_LINK_STATUS_MS   10
+#define HCLGE_MAC_LINK_STATUS_NUM  100
 #define HCLGE_MAC_LINK_STATUS_DOWN 0
 #define HCLGE_MAC_LINK_STATUS_UP   1
 
@@ -5942,8 +5997,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
 	}
 
 	/* check if we just hit the duplicate */
-	if (!ret)
-		ret = -EINVAL;
+	if (!ret) {
+		dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+			 vport->vport_id, addr);
+		return 0;
+	}
 
 	dev_err(&hdev->pdev->dev,
 		"PF failed to add unicast entry(%pM) in the MAC table\n",
@@ -6293,7 +6351,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
 		return -EINVAL;
 	}
 
-	if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
+	if ((!is_first || is_kdump_kernel()) &&
+	    hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
 		dev_warn(&hdev->pdev->dev,
 			 "remove old uc mac address fail.\n");
 
@@ -6543,30 +6602,6 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
 	return ret;
 }
 
-int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
-			  u16 vlan_id, bool is_kill)
-{
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-
-	return hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id, vlan_id,
-					0, is_kill);
-}
-
-static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
-				    u16 vlan, u8 qos, __be16 proto)
-{
-	struct hclge_vport *vport = hclge_get_vport(handle);
-	struct hclge_dev *hdev = vport->back;
-
-	if ((vfid >= hdev->num_alloc_vfs) || (vlan > 4095) || (qos > 7))
-		return -EINVAL;
-	if (proto != htons(ETH_P_8021Q))
-		return -EPROTONOSUPPORT;
-
-	return hclge_set_vlan_filter_hw(hdev, proto, vfid, vlan, qos, false);
-}
-
 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
 {
 	struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
@@ -6640,6 +6675,52 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
 	return status;
 }
 
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+				  u16 port_base_vlan_state,
+				  u16 vlan_tag)
+{
+	int ret;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		vport->txvlan_cfg.accept_tag1 = true;
+		vport->txvlan_cfg.insert_tag1_en = false;
+		vport->txvlan_cfg.default_tag1 = 0;
+	} else {
+		vport->txvlan_cfg.accept_tag1 = false;
+		vport->txvlan_cfg.insert_tag1_en = true;
+		vport->txvlan_cfg.default_tag1 = vlan_tag;
+	}
+
+	vport->txvlan_cfg.accept_untag1 = true;
+
+	/* accept_tag2 and accept_untag2 are not supported on
+	 * pdev revision(0x20), new revision support them,
+	 * this two fields can not be configured by user.
+	 */
+	vport->txvlan_cfg.accept_tag2 = true;
+	vport->txvlan_cfg.accept_untag2 = true;
+	vport->txvlan_cfg.insert_tag2_en = false;
+	vport->txvlan_cfg.default_tag2 = 0;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		vport->rxvlan_cfg.strip_tag1_en = false;
+		vport->rxvlan_cfg.strip_tag2_en =
+				vport->rxvlan_cfg.rx_vlan_offload_en;
+	} else {
+		vport->rxvlan_cfg.strip_tag1_en =
+				vport->rxvlan_cfg.rx_vlan_offload_en;
+		vport->rxvlan_cfg.strip_tag2_en = true;
+	}
+	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+	ret = hclge_set_vlan_tx_offload_cfg(vport);
+	if (ret)
+		return ret;
+
+	return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
 {
 	struct hclge_rx_vlan_type_cfg_cmd *rx_req;
@@ -6730,34 +6811,14 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 		return ret;
 
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
-		vport = &hdev->vport[i];
-		vport->txvlan_cfg.accept_tag1 = true;
-		vport->txvlan_cfg.accept_untag1 = true;
-
-		/* accept_tag2 and accept_untag2 are not supported on
-		 * pdev revision(0x20), new revision support them. The
-		 * value of this two fields will not return error when driver
-		 * send command to fireware in revision(0x20).
-		 * This two fields can not configured by user.
-		 */
-		vport->txvlan_cfg.accept_tag2 = true;
-		vport->txvlan_cfg.accept_untag2 = true;
+		u16 vlan_tag;
 
-		vport->txvlan_cfg.insert_tag1_en = false;
-		vport->txvlan_cfg.insert_tag2_en = false;
-		vport->txvlan_cfg.default_tag1 = 0;
-		vport->txvlan_cfg.default_tag2 = 0;
-
-		ret = hclge_set_vlan_tx_offload_cfg(vport);
-		if (ret)
-			return ret;
-
-		vport->rxvlan_cfg.strip_tag1_en = false;
-		vport->rxvlan_cfg.strip_tag2_en = true;
-		vport->rxvlan_cfg.vlan1_vlan_prionly = false;
-		vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+		vport = &hdev->vport[i];
+		vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
 
-		ret = hclge_set_vlan_rx_offload_cfg(vport);
+		ret = hclge_vlan_offload_cfg(vport,
					     vport->port_base_vlan_cfg.state,
					     vlan_tag);
 		if (ret)
 			return ret;
 	}
@@ -6765,7 +6826,8 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 	return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
 
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+				       bool writen_to_tbl)
 {
 	struct hclge_vport_vlan_cfg *vlan;
 
@@ -6777,14 +6839,38 @@ void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id)
 	if (!vlan)
 		return;
 
-	vlan->hd_tbl_status = true;
+	vlan->hd_tbl_status = writen_to_tbl;
 	vlan->vlan_id = vlan_id;
 
 	list_add_tail(&vlan->node, &vport->vlan_list);
 }
 
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
+{
+	struct hclge_vport_vlan_cfg *vlan, *tmp;
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+		if (!vlan->hd_tbl_status) {
+			ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+						       vport->vport_id,
+						       vlan->vlan_id, 0, false);
+			if (ret) {
+				dev_err(&hdev->pdev->dev,
+					"restore vport vlan list failed, ret=%d\n",
+					ret);
+				return ret;
+			}
+		}
+		vlan->hd_tbl_status = true;
+	}
+
+	return 0;
+}
+
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
-			       bool is_write_tbl)
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+				      bool is_write_tbl)
 {
 	struct hclge_vport_vlan_cfg *vlan, *tmp;
 	struct hclge_dev *hdev = vport->back;
@@ -6847,14 +6933,203 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
 {
 	struct hclge_vport *vport = hclge_get_vport(handle);
 
-	vport->rxvlan_cfg.strip_tag1_en = false;
-	vport->rxvlan_cfg.strip_tag2_en = enable;
+	if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		vport->rxvlan_cfg.strip_tag1_en = false;
+		vport->rxvlan_cfg.strip_tag2_en = enable;
+	} else {
+		vport->rxvlan_cfg.strip_tag1_en = enable;
+		vport->rxvlan_cfg.strip_tag2_en = true;
+	}
 	vport->rxvlan_cfg.vlan1_vlan_prionly = false;
 	vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+	vport->rxvlan_cfg.rx_vlan_offload_en = enable;
 
 	return hclge_set_vlan_rx_offload_cfg(vport);
 }
 
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
+					    u16 port_base_vlan_state,
+					    struct hclge_vlan_info *new_info,
+					    struct hclge_vlan_info *old_info)
+{
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
+		hclge_rm_vport_all_vlan_table(vport, false);
+		return hclge_set_vlan_filter_hw(hdev,
+						htons(new_info->vlan_proto),
+						vport->vport_id,
+						new_info->vlan_tag,
+						new_info->qos, false);
+	}
+
+	ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
+				       vport->vport_id, old_info->vlan_tag,
+				       old_info->qos, true);
+	if (ret)
+		return ret;
+
+	return hclge_add_vport_all_vlan_table(vport);
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+				    struct hclge_vlan_info *vlan_info)
+{
+	struct hnae3_handle *nic = &vport->nic;
+	struct hclge_vlan_info *old_vlan_info;
+	struct hclge_dev *hdev = vport->back;
+	int ret;
+
+	old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+	ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+	if (ret)
+		return ret;
+
+	if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
+		/* add new VLAN tag */
+		ret = hclge_set_vlan_filter_hw(hdev,
+					       htons(vlan_info->vlan_proto),
+					       vport->vport_id,
+					       vlan_info->vlan_tag,
+					       vlan_info->qos, false);
+		if (ret)
+			return ret;
+
+		/* remove old VLAN tag */
+		ret = hclge_set_vlan_filter_hw(hdev,
+					       htons(old_vlan_info->vlan_proto),
+					       vport->vport_id,
+					       old_vlan_info->vlan_tag,
+					       old_vlan_info->qos, true);
+		if (ret)
+			return ret;
+
+		goto update;
+	}
+
+	ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+					       old_vlan_info);
+	if (ret)
+		return ret;
+
+	/* update state only when disable/enable port based VLAN */
+	vport->port_base_vlan_cfg.state = state;
+	if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+	else
+		nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+update:
+	vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
+	vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
+	vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+
+	return 0;
+}
+
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
+					  enum hnae3_port_base_vlan_state state,
+					  u16 vlan)
+{
+	if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		if (!vlan)
+			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+		else
+			return HNAE3_PORT_BASE_VLAN_ENABLE;
+	} else {
+		if (!vlan)
+			return HNAE3_PORT_BASE_VLAN_DISABLE;
+		else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
+			return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+		else
+			return HNAE3_PORT_BASE_VLAN_MODIFY;
+	}
+}
+
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+				    u16 vlan, u8 qos, __be16 proto)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	struct hclge_vlan_info vlan_info;
+	u16 state;
+	int ret;
+
+	if (hdev->pdev->revision == 0x20)
+		return -EOPNOTSUPP;
+
+	/* qos is a 3 bits value, so can not be bigger than 7 */
+	if (vfid >= hdev->num_alloc_vfs || vlan > VLAN_N_VID - 1 || qos > 7)
+		return -EINVAL;
+	if (proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+
+	vport = &hdev->vport[vfid];
+	state = hclge_get_port_base_vlan_state(vport,
+					       vport->port_base_vlan_cfg.state,
+					       vlan);
+	if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
+		return 0;
+
+	vlan_info.vlan_tag = vlan;
+	vlan_info.qos = qos;
+	vlan_info.vlan_proto = ntohs(proto);
+
+	/* update port based VLAN for PF */
+	if (!vfid) {
+		hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+		ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
+		hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+
+		return ret;
+	}
+
+	if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+		return hclge_update_port_base_vlan_cfg(vport, state,
+						       &vlan_info);
+	} else {
+		ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+							(u8)vfid, state,
+							vlan, qos,
+							ntohs(proto));
+		return ret;
+	}
+}
+
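hclge_get_port_base_vlan_state() above is effectively a two-input decision table: (current port-base-VLAN state, requested tag) maps to one of four actions. A standalone C sketch of that decision logic follows, with illustrative enum names mirroring the hnae3 ones; only the mapping itself is taken from the patch.

#include <assert.h>

enum pbv_state { PBV_DISABLE, PBV_ENABLE, PBV_MODIFY, PBV_NOCHANGE };

/* (current state, current tag, requested tag) -> action */
static enum pbv_state next_state(enum pbv_state cur, unsigned int cur_tag,
				 unsigned int vlan)
{
	if (cur == PBV_DISABLE)
		return vlan ? PBV_ENABLE : PBV_NOCHANGE;
	if (!vlan)
		return PBV_DISABLE;
	return cur_tag == vlan ? PBV_NOCHANGE : PBV_MODIFY;
}

int main(void)
{
	assert(next_state(PBV_DISABLE, 0, 0) == PBV_NOCHANGE);	/* no-op */
	assert(next_state(PBV_DISABLE, 0, 10) == PBV_ENABLE);	/* set tag 10 */
	assert(next_state(PBV_ENABLE, 10, 0) == PBV_DISABLE);	/* clear tag */
	assert(next_state(PBV_ENABLE, 10, 10) == PBV_NOCHANGE);	/* same tag */
	assert(next_state(PBV_ENABLE, 10, 20) == PBV_MODIFY);	/* swap tags */
	return 0;
}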
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+			  u16 vlan_id, bool is_kill)
+{
+	struct hclge_vport *vport = hclge_get_vport(handle);
+	struct hclge_dev *hdev = vport->back;
+	bool writen_to_tbl = false;
+	int ret = 0;
+
+	/* when port based VLAN enabled, we use port based VLAN as the VLAN
+	 * filter entry. In this case, we don't update VLAN filter table
+	 * when user add new VLAN or remove exist VLAN, just update the vport
+	 * VLAN list. The VLAN id in VLAN list won't be writen in VLAN filter
+	 * table until port based VLAN disabled
+	 */
+	if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+		ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
+					       vlan_id, 0, is_kill);
+		writen_to_tbl = true;
+	}
+
+	if (ret)
+		return ret;
+
+	if (is_kill)
+		hclge_rm_vport_vlan_table(vport, vlan_id, false);
+	else
+		hclge_add_vport_vlan_table(vport, vlan_id,
+					   writen_to_tbl);
+
+	return 0;
+}
+
 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
 {
 	struct hclge_config_max_frm_size_cmd *req;
@@ -7290,6 +7565,32 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
 		*tp_mdix = ETH_TP_MDI;
 }
 
+static void hclge_info_show(struct hclge_dev *hdev)
+{
+	struct device *dev = &hdev->pdev->dev;
+
+	dev_info(dev, "PF info begin:\n");
+
+	dev_info(dev, "Task queue pairs numbers: %d\n", hdev->num_tqps);
+	dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc);
+	dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc);
+	dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport);
+	dev_info(dev, "Numbers of vmdp vports: %d\n", hdev->num_vmdq_vport);
+	dev_info(dev, "Numbers of VF for this PF: %d\n", hdev->num_req_vfs);
+	dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map);
+	dev_info(dev, "Total buffer size for TX/RX: %d\n", hdev->pkt_buf_size);
+	dev_info(dev, "TX buffer size for each TC: %d\n", hdev->tx_buf_size);
+	dev_info(dev, "DV buffer size for each TC: %d\n", hdev->dv_buf_size);
+	dev_info(dev, "This is %s PF\n",
+		 hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+	dev_info(dev, "DCB %s\n",
+		 hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+	dev_info(dev, "MQPRIO %s\n",
+		 hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+
+	dev_info(dev, "PF info end.\n");
+}
+
 static int hclge_init_client_instance(struct hnae3_client *client,
 				      struct hnae3_ae_dev *ae_dev)
 {
@@ -7311,6 +7612,9 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 
 			hnae3_set_client_init_flag(client, ae_dev, 1);
 
+			if (netif_msg_drv(&hdev->vport->nic))
+				hclge_info_show(hdev);
+
 			if (hdev->roce_client &&
 			    hnae3_dev_roce_supported(hdev)) {
 				struct hnae3_client *rc = hdev->roce_client;
@@ -7670,6 +7974,8 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		goto err_mdiobus_unreg;
 	}
 
+	INIT_KFIFO(hdev->mac_tnl_log);
+
 	hclge_dcb_ops_set(hdev);
 
 	timer_setup(&hdev->service_timer, hclge_service_timer, 0);
@@ -7718,7 +8024,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
 	int i;
 
 	for (i = 0; i < hdev->num_alloc_vport; i++) {
-		hclge_vport_start(vport);
+		hclge_vport_stop(vport);
 		vport++;
 	}
 }
@@ -7823,6 +8129,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hclge_enable_vector(&hdev->misc_vector, false);
 	synchronize_irq(hdev->misc_vector.vector_irq);
 
+	hclge_config_mac_tnl_int(hdev, false);
 	hclge_hw_error_set_state(hdev, false);
 	hclge_cmd_uninit(hdev);
 	hclge_misc_irq_uninit(hdev);
@@ -8268,6 +8575,7 @@ static const struct hnae3_ae_ops hclge_ops = {
 	.set_mtu = hclge_set_mtu,
 	.reset_queue = hclge_reset_tqp,
 	.get_stats = hclge_get_stats,
+	.get_mac_pause_stats = hclge_get_mac_pause_stat,
 	.update_stats = hclge_update_stats,
 	.get_strings = hclge_get_strings,
 	.get_sset_count = hclge_get_sset_count,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index b57ac4beb313..4aba6248965d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -7,6 +7,7 @@
 #include <linux/types.h>
 #include <linux/phy.h>
 #include <linux/if_vlan.h>
+#include <linux/kfifo.h>
 
 #include "hclge_cmd.h"
 #include "hnae3.h"
@@ -649,6 +650,23 @@ struct hclge_vport_vlan_cfg {
 	u16 vlan_id;
 };
 
+struct hclge_rst_stats {
+	u32 reset_done_cnt;	/* the number of reset has completed */
+	u32 hw_reset_done_cnt;	/* the number of HW reset has completed */
+	u32 pf_rst_cnt;		/* the number of PF reset */
+	u32 flr_rst_cnt;	/* the number of FLR */
+	u32 core_rst_cnt;	/* the number of CORE reset */
+	u32 global_rst_cnt;	/* the number of GLOBAL */
+	u32 imp_rst_cnt;	/* the number of IMP reset */
+	u32 reset_cnt;		/* the number of reset */
+};
+
+/* time and register status when mac tunnel interruption occur */
+struct hclge_mac_tnl_stats {
+	u64 time;
+	u32 status;
+};
+
 /* For each bit of TCAM entry, it uses a pair of 'x' and
  * 'y' to indicate which value to match, like below:
  * ----------------------------------
@@ -675,6 +693,7 @@ struct hclge_vport_vlan_cfg {
 		(y) = (_k_ ^ ~_v_) & (_k_); \
 	} while (0)
 
+#define HCLGE_MAC_TNL_LOG_SIZE	8
 #define HCLGE_VPORT_NUM 256
 struct hclge_dev {
 	struct pci_dev *pdev;
@@ -691,7 +710,7 @@ struct hclge_dev {
 	unsigned long default_reset_request;
 	unsigned long reset_request;	/* reset has been requested */
 	unsigned long reset_pending;	/* client rst is pending to be served */
-	unsigned long reset_count;	/* the number of reset has been done */
+	struct hclge_rst_stats rst_stats;
 	u32 reset_fail_cnt;
 	u32 fw_version;
 	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
@@ -791,6 +810,9 @@ struct hclge_dev {
 	struct mutex umv_mutex; /* protect share_umv_size */
 
 	struct mutex vport_cfg_mutex;   /* Protect stored vf table */
+
+	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
+		      HCLGE_MAC_TNL_LOG_SIZE);
 };
 
 /* VPort level vlan tag configuration for TX direction */
@@ -807,10 +829,11 @@ struct hclge_tx_vtag_cfg {
 
 /* VPort level vlan tag configuration for RX direction */
 struct hclge_rx_vtag_cfg {
-	bool strip_tag1_en;	/* Whether strip inner vlan tag */
-	bool strip_tag2_en;	/* Whether strip outer vlan tag */
-	bool vlan1_vlan_prionly;/* Inner VLAN Tag up to descriptor Enable */
-	bool vlan2_vlan_prionly;/* Outer VLAN Tag up to descriptor Enable */
+	u8 rx_vlan_offload_en;	/* Whether enable rx vlan offload */
+	u8 strip_tag1_en;	/* Whether strip inner vlan tag */
+	u8 strip_tag2_en;	/* Whether strip outer vlan tag */
+	u8 vlan1_vlan_prionly;	/* Inner VLAN Tag up to descriptor Enable */
+	u8 vlan2_vlan_prionly;	/* Outer VLAN Tag up to descriptor Enable */
 };
 
 struct hclge_rss_tuple_cfg {
@@ -829,6 +852,17 @@ enum HCLGE_VPORT_STATE {
 	HCLGE_VPORT_STATE_MAX
 };
 
+struct hclge_vlan_info {
+	u16 vlan_proto;	/* so far support 802.1Q only */
+	u16 qos;
+	u16 vlan_tag;
+};
+
+struct hclge_port_base_vlan_config {
+	u16 state;
+	struct hclge_vlan_info vlan_info;
+};
+
 struct hclge_vport {
 	u16 alloc_tqps;	/* Allocated Tx/Rx queues */
 
@@ -842,9 +876,10 @@ struct hclge_vport {
 	u16 alloc_rss_size;
 
 	u16 qs_offset;
-	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
+	u32 bw_limit;		/* VSI BW Limit (0 = disabled) */
 	u8  dwrr;
 
+	struct hclge_port_base_vlan_config port_base_vlan_cfg;
 	struct hclge_tx_vtag_cfg  txvlan_cfg;
 	struct hclge_rx_vtag_cfg  rxvlan_cfg;
 
@@ -924,9 +959,11 @@ void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
 				  enum HCLGE_MAC_ADDR_TYPE mac_type);
 void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
-void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id);
-void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
-			       bool is_write_tbl);
 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+				    struct hclge_vlan_info *vlan_info);
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+				      u16 state, u16 vlan_tag, u16 qos,
+				      u16 vlan_proto);
 #endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 306a23e486de..fe48c5634a87 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -212,8 +212,7 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
 }
 
 static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
-				    struct hclge_mbx_vf_to_pf_cmd *mbx_req,
-				    bool gen_resp)
+				    struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
 	const u8 *mac_addr = (const u8 *)(&mbx_req->msg[2]);
 	struct hclge_dev *hdev = vport->back;
@@ -249,7 +248,7 @@ static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
 		return -EIO;
 	}
 
-	if (gen_resp)
+	if (mbx_req->mbx_need_resp & HCLGE_MBX_NEED_RESP_BIT)
 		hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
 
 	return 0;
@@ -289,9 +288,25 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
 	return 0;
 }
 
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+				      u16 state, u16 vlan_tag, u16 qos,
+				      u16 vlan_proto)
+{
+#define MSG_DATA_SIZE	8
+
+	u8 msg_data[MSG_DATA_SIZE];
+
+	memcpy(&msg_data[0], &state, sizeof(u16));
+	memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
+	memcpy(&msg_data[4], &qos, sizeof(u16));
+	memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
+
+	return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+				  HLCGE_MBX_PUSH_VLAN_INFO, vfid);
+}
+
 static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
-				 struct hclge_mbx_vf_to_pf_cmd *mbx_req,
-				 bool gen_resp)
+				 struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
 	int status = 0;
 
@@ -305,19 +320,27 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
 		memcpy(&proto, &mbx_req->msg[5], sizeof(proto));
 		status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
 					       vlan, is_kill);
-		if (!status)
-			is_kill ? hclge_rm_vport_vlan_table(vport, vlan, false)
-			: hclge_add_vport_vlan_table(vport, vlan);
 	} else if (mbx_req->msg[1] == HCLGE_MBX_VLAN_RX_OFF_CFG) {
 		struct hnae3_handle *handle = &vport->nic;
 		bool en = mbx_req->msg[2] ? true : false;
 
 		status = hclge_en_hw_strip_rxvtag(handle, en);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
+		struct hclge_vlan_info *vlan_info;
+		u16 *state;
+
+		state = (u16 *)&mbx_req->msg[2];
+		vlan_info = (struct hclge_vlan_info *)&mbx_req->msg[4];
+		status = hclge_update_port_base_vlan_cfg(vport, *state,
+							 vlan_info);
+	} else if (mbx_req->msg[1] == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
+		u8 state;
+
+		state = vport->port_base_vlan_cfg.state;
+		status = hclge_gen_resp_to_vf(vport, mbx_req, 0, &state,
+					      sizeof(u8));
 	}
 
-	if (gen_resp)
-		status = hclge_gen_resp_to_vf(vport, mbx_req, status, NULL, 0);
-
 	return status;
 }
 
@@ -385,24 +408,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
 				    HCLGE_TQPS_DEPTH_INFO_LEN);
 }
 
+static int hclge_get_vf_media_type(struct hclge_vport *vport,
+				   struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+	struct hclge_dev *hdev = vport->back;
+	u8 resp_data;
+
+	resp_data = hdev->hw.mac.media_type;
+	return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
+				    sizeof(resp_data));
+}
+
 static int hclge_get_link_info(struct hclge_vport *vport,
 			       struct hclge_mbx_vf_to_pf_cmd *mbx_req)
 {
 	struct hclge_dev *hdev = vport->back;
 	u16 link_status;
-	u8 msg_data[10];
-	u16 media_type;
+	u8 msg_data[8];
 	u8 dest_vfid;
 	u16 duplex;
 
 	/* mac.link can only be 0 or 1 */
 	link_status = (u16)hdev->hw.mac.link;
 	duplex = hdev->hw.mac.duplex;
-	media_type = hdev->hw.mac.media_type;
 	memcpy(&msg_data[0], &link_status, sizeof(u16));
 	memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
 	memcpy(&msg_data[6], &duplex, sizeof(u16));
-	memcpy(&msg_data[8], &media_type, sizeof(u16));
 	dest_vfid = mbx_req->mbx_src_vfid;
 
 	/* send this requested info to VF */
@@ -565,7 +596,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 					ret);
 			break;
 		case HCLGE_MBX_SET_UNICAST:
-			ret = hclge_set_vf_uc_mac_addr(vport, req, true);
+			ret = hclge_set_vf_uc_mac_addr(vport, req);
 			if (ret)
 				dev_err(&hdev->pdev->dev,
 					"PF fail(%d) to set VF UC MAC Addr\n",
@@ -579,7 +610,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 					ret);
 			break;
 		case HCLGE_MBX_SET_VLAN:
-			ret = hclge_set_vf_vlan_cfg(vport, req, false);
+			ret = hclge_set_vf_vlan_cfg(vport, req);
 			if (ret)
 				dev_err(&hdev->pdev->dev,
 					"PF failed(%d) to config VF's VLAN\n",
@@ -662,6 +693,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
 			hclge_rm_vport_all_vlan_table(vport, true);
 			mutex_unlock(&hdev->vport_cfg_mutex);
 			break;
+		case HCLGE_MBX_GET_MEDIA_TYPE:
+			ret = hclge_get_vf_media_type(vport, req);
+			if (ret)
+				dev_err(&hdev->pdev->dev,
+					"PF fail(%d) to media type for VF\n",
+					ret);
+			break;
 		default:
 			dev_err(&hdev->pdev->dev,
 				"un-supported mailbox message, code = %d\n",
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 91760d9120a5..63b82e2af4ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -3,6 +3,7 @@ #include <linux/etherdevice.h> #include <linux/kernel.h> +#include <linux/marvell_phy.h> #include "hclge_cmd.h" #include "hclge_main.h" @@ -129,12 +130,18 @@ static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum) int hclge_mac_mdio_config(struct hclge_dev *hdev) { +#define PHY_INEXISTENT 255 + struct hclge_mac *mac = &hdev->hw.mac; struct phy_device *phydev; struct mii_bus *mdio_bus; int ret; - if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { + if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) { + dev_info(&hdev->pdev->dev, + "no phy device is connected to mdio bus\n"); + return 0; + } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) { dev_err(&hdev->pdev->dev, "phy_addr(%d) is too large.\n", hdev->hw.mac.phy_addr); return -EINVAL; @@ -210,6 +217,8 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle) phydev->supported &= ~SUPPORTED_FIBRE; + phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; + ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, PHY_INTERFACE_MODE_SGMII); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index aafc69f4bfdd..a7bbb6d3091a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1331,8 +1331,11 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init) ret = hclge_pfc_setup_hw(hdev); if (init && ret == -EOPNOTSUPP) dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n"); - else + else if (ret) { + dev_err(&hdev->pdev->dev, "config pfc failed! 
ret = %d\n", + ret); return ret; + } return hclge_tm_bp_setup(hdev); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index f108bad71e64..2394338190be 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring) return ring->desc_num - used - 1; } +static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring, + int head) +{ + int ntu = ring->next_to_use; + int ntc = ring->next_to_clean; + + if (ntu > ntc) + return head >= ntc && head <= ntu; + + return head >= ntc || head <= ntu; +} + static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw) { + struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw); struct hclgevf_cmq_ring *csq = &hw->cmq.csq; - u16 ntc = csq->next_to_clean; - struct hclgevf_desc *desc; int clean = 0; u32 head; - desc = &csq->desc[ntc]; head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG); - while (head != ntc) { - memset(desc, 0, sizeof(*desc)); - ntc++; - if (ntc == csq->desc_num) - ntc = 0; - desc = &csq->desc[ntc]; - clean++; + rmb(); /* Make sure head is ready before touch any data */ + + if (!hclgevf_is_valid_csq_clean_head(csq, head)) { + dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head, + csq->next_to_use, csq->next_to_clean); + dev_warn(&hdev->pdev->dev, + "Disabling any further commands to IMP firmware\n"); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return -EIO; } - csq->next_to_clean = ntc; + clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num; + csq->next_to_clean = head; return clean; } @@ -322,13 +335,13 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) int ret; spin_lock_bh(&hdev->hw.cmq.csq.lock); - spin_lock_bh(&hdev->hw.cmq.crq.lock); + spin_lock(&hdev->hw.cmq.crq.lock); /* initialize the pointers of async rx queue of mailbox */ hdev->arq.hdev = hdev; hdev->arq.head = 0; hdev->arq.tail = 0; - hdev->arq.count = 0; + atomic_set(&hdev->arq.count, 0); hdev->hw.cmq.csq.next_to_clean = 0; hdev->hw.cmq.csq.next_to_use = 0; hdev->hw.cmq.crq.next_to_clean = 0; @@ -336,7 +349,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) hclgevf_cmd_init_regs(&hdev->hw); - spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); @@ -345,8 +358,8 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) * reset may happen when lower level reset is being processed. 
*/ if (hclgevf_is_reset_pending(hdev)) { - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); - return -EBUSY; + ret = -EBUSY; + goto err_cmd_init; } /* get firmware version */ @@ -354,13 +367,18 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to query firmware version\n", ret); - return ret; + goto err_cmd_init; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; + +err_cmd_init: + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + return ret; } static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 905b41b7e3d5..c5bf6986c489 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -245,6 +245,27 @@ static int hclgevf_get_tc_info(struct hclgevf_dev *hdev) return 0; } +static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev) +{ + struct hnae3_handle *nic = &hdev->nic; + u8 resp_msg; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, + NULL, 0, true, &resp_msg, sizeof(u8)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get port based vlan state failed %d", + ret); + return ret; + } + + nic->port_base_vlan_state = resp_msg; + + return 0; +} + static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) { #define HCLGEVF_TQPS_RSS_INFO_LEN 6 @@ -307,6 +328,25 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) return qid_in_pf; } +static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev) +{ + u8 resp_msg; + int ret; + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0, + true, &resp_msg, sizeof(resp_msg)); + if (ret) { + dev_err(&hdev->pdev->dev, + "VF request to get the pf port media type failed %d", + ret); + return ret; + } + + hdev->hw.mac.media_type = resp_msg; + + return 0; +} + static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) { struct hclgevf_tqp *tqp; @@ -404,7 +444,7 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) } } -void hclgevf_update_link_mode(struct hclgevf_dev *hdev) +static void hclgevf_update_link_mode(struct hclgevf_dev *hdev) { #define HCLGEVF_ADVERTISING 0 #define HCLGEVF_SUPPORTED 1 @@ -1375,9 +1415,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) case HNAE3_VF_FUNC_RESET: ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, 0, true, NULL, sizeof(u8)); + hdev->rst_stats.vf_func_rst_cnt++; break; case HNAE3_FLR_RESET: set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + hdev->rst_stats.flr_rst_cnt++; break; default: break; @@ -1400,7 +1442,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) * know if device is undergoing reset */ ae_dev->reset_type = hdev->reset_type; - hdev->reset_count++; + hdev->rst_stats.rst_cnt++; rtnl_lock(); /* bring down the nic to stop any ongoing TX/RX */ @@ -1426,6 +1468,8 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) goto err_reset; } + hdev->rst_stats.hw_rst_done_cnt++; + rtnl_lock(); /* now, re-initialize the nic client and ae device*/ @@ -1444,6 +1488,7 @@ static int hclgevf_reset(struct hclgevf_dev *hdev) hdev->last_reset_time = jiffies; ae_dev->reset_type = HNAE3_NONE_RESET; + hdev->rst_stats.rst_done_cnt++; return ret; err_reset_lock: @@ -1455,6 +1500,8 @@ err_reset: */ hclgevf_cmd_init(hdev); 
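/* Minimal shape of the single-exit error path adopted above: once the
 * command queue is up, every failure funnels through one label so
 * HCLGEVF_STATE_CMD_DISABLE can never be forgotten.  The firmware-query
 * helper name is hypothetical; only the control flow mirrors the driver.
 */
static int example_cmd_init(struct hclgevf_dev *hdev)
{
	int ret;

	if (hclgevf_is_reset_pending(hdev)) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	ret = example_query_fw_version(hdev);	/* hypothetical helper */
	if (ret)
		goto err_cmd_init;

	return 0;

err_cmd_init:
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	return ret;
}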
dev_err(&hdev->pdev->dev, "failed to reset VF\n"); + if (hclgevf_is_reset_pending(hdev)) + hclgevf_reset_task_schedule(hdev); return ret; } @@ -1602,6 +1649,7 @@ static void hclgevf_service_timer(unsigned long data) mod_timer(&hdev->service_timer, jiffies + 5 * HZ); + hdev->stats_timer++; hclgevf_task_schedule(hdev); } @@ -1710,7 +1758,7 @@ static void hclgevf_keep_alive_task(struct work_struct *work) hdev = container_of(work, struct hclgevf_dev, keep_alive_task); - if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) return; ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, @@ -1722,9 +1770,16 @@ static void hclgevf_keep_alive_task(struct work_struct *work) static void hclgevf_service_task(struct work_struct *work) { + struct hnae3_handle *handle; struct hclgevf_dev *hdev; hdev = container_of(work, struct hclgevf_dev, service_task); + handle = &hdev->nic; + + if (hdev->stats_timer >= HCLGEVF_STATS_TIMER_INTERVAL) { + hclgevf_tqps_update_stats(handle); + hdev->stats_timer = 0; + } /* request the link status from the PF. PF would be able to tell VF * about such updates in future so we might remove this later @@ -1761,6 +1816,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); *clearval = cmdq_src_reg; + hdev->rst_stats.vf_rst_cnt++; return HCLGEVF_VECTOR0_EVENT_RST; } @@ -1813,6 +1869,11 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + /* get current port based vlan state from PF */ + ret = hclgevf_get_port_base_vlan_filter_state(hdev); + if (ret) + return ret; + /* get queue configuration from PF */ ret = hclgevf_get_queue_info(hdev); if (ret) @@ -1823,6 +1884,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) if (ret) return ret; + ret = hclgevf_get_pf_media_type(hdev); + if (ret) + return ret; + /* get tc configuration from PF */ return hclgevf_get_tc_info(hdev); } @@ -1985,8 +2050,10 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle) set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - for (i = 0; i < handle->kinfo.num_tqps; i++) - hclgevf_reset_tqp(handle, i); + if (hdev->reset_type != HNAE3_VF_RESET) + for (i = 0; i < handle->kinfo.num_tqps; i++) + if (hclgevf_reset_tqp(handle, i)) + break; /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); @@ -2006,9 +2073,15 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) static int hclgevf_client_start(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_set_alive(handle, true); + if (ret) + return ret; mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); - return hclgevf_set_alive(handle, true); + + return 0; } static void hclgevf_client_stop(struct hnae3_handle *handle) @@ -2051,6 +2124,10 @@ static void hclgevf_state_uninit(struct hclgevf_dev *hdev) { set_bit(HCLGEVF_STATE_DOWN, &hdev->state); + if (hdev->keep_alive_timer.function) + del_timer_sync(&hdev->keep_alive_timer); + if (hdev->keep_alive_task.func) + cancel_work_sync(&hdev->keep_alive_task); if (hdev->service_timer.data) del_timer_sync(&hdev->service_timer); if (hdev->service_task.func) @@ -2155,6 +2232,23 @@ static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev) hclgevf_free_vector(hdev, 0); } +static void hclgevf_info_show(struct hclgevf_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + + dev_info(dev, "VF info begin:\n"); + + dev_info(dev, 
"Task queue pairs numbers: %d\n", hdev->num_tqps); + dev_info(dev, "Desc num per TX queue: %d\n", hdev->num_tx_desc); + dev_info(dev, "Desc num per RX queue: %d\n", hdev->num_rx_desc); + dev_info(dev, "Numbers of vports: %d\n", hdev->num_alloc_vport); + dev_info(dev, "HW tc map: %d\n", hdev->hw_tc_map); + dev_info(dev, "PF media type of this VF: %d\n", + hdev->hw.mac.media_type); + + dev_info(dev, "VF info end.\n"); +} + static int hclgevf_init_client_instance(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev) { @@ -2172,6 +2266,9 @@ static int hclgevf_init_client_instance(struct hnae3_client *client, hnae3_set_client_init_flag(client, ae_dev, 1); + if (netif_msg_drv(&hdev->nic)) + hclgevf_info_show(hdev); + if (hdev->roce_client && hnae3_dev_roce_supported(hdev)) { struct hnae3_client *rc = hdev->roce_client; @@ -2677,7 +2774,7 @@ static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - return hdev->reset_count; + return hdev->rst_stats.hw_rst_done_cnt; } static void hclgevf_get_link_mode(struct hnae3_handle *handle, @@ -2756,6 +2853,31 @@ static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, } } +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size) +{ + struct hnae3_handle *nic = &hdev->nic; + + rtnl_lock(); + hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + rtnl_unlock(); + + /* send msg to PF and wait update port based vlan info */ + hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_VLAN, + HCLGE_MBX_PORT_BASE_VLAN_CFG, + port_base_vlan_info, data_size, + false, NULL, 0); + + if (state == HNAE3_PORT_BASE_VLAN_DISABLE) + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE; + else + nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE; + + rtnl_lock(); + hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + rtnl_unlock(); +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index c128863ee7d0..ee3a6cbe87d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -116,6 +116,8 @@ #define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_V_TAG_BIT BIT(4) +#define HCLGEVF_STATS_TIMER_INTERVAL (36) + enum hclgevf_evt_cause { HCLGEVF_VECTOR0_EVENT_RST, HCLGEVF_VECTOR0_EVENT_MBX, @@ -210,6 +212,15 @@ struct hclgevf_misc_vector { int vector_irq; }; +struct hclgevf_rst_stats { + u32 rst_cnt; /* the number of reset */ + u32 vf_func_rst_cnt; /* the number of VF function reset */ + u32 flr_rst_cnt; /* the number of FLR */ + u32 vf_rst_cnt; /* the number of VF reset */ + u32 rst_done_cnt; /* the number of reset completed */ + u32 hw_rst_done_cnt; /* the number of HW reset completed */ +}; + struct hclgevf_dev { struct pci_dev *pdev; struct hnae3_ae_dev *ae_dev; @@ -227,7 +238,7 @@ struct hclgevf_dev { #define HCLGEVF_RESET_REQUESTED 0 #define HCLGEVF_RESET_PENDING 1 unsigned long reset_state; /* requested, pending */ - unsigned long reset_count; /* the number of reset has been done */ + struct hclgevf_rst_stats rst_stats; u32 reset_attempts; u32 fw_version; @@ -272,6 +283,7 @@ struct hclgevf_dev { struct hnae3_client *nic_client; struct hnae3_client *roce_client; u32 flag; + u32 stats_timer; }; static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) @@ 
-290,4 +302,6 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, u8 duplex); void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev); void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev); +void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state, + u8 *port_base_vlan_info, u8 data_size); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 7dc3c9f79169..30f2e9352cf3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -49,8 +49,8 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (i >= HCLGEVF_MAX_TRY_TIMES) { dev_err(&hdev->pdev->dev, - "VF could not get mbx resp(=%d) from PF in %d tries\n", - hdev->mbx_resp.received_resp, i); + "VF could not get mbx(%d,%d) resp(=%d) from PF in %d tries\n", + code0, code1, hdev->mbx_resp.received_resp, i); return -EIO; } @@ -68,8 +68,11 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) { dev_err(&hdev->pdev->dev, - "VF could not match resp code(code0=%d,code1=%d), %d", + "VF could not match resp code(code0=%d,code1=%d), %d\n", code0, code1, mbx_resp->resp_status); + dev_err(&hdev->pdev->dev, + "VF could not match resp r_code(r_code0=%d,r_code1=%d)\n", + r_code0, r_code1); return -EIO; } @@ -95,6 +98,8 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, } hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false); + req->mbx_need_resp |= need_resp ? HCLGE_MBX_NEED_RESP_BIT : + ~HCLGE_MBX_NEED_RESP_BIT; req->msg[0] = code; req->msg[1] = subcode; memcpy(&req->msg[2], msg_data, msg_len); @@ -198,6 +203,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: + case HLCGE_MBX_PUSH_VLAN_INFO: /* set this mbx event as pending. This is required as we * might loose interrupt event when mbx task is busy * handling. 
This shall be cleared when mbx task just @@ -208,7 +214,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) /* we will drop the async msg if we find ARQ as full * and continue with next message */ - if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) { + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { dev_warn(&hdev->pdev->dev, "Async Q full, dropping msg(%d)\n", req->msg[1]); @@ -220,7 +227,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) memcpy(&msg_q[0], req->msg, HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); hclge_mbx_tail_ptr_move_arq(hdev->arq); - hdev->arq.count++; + atomic_inc(&hdev->arq.count); hclgevf_mbx_task_schedule(hdev); @@ -243,8 +250,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { enum hnae3_reset_type reset_type; - u16 link_status; - u16 *msg_q; + u16 link_status, state; + u16 *msg_q, *vlan_info; u8 duplex; u32 speed; u32 tail; @@ -272,7 +279,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) link_status = le16_to_cpu(msg_q[1]); memcpy(&speed, &msg_q[2], sizeof(speed)); duplex = (u8)le16_to_cpu(msg_q[4]); - hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]); /* update upper layer with new link link status */ hclgevf_update_link_status(hdev, link_status); @@ -300,6 +306,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) hclgevf_reset_task_schedule(hdev); break; + case HLCGE_MBX_PUSH_VLAN_INFO: + state = le16_to_cpu(msg_q[1]); + vlan_info = &msg_q[1]; + hclgevf_update_port_base_vlan_info(hdev, state, + (u8 *)vlan_info, 8); + break; default: dev_err(&hdev->pdev->dev, "fetched unsupported(%d) message from arq\n", @@ -308,7 +320,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) } hclge_mbx_head_ptr_move_arq(hdev->arq); - hdev->arq.count--; + atomic_dec(&hdev->arq.count); msg_q = hdev->arq.msg_q[hdev->arq.head]; } } diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 036742a0385a..9987504a1e83 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -119,8 +119,10 @@ #define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) #define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) -#define MII_88E1121_PHY_LED_CTRL 16 +#define MII_PHY_LED_CTRL 16 #define MII_88E1121_PHY_LED_DEF 0x0030 +#define MII_88E1510_PHY_LED_DEF 0x1177 +#define MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE 0x1040 #define MII_M1011_PHY_STATUS 0x11 #define MII_M1011_PHY_STATUS_1000 0x8000 @@ -622,8 +624,43 @@ error: return err; } +static void marvell_config_led(struct phy_device *phydev) +{ + u16 def_config; + int err; + + switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) { + /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */ + case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R): + case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S): + def_config = MII_88E1121_PHY_LED_DEF; + break; + /* Default PHY LED config: + * LED[0] .. 1000Mbps Link + * LED[1] .. 100Mbps Link + * LED[2] .. 
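/* Sketch of the ARQ accounting made race-safe above: the IRQ-context
 * producer and work-context consumer now pair atomic_inc()/atomic_dec()
 * on arq.count, so the drop decision against HCLGE_MBX_MAX_ARQ_MSG_NUM
 * cannot act on a torn or stale plain-integer read.  Helper names are
 * hypothetical.
 */
static bool example_arq_enqueue(struct hclgevf_dev *hdev)
{
	if (atomic_read(&hdev->arq.count) >= HCLGE_MBX_MAX_ARQ_MSG_NUM)
		return false;	/* queue full: drop the async message */

	/* ... copy the message and advance the tail pointer ... */
	atomic_inc(&hdev->arq.count);
	return true;
}

static void example_arq_dequeue(struct hclgevf_dev *hdev)
{
	/* ... process the message and advance the head pointer ... */
	atomic_dec(&hdev->arq.count);
}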
Blink, Activity + */ + case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510): + if (phydev->dev_flags & MARVELL_PHY_LED0_LINK_LED1_ACTIVE) + def_config = MII_88E1510_PHY_LED0_LINK_LED1_ACTIVE; + else + def_config = MII_88E1510_PHY_LED_DEF; + break; + default: + return; + } + + err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL, + def_config); + if (err < 0) + pr_warn("Fail to config marvell phy LED.\n"); +} + static int marvell_config_init(struct phy_device *phydev) { + /* Set defalut LED */ + marvell_config_led(phydev); + /* Set registers from marvell,reg-init DT property */ return marvell_of_reg_init(phydev); } @@ -805,18 +842,19 @@ static int m88e1111_config_init(struct phy_device *phydev) return genphy_soft_reset(phydev); } -static int m88e1121_config_init(struct phy_device *phydev) +static int m88e1318_config_init(struct phy_device *phydev) { - int err; - - /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */ - err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, - MII_88E1121_PHY_LED_CTRL, - MII_88E1121_PHY_LED_DEF); - if (err < 0) - return err; + if (phy_interrupt_is_valid(phydev)) { + int err = phy_modify_paged( + phydev, MII_MARVELL_LED_PAGE, + MII_88E1318S_PHY_LED_TCR, + MII_88E1318S_PHY_LED_TCR_FORCE_INT, + MII_88E1318S_PHY_LED_TCR_INTn_ENABLE | + MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW); + if (err < 0) + return err; + } - /* Set marvell,reg-init configuration from device tree */ return marvell_config_init(phydev); } @@ -850,7 +888,7 @@ static int m88e1510_config_init(struct phy_device *phydev) return err; } - return m88e1121_config_init(phydev); + return m88e1318_config_init(phydev); } static int m88e1118_config_aneg(struct phy_device *phydev) @@ -1949,7 +1987,7 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = &m88e1121_probe, - .config_init = &m88e1121_config_init, + .config_init = &marvell_config_init, .config_aneg = &m88e1121_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, @@ -1970,7 +2008,7 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, - .config_init = &m88e1121_config_init, + .config_init = &m88e1318_config_init, .config_aneg = &m88e1318_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index cec736687d3a..a4b3cb3e0d80 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -154,6 +154,8 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; + uint32_t faddr; + struct active_regions active_regions = { }; if (!capable(CAP_SYS_ADMIN)) return 0; @@ -164,11 +166,21 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, return -EAGAIN; } - if (IS_NOCACHE_VPD_TYPE(ha)) - ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, - ha->nvram_size); + if (!IS_NOCACHE_VPD_TYPE(ha)) { + mutex_unlock(&ha->optrom_mutex); + goto skip; + } + + faddr = ha->flt_region_nvram; + if (IS_QLA28XX(ha)) { + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_nvram_sec; + } + ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); + mutex_unlock(&ha->optrom_mutex); +skip: return memory_read_from_buffer(buf, count, 
&off, ha->nvram, ha->nvram_size); } @@ -223,9 +235,9 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, } /* Write NVRAM. */ - ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count); - ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base, - count); + ha->isp_ops->write_nvram(vha, buf, ha->nvram_base, count); + ha->isp_ops->read_nvram(vha, ha->nvram, ha->nvram_base, + count); mutex_unlock(&ha->optrom_mutex); ql_dbg(ql_dbg_user, vha, 0x7060, @@ -427,7 +439,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) - || IS_QLA27XX(ha)) + || IS_QLA27XX(ha) || IS_QLA28XX(ha)) valid = 1; if (!valid) { ql_log(ql_log_warn, vha, 0x7065, @@ -504,6 +516,7 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; uint32_t faddr; + struct active_regions active_regions = { }; if (unlikely(pci_channel_offline(ha->pdev))) return -EAGAIN; @@ -511,22 +524,33 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EINVAL; - if (IS_NOCACHE_VPD_TYPE(ha)) { - faddr = ha->flt_region_vpd << 2; + if (IS_NOCACHE_VPD_TYPE(ha)) + goto skip; + + faddr = ha->flt_region_vpd << 2; - if (IS_QLA27XX(ha) && - qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) + if (IS_QLA28XX(ha)) { + qla28xx_get_aux_images(vha, &active_regions); + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_vpd_sec << 2; - mutex_lock(&ha->optrom_mutex); - if (qla2x00_chip_is_down(vha)) { - mutex_unlock(&ha->optrom_mutex); - return -EAGAIN; - } - ha->isp_ops->read_optrom(vha, ha->vpd, faddr, - ha->vpd_size); + ql_dbg(ql_dbg_init, vha, 0x7070, + "Loading %s nvram image.\n", + active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? + "primary" : "secondary"); + } + + mutex_lock(&ha->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { mutex_unlock(&ha->optrom_mutex); + return -EAGAIN; } + + ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); + mutex_unlock(&ha->optrom_mutex); + + ha->isp_ops->read_optrom(vha, ha->vpd, faddr, ha->vpd_size); +skip: return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size); } @@ -563,8 +587,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, } /* Write NVRAM. */ - ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count); - ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count); + ha->isp_ops->write_nvram(vha, buf, ha->vpd_base, count); + ha->isp_ops->read_nvram(vha, ha->vpd, ha->vpd_base, count); /* Update flash version information for 4Gb & above. 
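/* Condensed form of the primary/secondary flash-region pick used above
 * for ISP28xx NVRAM reads: when the aux image status reports the
 * secondary VPD/NVRAM region as active, the read switches to the *_sec
 * flash address.  The helper name is hypothetical.
 */
static uint32_t example_nvram_faddr(struct qla_hw_data *ha,
				    struct active_regions *regions)
{
	uint32_t faddr = ha->flt_region_nvram;

	if (IS_QLA28XX(ha) &&
	    regions->aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE)
		faddr = ha->flt_region_nvram_sec;

	return faddr;	/* caller shifts by 2 to form the byte address */
}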
*/ if (!IS_FWI2_CAPABLE(ha)) { @@ -682,7 +706,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { uint32_t idc_control; qla83xx_idc_lock(vha, 0); @@ -934,7 +958,7 @@ static struct bin_attribute sysfs_dcbx_tlv_attr = { static struct sysfs_entry { char *name; struct bin_attribute *attr; - int is4GBp_only; + int type; } bin_file_entries[] = { { "fw_dump", &sysfs_fw_dump_attr, }, { "nvram", &sysfs_nvram_attr, }, @@ -957,11 +981,11 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha) int ret; for (iter = bin_file_entries; iter->name; iter++) { - if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw)) + if (iter->type && !IS_FWI2_CAPABLE(vha->hw)) continue; - if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw)) + if (iter->type == 2 && !IS_QLA25XX(vha->hw)) continue; - if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) + if (iter->type == 3 && !(IS_CNA_CAPABLE(vha->hw))) continue; ret = sysfs_create_bin_file(&host->shost_gendev.kobj, @@ -985,13 +1009,14 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon) struct qla_hw_data *ha = vha->hw; for (iter = bin_file_entries; iter->name; iter++) { - if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha)) + if (iter->type && !IS_FWI2_CAPABLE(ha)) continue; - if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha)) + if (iter->type == 2 && !IS_QLA25XX(ha)) continue; - if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw))) + if (iter->type == 3 && !(IS_CNA_CAPABLE(ha))) continue; - if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw)) + if (iter->type == 0x27 && + (!IS_QLA27XX(ha) || !IS_QLA28XX(ha))) continue; sysfs_remove_bin_file(&host->shost_gendev.kobj, @@ -1336,7 +1361,8 @@ qla2x00_optrom_gold_fw_version_show(struct device *dev, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n", @@ -1358,24 +1384,40 @@ qla24xx_84xx_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { int rval = QLA_SUCCESS; - uint16_t status[2] = {0, 0}; + uint16_t status[2] = { 0 }; scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; if (!IS_QLA84XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); - if (ha->cs84xx->op_fw_version == 0) + if (!ha->cs84xx->op_fw_version) { rval = qla84xx_verify_chip(vha, status); - if ((rval == QLA_SUCCESS) && (status[0] == 0)) - return scnprintf(buf, PAGE_SIZE, "%u\n", - (uint32_t)ha->cs84xx->op_fw_version); + if (!rval && !status[0]) + return scnprintf(buf, PAGE_SIZE, "%u\n", + (uint32_t)ha->cs84xx->op_fw_version); + } return scnprintf(buf, PAGE_SIZE, "\n"); } static ssize_t +qla2x00_serdes_version_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", + ha->serdes_version[0], ha->serdes_version[1], + ha->serdes_version[2]); +} + +static ssize_t qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1383,7 +1425,7 @@ qla2x00_mpi_version_show(struct device 
*dev, struct device_attribute *attr, struct qla_hw_data *ha = vha->hw; if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha) && - !IS_QLA27XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n", @@ -1596,7 +1638,7 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA27XX(ha)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", @@ -1604,35 +1646,38 @@ qla2x00_pep_version_show(struct device *dev, struct device_attribute *attr, } static ssize_t -qla2x00_min_link_speed_show(struct device *dev, struct device_attribute *attr, - char *buf) +qla2x00_min_supported_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA27XX(ha)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%s\n", - ha->min_link_speed == 5 ? "32Gps" : - ha->min_link_speed == 4 ? "16Gps" : - ha->min_link_speed == 3 ? "8Gps" : - ha->min_link_speed == 2 ? "4Gps" : - ha->min_link_speed != 0 ? "unknown" : ""); + ha->min_supported_speed == 6 ? "64Gps" : + ha->min_supported_speed == 5 ? "32Gps" : + ha->min_supported_speed == 4 ? "16Gps" : + ha->min_supported_speed == 3 ? "8Gps" : + ha->min_supported_speed == 2 ? "4Gps" : + ha->min_supported_speed != 0 ? "unknown" : ""); } static ssize_t -qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr, - char *buf) +qla2x00_max_supported_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; - if (!IS_QLA27XX(ha)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return scnprintf(buf, PAGE_SIZE, "\n"); return scnprintf(buf, PAGE_SIZE, "%s\n", - ha->max_speed_sup ? "32Gps" : "16Gps"); + ha->max_supported_speed == 2 ? "64Gps" : + ha->max_supported_speed == 1 ? "32Gps" : + ha->max_supported_speed == 0 ? 
"16Gps" : "unknown"); } static ssize_t @@ -1645,7 +1690,7 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, int mode = QLA_SET_DATA_RATE_LR; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA27XX(vha->hw)) { + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { ql_log(ql_log_warn, vha, 0x70d8, "Speed setting not supported \n"); return -EINVAL; @@ -2164,6 +2209,32 @@ qla2x00_dif_bundle_statistics_show(struct device *dev, ha->dif_bundle_dma_allocs, ha->pool.unusable.count); } +static ssize_t +qla2x00_fw_attr_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + return scnprintf(buf, PAGE_SIZE, "%llx\n", + (uint64_t)ha->fw_attributes_ext[1] << 48 | + (uint64_t)ha->fw_attributes_ext[0] << 32 | + (uint64_t)ha->fw_attributes_h << 16 | + (uint64_t)ha->fw_attributes); +} + +static ssize_t +qla2x00_port_no_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + return scnprintf(buf, PAGE_SIZE, "%u\n", vha->hw->port_no); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -2192,6 +2263,7 @@ static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show, NULL); static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show, NULL); +static DEVICE_ATTR(serdes_version, 0444, qla2x00_serdes_version_show, NULL); static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL); static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL); static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show, @@ -2209,8 +2281,10 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR, qla2x00_allow_cna_fw_dump_show, qla2x00_allow_cna_fw_dump_store); static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL); -static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL); -static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL); +static DEVICE_ATTR(min_supported_speed, 0444, + qla2x00_min_supported_speed_show, NULL); +static DEVICE_ATTR(max_supported_speed, 0444, + qla2x00_max_supported_speed_show, NULL); static DEVICE_ATTR(zio_threshold, 0644, qla_zio_threshold_show, qla_zio_threshold_store); @@ -2221,6 +2295,8 @@ static DEVICE_ATTR(dif_bundle_statistics, 0444, qla2x00_dif_bundle_statistics_show, NULL); static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show, qla2x00_port_speed_store); +static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); +static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { @@ -2242,6 +2318,7 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_optrom_fw_version, &dev_attr_84xx_fw_version, &dev_attr_total_isp_aborts, + &dev_attr_serdes_version, &dev_attr_mpi_version, &dev_attr_phy_version, &dev_attr_flash_block_size, @@ -2256,11 +2333,13 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_fw_dump_size, &dev_attr_allow_cna_fw_dump, &dev_attr_pep_version, - &dev_attr_min_link_speed, - &dev_attr_max_speed_sup, + &dev_attr_min_supported_speed, + &dev_attr_max_supported_speed, &dev_attr_zio_threshold, 
&dev_attr_dif_bundle_statistics, &dev_attr_port_speed, + &dev_attr_port_no, + &dev_attr_fw_attr, NULL, /* reserve for qlini_mode */ NULL, /* reserve for ql2xiniexchg */ NULL, /* reserve for ql2xexchoffld */ @@ -2296,16 +2375,15 @@ qla2x00_get_host_port_id(struct Scsi_Host *shost) static void qla2x00_get_host_speed(struct Scsi_Host *shost) { - struct qla_hw_data *ha = ((struct scsi_qla_host *) - (shost_priv(shost)))->hw; - u32 speed = FC_PORTSPEED_UNKNOWN; + scsi_qla_host_t *vha = shost_priv(shost); + u32 speed; - if (IS_QLAFX00(ha)) { + if (IS_QLAFX00(vha->hw)) { qlafx00_get_host_speed(shost); return; } - switch (ha->link_data_rate) { + switch (vha->hw->link_data_rate) { case PORT_SPEED_1GB: speed = FC_PORTSPEED_1GBIT; break; @@ -2327,7 +2405,14 @@ qla2x00_get_host_speed(struct Scsi_Host *shost) case PORT_SPEED_32GB: speed = FC_PORTSPEED_32GBIT; break; + case PORT_SPEED_64GB: + speed = FC_PORTSPEED_64GBIT; + break; + default: + speed = FC_PORTSPEED_UNKNOWN; + break; } + fc_host_speed(shost) = speed; } @@ -2335,7 +2420,7 @@ static void qla2x00_get_host_port_type(struct Scsi_Host *shost) { scsi_qla_host_t *vha = shost_priv(shost); - uint32_t port_type = FC_PORTTYPE_UNKNOWN; + uint32_t port_type; if (vha->vp_idx) { fc_host_port_type(shost) = FC_PORTTYPE_NPIV; @@ -2354,7 +2439,11 @@ qla2x00_get_host_port_type(struct Scsi_Host *shost) case ISP_CFG_F: port_type = FC_PORTTYPE_NPORT; break; + default: + port_type = FC_PORTTYPE_UNKNOWN; + break; } + fc_host_port_type(shost) = port_type; } @@ -2416,13 +2505,10 @@ qla2x00_get_starget_port_id(struct scsi_target *starget) fc_starget_port_id(starget) = port_id; } -static void +static inline void qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { - if (timeout) - rport->dev_loss_tmo = timeout; - else - rport->dev_loss_tmo = 1; + rport->dev_loss_tmo = timeout ? 
timeout : 1; } static void @@ -2977,7 +3063,7 @@ void qla2x00_init_host_attr(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - u32 speed = FC_PORTSPEED_UNKNOWN; + u32 speeds = FC_PORTSPEED_UNKNOWN; fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); @@ -2988,25 +3074,45 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; if (IS_CNA_CAPABLE(ha)) - speed = FC_PORTSPEED_10GBIT; - else if (IS_QLA2031(ha)) - speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT | - FC_PORTSPEED_4GBIT; - else if (IS_QLA25XX(ha)) - speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | - FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; + speeds = FC_PORTSPEED_10GBIT; + else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { + if (ha->max_supported_speed == 2) { + if (ha->min_supported_speed <= 6) + speeds |= FC_PORTSPEED_64GBIT; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1) { + if (ha->min_supported_speed <= 5) + speeds |= FC_PORTSPEED_32GBIT; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 4) + speeds |= FC_PORTSPEED_16GBIT; + } + if (ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 3) + speeds |= FC_PORTSPEED_8GBIT; + } + if (ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 2) + speeds |= FC_PORTSPEED_4GBIT; + } + } else if (IS_QLA2031(ha)) + speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT| + FC_PORTSPEED_4GBIT; + else if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) + speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT| + FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT; else if (IS_QLA24XX_TYPE(ha)) - speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT | - FC_PORTSPEED_1GBIT; + speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT| + FC_PORTSPEED_1GBIT; else if (IS_QLA23XX(ha)) - speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; - else if (IS_QLAFX00(ha)) - speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | - FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; - else if (IS_QLA27XX(ha)) - speed = FC_PORTSPEED_32GBIT | FC_PORTSPEED_16GBIT | - FC_PORTSPEED_8GBIT; + speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT; else - speed = FC_PORTSPEED_1GBIT; - fc_host_supported_speeds(vha->host) = speed; + speeds = FC_PORTSPEED_1GBIT; + + fc_host_supported_speeds(vha->host) = speeds; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index ff294092b6a4..55d4223fe833 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -84,8 +84,7 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, return 0; } - if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' || - bcode[3] != 'S') { + if (memcmp(bcode, "HQOS", 4)) { /* Invalid FCP priority data header*/ ql_dbg(ql_dbg_user, vha, 0x7052, "Invalid FCP Priority data header. 
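/* Summary of the 27xx/28xx supported-speeds ladder above (editor's
 * note): max_supported_speed selects the top three rungs that can be
 * advertised, and min_supported_speed (2=4G .. 6=64G) then drops any
 * rung below the configured minimum.
 *
 *   max_supported_speed   rungs considered
 *          2 (64G)        64G / 32G / 16G
 *          1 (32G)        32G / 16G / 8G
 *          0 (16G)        16G / 8G  / 4G
 *
 * The surviving FC_PORTSPEED_* bits are OR-ed into `speeds` and handed
 * to fc_host_supported_speeds().
 */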
bcode=0x%x.\n", @@ -1413,7 +1412,8 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, start == (ha->flt_region_fw * 4)) valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || - IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) + IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) valid = 1; if (!valid) { ql_log(ql_log_warn, vha, 0x7058, @@ -1962,7 +1962,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) /* Dump the vendor information */ ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf, - (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00)); + piocb_rqst, sizeof(*piocb_rqst)); if (!vha->flags.online) { ql_log(ql_log_warn, vha, 0x70d0, @@ -2158,7 +2158,7 @@ qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) struct qla_hw_data *ha = vha->hw; struct qla_flash_update_caps cap; - if (!(IS_QLA27XX(ha))) + if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha)) return -EPERM; memset(&cap, 0, sizeof(cap)); @@ -2191,7 +2191,7 @@ qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) uint64_t online_fw_attr = 0; struct qla_flash_update_caps cap; - if (!(IS_QLA27XX(ha))) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; memset(&cap, 0, sizeof(cap)); @@ -2239,7 +2239,7 @@ qla27xx_get_bbcr_data(struct bsg_job *bsg_job) uint8_t domain, area, al_pa, state; int rval; - if (!(IS_QLA27XX(ha))) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; memset(&bbcr, 0, sizeof(bbcr)); @@ -2324,8 +2324,8 @@ qla2x00_get_priv_stats(struct bsg_job *bsg_job) rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options); if (rval == QLA_SUCCESS) { - ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3, - (uint8_t *)stats, sizeof(*stats)); + ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e5, + stats, sizeof(*stats)); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats)); } @@ -2354,7 +2354,8 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) int rval; struct qla_dport_diag *dd; - if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && + !IS_QLA28XX(vha->hw)) return -EPERM; dd = kmalloc(sizeof(*dd), GFP_KERNEL); @@ -2389,6 +2390,45 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) } static int +qla2x00_get_flash_image_status(struct bsg_job *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_hw_data *ha = vha->hw; + struct qla_active_regions regions = { }; + struct active_regions active_regions = { }; + + qla28xx_get_aux_images(vha, &active_regions); + regions.global_image = active_regions.global; + + if (IS_QLA28XX(ha)) { + qla27xx_get_active_image(vha, &active_regions); + regions.board_config = active_regions.aux.board_config; + regions.vpd_nvram = active_regions.aux.vpd_nvram; + regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1; + regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3; + } + + ql_dbg(ql_dbg_user, vha, 0x70e1, + "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n", + __func__, vha->host_no, regions.global_image, + regions.board_config, regions.vpd_nvram, + regions.npiv_config_0_1, regions.npiv_config_2_3); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, ®ions, sizeof(regions)); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + bsg_reply->reply_payload_rcv_len = sizeof(regions); + bsg_reply->result = DID_OK << 16; + bsg_job->reply_len = sizeof(struct 
fc_bsg_reply); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int qla2x00_process_vendor_specific(struct bsg_job *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; @@ -2461,6 +2501,9 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job) case QL_VND_DPORT_DIAGNOSTICS: return qla2x00_do_dport_diagnostics(bsg_job); + case QL_VND_SS_GET_FLASH_IMAGE_STATUS: + return qla2x00_get_flash_image_status(bsg_job); + default: return -ENOSYS; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index d97dfd521356..7594fad7b5b5 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -31,6 +31,7 @@ #define QL_VND_GET_PRIV_STATS 0x18 #define QL_VND_DPORT_DIAGNOSTICS 0x19 #define QL_VND_GET_PRIV_STATS_EX 0x1A +#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E /* BSG Vendor specific subcode returns */ #define EXT_STATUS_OK 0 @@ -279,4 +280,14 @@ struct qla_dport_diag { #define QLA_DPORT_RESULT 0x0 #define QLA_DPORT_START 0x2 +/* active images in flash */ +struct qla_active_regions { + uint8_t global_image; + uint8_t board_config; + uint8_t vpd_nvram; + uint8_t npiv_config_0_1; + uint8_t npiv_config_2_3; + uint8_t reserved[32]; +} __packed; + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index c7533fa7f46e..94da4b9927e9 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -111,30 +111,25 @@ int qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, uint32_t ram_dwords, void **nxt) { - int rval; - uint32_t cnt, stat, timer, dwords, idx; - uint16_t mb0; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; - uint32_t *dump = (uint32_t *)ha->gid_list; + uint32_t *chunk = (void *)ha->gid_list; + uint32_t dwords = qla2x00_gid_list_size(ha) / 4; + uint32_t stat; + ulong i, j, timer = 6000000; + int rval = QLA_FUNCTION_FAILED; - rval = QLA_SUCCESS; - mb0 = 0; - - WRT_REG_WORD(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { + if (i + dwords > ram_dwords) + dwords = ram_dwords - i; - dwords = qla2x00_gid_list_size(ha) / 4; - for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; - cnt += dwords, addr += dwords) { - if (cnt + dwords > ram_dwords) - dwords = ram_dwords - cnt; - + WRT_REG_WORD(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); WRT_REG_WORD(®->mailbox1, LSW(addr)); WRT_REG_WORD(®->mailbox8, MSW(addr)); - WRT_REG_WORD(®->mailbox2, MSW(dump_dma)); - WRT_REG_WORD(®->mailbox3, LSW(dump_dma)); + WRT_REG_WORD(®->mailbox2, MSW(LSD(dump_dma))); + WRT_REG_WORD(®->mailbox3, LSW(LSD(dump_dma))); WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); @@ -145,76 +140,76 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); ha->flags.mbox_int = 0; - for (timer = 6000000; timer; timer--) { - /* Check for pending interrupts. */ - stat = RD_REG_DWORD(®->host_status); - if (stat & HSRX_RISC_INT) { - stat &= 0xff; - - if (stat == 0x1 || stat == 0x2 || - stat == 0x10 || stat == 0x11) { - set_bit(MBX_INTERRUPT, - &ha->mbx_cmd_flags); + while (timer--) { + udelay(5); - mb0 = RD_REG_WORD(®->mailbox0); - RD_REG_WORD(®->mailbox1); + stat = RD_REG_DWORD(®->host_status); + /* Check for pending interrupts. 
*/ + if (!(stat & HSRX_RISC_INT)) + continue; - WRT_REG_DWORD(®->hccr, - HCCRX_CLR_RISC_INT); - RD_REG_DWORD(®->hccr); - break; - } + stat &= 0xff; + if (stat != 0x1 && stat != 0x2 && + stat != 0x10 && stat != 0x11) { /* Clear this intr; it wasn't a mailbox intr */ WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD(®->hccr); + continue; } - udelay(5); + + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + rval = RD_REG_WORD(®->mailbox0) & MBS_MASK; + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); + break; } ha->flags.mbox_int = 1; + *nxt = ram + i; - if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { - rval = mb0 & MBS_MASK; - for (idx = 0; idx < dwords; idx++) - ram[cnt + idx] = IS_QLA27XX(ha) ? - le32_to_cpu(dump[idx]) : swab32(dump[idx]); - } else { - rval = QLA_FUNCTION_FAILED; + if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + /* no interrupt, timed out*/ + return rval; + } + if (rval) { + /* error completion status */ + return rval; + } + for (j = 0; j < dwords; j++) { + ram[i + j] = + (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? + chunk[j] : swab32(chunk[j]); } } - *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL; - return rval; + *nxt = ram + i; + return QLA_SUCCESS; } int qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, uint32_t ram_dwords, void **nxt) { - int rval; - uint32_t cnt, stat, timer, dwords, idx; - uint16_t mb0; + int rval = QLA_FUNCTION_FAILED; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; - uint32_t *dump = (uint32_t *)ha->gid_list; + uint32_t *chunk = (void *)ha->gid_list; + uint32_t dwords = qla2x00_gid_list_size(ha) / 4; + uint32_t stat; + ulong i, j, timer = 6000000; - rval = QLA_SUCCESS; - mb0 = 0; - - WRT_REG_WORD(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - dwords = qla2x00_gid_list_size(ha) / 4; - for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS; - cnt += dwords, addr += dwords) { - if (cnt + dwords > ram_dwords) - dwords = ram_dwords - cnt; + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { + if (i + dwords > ram_dwords) + dwords = ram_dwords - i; + WRT_REG_WORD(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); WRT_REG_WORD(®->mailbox1, LSW(addr)); WRT_REG_WORD(®->mailbox8, MSW(addr)); - WRT_REG_WORD(®->mailbox2, MSW(dump_dma)); - WRT_REG_WORD(®->mailbox3, LSW(dump_dma)); + WRT_REG_WORD(®->mailbox2, MSW(LSD(dump_dma))); + WRT_REG_WORD(®->mailbox3, LSW(LSD(dump_dma))); WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); @@ -223,45 +218,48 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); ha->flags.mbox_int = 0; - for (timer = 6000000; timer; timer--) { - /* Check for pending interrupts. */ + while (timer--) { + udelay(5); stat = RD_REG_DWORD(®->host_status); - if (stat & HSRX_RISC_INT) { - stat &= 0xff; - if (stat == 0x1 || stat == 0x2 || - stat == 0x10 || stat == 0x11) { - set_bit(MBX_INTERRUPT, - &ha->mbx_cmd_flags); - - mb0 = RD_REG_WORD(®->mailbox0); - - WRT_REG_DWORD(®->hccr, - HCCRX_CLR_RISC_INT); - RD_REG_DWORD(®->hccr); - break; - } + /* Check for pending interrupts. 
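/* Editor's note on the rewritten polling loop above (shared by the MPI
 * and RISC dump paths): a non-mailbox interrupt is acked and the poll
 * continues, while a mailbox interrupt latches MBX_INTERRUPT and the
 * mailbox0 completion status.  The caller then treats a missing
 * MBX_INTERRUPT bit as a timeout and a nonzero masked status as an
 * error, returning immediately instead of silently yielding a NULL
 * *nxt pointer as the old code did.
 */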
*/ + if (!(stat & HSRX_RISC_INT)) + continue; - /* Clear this intr; it wasn't a mailbox intr */ + stat &= 0xff; + if (stat != 0x1 && stat != 0x2 && + stat != 0x10 && stat != 0x11) { WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); RD_REG_DWORD(®->hccr); + continue; } - udelay(5); + + set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + rval = RD_REG_WORD(®->mailbox0) & MBS_MASK; + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); + break; } ha->flags.mbox_int = 1; + *nxt = ram + i; - if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { - rval = mb0 & MBS_MASK; - for (idx = 0; idx < dwords; idx++) - ram[cnt + idx] = IS_QLA27XX(ha) ? - le32_to_cpu(dump[idx]) : swab32(dump[idx]); - } else { - rval = QLA_FUNCTION_FAILED; + if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { + /* no interrupt, timed out*/ + return rval; + } + if (rval) { + /* error completion status */ + return rval; + } + for (j = 0; j < dwords; j++) { + ram[i + j] = + (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? + chunk[j] : swab32(chunk[j]); } } - *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL; - return rval; + *nxt = ram + i; + return QLA_SUCCESS; } static int @@ -669,7 +667,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) struct qla2xxx_mq_chain *mq = ptr; device_reg_t *reg; - if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) return ptr; mq = ptr; @@ -2521,7 +2520,7 @@ qla83xx_fw_dump_failed: /****************************************************************************/ static inline int -ql_mask_match(uint32_t level) +ql_mask_match(uint level) { return (level & ql2xextended_error_logging) == level; } @@ -2540,7 +2539,7 @@ ql_mask_match(uint32_t level) * msg: The message to be displayed. */ void -ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) +ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; @@ -2583,8 +2582,7 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) * msg: The message to be displayed. */ void -ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, - const char *fmt, ...) +ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; @@ -2620,7 +2618,7 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, * msg: The message to be displayed. */ void -ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) +ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; @@ -2678,8 +2676,7 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...) * msg: The message to be displayed. */ void -ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, - const char *fmt, ...) +ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) 
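/* Why the prototypes above carry __attribute__((format(printf, 4, 5))):
 * argument 4 is the format string and the variadic arguments begin at
 * position 5, so the compiler type-checks callers exactly as it checks
 * printf().  A minimal standalone example of the annotation:
 */
#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 2, 3)))
static void log_with_prefix(const char *prefix, const char *fmt, ...)
{
	va_list ap;

	printf("%s: ", prefix);
	va_start(ap, fmt);
	vprintf(fmt, ap);	/* gcc/clang warn on mismatched callers */
	va_end(ap);
}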
{ va_list va; struct va_format vaf; @@ -2719,7 +2716,7 @@ ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, } void -ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) +ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) { int i; struct qla_hw_data *ha = vha->hw; @@ -2741,13 +2738,12 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id) ql_dbg(level, vha, id, "Mailbox registers:\n"); for (i = 0; i < 6; i++, mbx_reg++) ql_dbg(level, vha, id, - "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg)); + "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg)); } void -ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id, - uint8_t *buf, uint size) +ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, void *buf, uint size) { uint cnt; diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 8877aa97d829..bb01b680ce9f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -318,20 +318,20 @@ struct qla2xxx_fw_dump { * as compared to other log levels. */ -extern int ql_errlev; +extern uint ql_errlev; void __attribute__((format (printf, 4, 5))) -ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...); +ql_dbg(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) -ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); +ql_dbg_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) ql_dbg_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) -ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...); +ql_log(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) -ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); +ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index c0f7593666a1..22ec7c176c00 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1033,6 +1033,7 @@ struct mbx_cmd_32 { #define MBC_GET_FIRMWARE_VERSION 8 /* Get firmware revision. */ #define MBC_LOAD_RISC_RAM 9 /* Load RAM command. */ #define MBC_DUMP_RISC_RAM 0xa /* Dump RAM command. */ +#define MBC_SECURE_FLASH_UPDATE 0xa /* Secure Flash Update(28xx) */ #define MBC_LOAD_RISC_RAM_EXTENDED 0xb /* Load RAM extended. */ #define MBC_DUMP_RISC_RAM_EXTENDED 0xc /* Dump RAM extended. 
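*/

One detail worth flagging in the qla_def.h hunk above: MBC_SECURE_FLASH_UPDATE is defined to the same opcode (0xa) as the legacy MBC_DUMP_RISC_RAM. The aliasing is deliberate; on ISP28xx parts the firmware interprets mailbox opcode 0xa as Secure Flash Update, so the second name exists to document intent at 28xx call sites. A standalone compile-time illustration (C11, values copied from the defines above):

    #include <assert.h>

    #define MBC_DUMP_RISC_RAM       0xa /* legacy meaning */
    #define MBC_SECURE_FLASH_UPDATE 0xa /* Secure Flash Update (28xx) */

    /* Same value on purpose: the adapter generation, not the opcode,
     * selects which operation the firmware performs. */
    static_assert(MBC_DUMP_RISC_RAM == MBC_SECURE_FLASH_UPDATE,
                  "opcode 0xa is intentionally shared");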
#define MBC_WRITE_RAM_WORD_EXTENDED 0xd /* Write RAM word extended */ @@ -1203,6 +1204,10 @@ struct mbx_cmd_32 { #define QLA27XX_IMG_STATUS_VER_MAJOR 0x01 #define QLA27XX_IMG_STATUS_VER_MINOR 0x00 #define QLA27XX_IMG_STATUS_SIGN 0xFACEFADE +#define QLA28XX_IMG_STATUS_SIGN 0xFACEFADF +#define QLA28XX_AUX_IMG_STATUS_SIGN 0xFACEFAED +#define QLA27XX_DEFAULT_IMAGE 0 #define QLA27XX_PRIMARY_IMAGE 1 #define QLA27XX_SECONDARY_IMAGE 2 @@ -2672,6 +2677,7 @@ struct ct_fdmiv2_hba_attributes { #define FDMI_PORT_SPEED_8GB 0x10 #define FDMI_PORT_SPEED_16GB 0x20 #define FDMI_PORT_SPEED_32GB 0x40 +#define FDMI_PORT_SPEED_64GB 0x80 #define FDMI_PORT_SPEED_UNKNOWN 0x8000 #define FC_CLASS_2 0x04 @@ -3130,10 +3136,10 @@ struct rsp_que; struct isp_operations { int (*pci_config) (struct scsi_qla_host *); - void (*reset_chip) (struct scsi_qla_host *); + int (*reset_chip)(struct scsi_qla_host *); int (*chip_diag) (struct scsi_qla_host *); void (*config_rings) (struct scsi_qla_host *); - void (*reset_adapter) (struct scsi_qla_host *); + int (*reset_adapter)(struct scsi_qla_host *); int (*nvram_config) (struct scsi_qla_host *); void (*update_fw_options) (struct scsi_qla_host *); int (*load_risc) (struct scsi_qla_host *, uint32_t *); @@ -3159,9 +3165,9 @@ struct isp_operations { void *(*prep_ms_fdmi_iocb) (struct scsi_qla_host *, uint32_t, uint32_t); - uint8_t *(*read_nvram) (struct scsi_qla_host *, uint8_t *, + uint8_t *(*read_nvram)(struct scsi_qla_host *, void *, uint32_t, uint32_t); - int (*write_nvram) (struct scsi_qla_host *, uint8_t *, uint32_t, + int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t, uint32_t); void (*fw_dump) (struct scsi_qla_host *, int); @@ -3170,9 +3176,9 @@ struct isp_operations { int (*beacon_off) (struct scsi_qla_host *); void (*beacon_blink) (struct scsi_qla_host *); - uint8_t * (*read_optrom) (struct scsi_qla_host *, uint8_t *, + void *(*read_optrom)(struct scsi_qla_host *, void *, uint32_t, uint32_t); - int (*write_optrom) (struct scsi_qla_host *, uint8_t *, uint32_t, + int (*write_optrom)(struct scsi_qla_host *, void *, uint32_t, uint32_t); int (*get_flash_version) (struct scsi_qla_host *, void *); @@ -3368,7 +3374,8 @@ struct qla_tc_param { #define QLA_MQ_SIZE 32 #define QLA_MAX_QUEUES 256 #define ISP_QUE_REG(ha, id) \ - ((ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? \ + ((ha->mqenable || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
\ ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\ ((void __iomem *)ha->iobase)) #define QLA_REQ_QUE_ID(tag) \ @@ -3621,6 +3628,8 @@ struct qla_hw_data { uint32_t rida_fmt2:1; uint32_t purge_mbox:1; uint32_t n2n_bigger:1; + uint32_t secure_adapter:1; + uint32_t secure_fw:1; } flags; uint16_t max_exchg; @@ -3703,6 +3712,7 @@ struct qla_hw_data { #define PORT_SPEED_8GB 0x04 #define PORT_SPEED_16GB 0x05 #define PORT_SPEED_32GB 0x06 +#define PORT_SPEED_64GB 0x07 #define PORT_SPEED_10GB 0x13 uint16_t link_data_rate; /* F/W operating speed */ uint16_t set_data_rate; /* Set by user */ @@ -3729,6 +3739,11 @@ struct qla_hw_data { #define PCI_DEVICE_ID_QLOGIC_ISP2071 0x2071 #define PCI_DEVICE_ID_QLOGIC_ISP2271 0x2271 #define PCI_DEVICE_ID_QLOGIC_ISP2261 0x2261 +#define PCI_DEVICE_ID_QLOGIC_ISP2061 0x2061 +#define PCI_DEVICE_ID_QLOGIC_ISP2081 0x2081 +#define PCI_DEVICE_ID_QLOGIC_ISP2089 0x2089 +#define PCI_DEVICE_ID_QLOGIC_ISP2281 0x2281 +#define PCI_DEVICE_ID_QLOGIC_ISP2289 0x2289 uint32_t isp_type; #define DT_ISP2100 BIT_0 @@ -3753,7 +3768,12 @@ struct qla_hw_data { #define DT_ISP2071 BIT_19 #define DT_ISP2271 BIT_20 #define DT_ISP2261 BIT_21 -#define DT_ISP_LAST (DT_ISP2261 << 1) +#define DT_ISP2061 BIT_22 +#define DT_ISP2081 BIT_23 +#define DT_ISP2089 BIT_24 +#define DT_ISP2281 BIT_25 +#define DT_ISP2289 BIT_26 +#define DT_ISP_LAST (DT_ISP2289 << 1) uint32_t device_type; #define DT_T10_PI BIT_25 @@ -3788,6 +3808,8 @@ struct qla_hw_data { #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) #define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271) #define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261) +#define IS_QLA2081(ha) (DT_MASK(ha) & DT_ISP2081) +#define IS_QLA2281(ha) (DT_MASK(ha) & DT_ISP2281) #define IS_QLA23XX(ha) (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \ IS_QLA6312(ha) || IS_QLA6322(ha)) @@ -3797,6 +3819,7 @@ struct qla_hw_data { #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) #define IS_QLA84XX(ha) (IS_QLA8432(ha)) #define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha)) +#define IS_QLA28XX(ha) (IS_QLA2081(ha) || IS_QLA2281(ha)) #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ IS_QLA84XX(ha)) #define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \ @@ -3805,14 +3828,15 @@ struct qla_hw_data { #define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \ IS_QLA25XX(ha) || IS_QLA81XX(ha) || \ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \ - IS_QLA8044(ha) || IS_QLA27XX(ha)) + IS_QLA8044(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) #define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ - IS_QLA27XX(ha)) + IS_QLA27XX(ha) || IS_QLA28XX(ha)) #define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled) #define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ - IS_QLA27XX(ha)) + IS_QLA27XX(ha) || IS_QLA28XX(ha)) #define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ - IS_QLA27XX(ha)) + IS_QLA27XX(ha) || IS_QLA28XX(ha)) #define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha)) #define IS_T10_PI_CAPABLE(ha) ((ha)->device_type & DT_T10_PI) @@ -3823,28 +3847,34 @@ struct qla_hw_data { #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) #define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) #define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \ - IS_QLA27XX(ha)) -#define IS_BIDI_CAPABLE(ha) ((IS_QLA25XX(ha) || IS_QLA2031(ha))) + IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_BIDI_CAPABLE(ha) \ + (IS_QLA25XX(ha) || IS_QLA2031(ha) || 
IS_QLA27XX(ha) || IS_QLA28XX(ha)) /* Bit 21 of fw_attributes decides the MCTP capabilities */ #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ ((ha)->fw_attributes_ext[0] & BIT_0)) #define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) #define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) -#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) -#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) #define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) -#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) -#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) -#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) +#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_FAWWN_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) #define IS_EXCHG_OFFLD_CAPABLE(ha) \ - (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) + (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) #define IS_EXLOGIN_OFFLD_CAPABLE(ha) \ - (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) + (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) #define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\ - IS_QLA83XX(ha) || IS_QLA27XX(ha)) + IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) /* HBA serial number */ uint8_t serial0; @@ -3888,6 +3918,9 @@ struct qla_hw_data { void *sfp_data; dma_addr_t sfp_data_dma; + void *flt; + dma_addr_t flt_dma; + #define XGMAC_DATA_SIZE 4096 void *xgmac_data; dma_addr_t xgmac_data_dma; @@ -3999,18 +4032,22 @@ struct qla_hw_data { uint8_t fw_seriallink_options[4]; uint16_t fw_seriallink_options24[4]; + uint8_t serdes_version[3]; uint8_t mpi_version[3]; uint32_t mpi_capabilities; uint8_t phy_version[3]; uint8_t pep_version[3]; /* Firmware dump template */ - void *fw_dump_template; - uint32_t fw_dump_template_len; - /* Firmware dump information. 
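*/

The hunk below replaces the single fw_dump_template pointer and length with a two-slot fwdt[] array, since 27xx/28xx firmware images can carry a second dump template. A rough standalone sketch of the walk-both-slots pattern the later hunks use when sizing the dump buffer (struct fields copied from the definition below; the helper name is hypothetical):

    struct fwdt {
        void *template;
        unsigned long length;
        unsigned long dump_size;
    };

    static unsigned long total_fwdt_dump_size(struct fwdt *fwdt)
    {
        unsigned long size = 0;
        unsigned int j;

        for (j = 0; j < 2; j++, fwdt++) {
            if (!fwdt->template)    /* slot holds no template */
                continue;
            size += fwdt->dump_size;
        }
        return size;
    }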
+ struct fwdt { + void *template; + ulong length; + ulong dump_size; + } fwdt[2]; struct qla2xxx_fw_dump *fw_dump; uint32_t fw_dump_len; - int fw_dumped; + bool fw_dumped; + bool fw_dump_mpi; unsigned long fw_dump_cap_flags; #define RISC_PAUSE_CMPL 0 #define DMA_SHUTDOWN_CMPL 1 @@ -4049,7 +4086,6 @@ struct qla_hw_data { uint16_t product_id[4]; uint8_t model_number[16+1]; -#define BINZERO "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0" char model_desc[80]; uint8_t adapter_id[16+1]; @@ -4089,22 +4125,28 @@ struct qla_hw_data { uint32_t fdt_protect_sec_cmd; uint32_t fdt_wrt_sts_reg_cmd; - uint32_t flt_region_flt; - uint32_t flt_region_fdt; - uint32_t flt_region_boot; - uint32_t flt_region_boot_sec; - uint32_t flt_region_fw; - uint32_t flt_region_fw_sec; - uint32_t flt_region_vpd_nvram; - uint32_t flt_region_vpd; - uint32_t flt_region_vpd_sec; - uint32_t flt_region_nvram; - uint32_t flt_region_npiv_conf; - uint32_t flt_region_gold_fw; - uint32_t flt_region_fcp_prio; - uint32_t flt_region_bootload; - uint32_t flt_region_img_status_pri; - uint32_t flt_region_img_status_sec; + struct { + uint32_t flt_region_flt; + uint32_t flt_region_fdt; + uint32_t flt_region_boot; + uint32_t flt_region_boot_sec; + uint32_t flt_region_fw; + uint32_t flt_region_fw_sec; + uint32_t flt_region_vpd_nvram; + uint32_t flt_region_vpd_nvram_sec; + uint32_t flt_region_vpd; + uint32_t flt_region_vpd_sec; + uint32_t flt_region_nvram; + uint32_t flt_region_nvram_sec; + uint32_t flt_region_npiv_conf; + uint32_t flt_region_gold_fw; + uint32_t flt_region_fcp_prio; + uint32_t flt_region_bootload; + uint32_t flt_region_img_status_pri; + uint32_t flt_region_img_status_sec; + uint32_t flt_region_aux_img_status_pri; + uint32_t flt_region_aux_img_status_sec; + }; uint8_t active_image; /* Needed for BEACON */ @@ -4197,8 +4239,8 @@ struct qla_hw_data { struct qlt_hw_data tgt; int allow_cna_fw_dump; uint32_t fw_ability_mask; - uint16_t min_link_speed; - uint16_t max_speed_sup; + uint16_t min_supported_speed; + uint16_t max_supported_speed; /* DMA pool for the DIF bundling buffers */ struct dma_pool *dif_bundl_pool; @@ -4225,9 +4267,20 @@ struct qla_hw_data { atomic_t zio_threshold; uint16_t last_zio_threshold; + #define DEFAULT_ZIO_THRESHOLD 5 }; +struct active_regions { + uint8_t global; + struct { + uint8_t board_config; + uint8_t vpd_nvram; + uint8_t npiv_config_0_1; + uint8_t npiv_config_2_3; + } aux; +}; + #define FW_ABILITY_MAX_SPEED_MASK 0xFUL #define FW_ABILITY_MAX_SPEED_16G 0x0 #define FW_ABILITY_MAX_SPEED_32G 0x1 @@ -4315,6 +4368,7 @@ typedef struct scsi_qla_host { #define N2N_LOGIN_NEEDED 30 #define IOCB_WORK_ACTIVE 31 #define SET_ZIO_THRESHOLD_NEEDED 32 +#define ISP_ABORT_TO_ROM 33 unsigned long pci_flags; #define PFLG_DISCONNECTED 0 /* PCI device removed */ @@ -4429,7 +4483,7 @@ typedef struct scsi_qla_host { int fcport_count; wait_queue_head_t fcport_waitQ; wait_queue_head_t vref_waitq; - uint8_t min_link_speed_feat; + uint8_t min_supported_speed; uint8_t n2n_node_name[WWN_SIZE]; uint8_t n2n_port_name[WWN_SIZE]; uint16_t n2n_id; @@ -4439,14 +4493,21 @@ typedef struct scsi_qla_host { struct qla27xx_image_status { uint8_t image_status_mask; - uint16_t generation_number; - uint8_t reserved[3]; - uint8_t ver_minor; + uint16_t generation; uint8_t ver_major; + uint8_t ver_minor; + uint8_t bitmap; /* 28xx only */ + uint8_t reserved[2]; uint32_t checksum; uint32_t signature; } __packed; +/* 28xx aux image status bitmap values */ +#define QLA28XX_AUX_IMG_BOARD_CONFIG BIT_0 +#define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1 +#define
QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2 +#define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3 + #define SET_VP_IDX 1 #define SET_AL_PA 2 #define RESET_VP_IDX 3 @@ -4493,6 +4554,24 @@ struct qla2_sgx { } \ } + +#define SFUB_CHECKSUM_SIZE 4 + +struct secure_flash_update_block { + uint32_t block_info; + uint32_t signature_lo; + uint32_t signature_hi; + uint32_t signature_upper[0x3e]; +}; + +struct secure_flash_update_block_pk { + uint32_t block_info; + uint32_t signature_lo; + uint32_t signature_hi; + uint32_t signature_upper[0x3e]; + uint32_t public_key[0x41]; +}; + /* * Macros to help code, maintain, etc. */ @@ -4593,6 +4672,7 @@ struct qla2_sgx { #define OPTROM_SIZE_81XX 0x400000 #define OPTROM_SIZE_82XX 0x800000 #define OPTROM_SIZE_83XX 0x1000000 +#define OPTROM_SIZE_28XX 0x2000000 #define OPTROM_BURST_SIZE 0x1000 #define OPTROM_BURST_DWORDS (OPTROM_BURST_SIZE / 4) @@ -4689,10 +4769,13 @@ struct sff_8247_a0 { #define AUTO_DETECT_SFP_SUPPORT(_vha)\ (ql2xautodetectsfp && !_vha->vp_idx && \ (IS_QLA25XX(_vha->hw) || IS_QLA81XX(_vha->hw) ||\ - IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw))) + IS_QLA83XX(_vha->hw) || IS_QLA27XX(_vha->hw) || \ + IS_QLA28XX(_vha->hw))) + +#define FLASH_SEMAPHORE_REGISTER_ADDR 0x00101016 #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \ - (IS_QLA27XX(_ha) || IS_QLA83XX(_ha))) + (IS_QLA27XX(_ha) || IS_QLA28XX(_ha) || IS_QLA83XX(_ha))) #define SAVE_TOPO(_ha) { \ if (_ha->current_topology) \ @@ -4708,4 +4791,5 @@ struct sff_8247_a0 { #include "qla_gbl.h" #include "qla_dbg.h" #include "qla_inline.h" + #endif diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index d647760d4804..18dd8a640b7c 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -386,7 +386,7 @@ qla_dfs_naqp_write(struct file *file, const char __user *buffer, int rc = 0; unsigned long num_act_qp; - if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha))) { + if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) { pr_err("host%ld: this adapter does not support Multi Q.", vha->host_no); return -EINVAL; @@ -438,7 +438,7 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && - !IS_QLA27XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto out; if (!ha->fce) goto out; @@ -448,11 +448,6 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) atomic_set(&qla2x00_dfs_root_count, 0); qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL); - if (!qla2x00_dfs_root) { - ql_log(ql_log_warn, vha, 0x00f7, - "Unable to create debugfs root directory.\n"); - goto out; - } create_dir: if (ha->dfs_dir) @@ -460,64 +455,28 @@ create_dir: mutex_init(&ha->fce_mutex); ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root); - if (!ha->dfs_dir) { - ql_log(ql_log_warn, vha, 0x00f8, - "Unable to create debugfs ha directory.\n"); - goto out; - } atomic_inc(&qla2x00_dfs_root_count); create_nodes: ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count", S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops); - if (!ha->dfs_fw_resource_cnt) { - ql_log(ql_log_warn, vha, 0x00fd, - "Unable to create debugFS fw_resource_count node.\n"); - goto out; - } ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_counters_ops); - if (!ha->dfs_tgt_counters) { - ql_log(ql_log_warn, vha, 0xd301, - "Unable to create debugFS tgt_counters node.\n"); - goto out; - } ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", S_IRUSR, ha->dfs_dir, vha, 
&dfs_tgt_port_database_ops); - if (!ha->tgt.dfs_tgt_port_database) { - ql_log(ql_log_warn, vha, 0xd03f, - "Unable to create debugFS tgt_port_database node.\n"); - goto out; - } ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); - if (!ha->dfs_fce) { - ql_log(ql_log_warn, vha, 0x00f9, - "Unable to create debugfs fce node.\n"); - goto out; - } ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess", S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops); - if (!ha->tgt.dfs_tgt_sess) { - ql_log(ql_log_warn, vha, 0xd040, - "Unable to create debugFS tgt_sess node.\n"); - goto out; - } - if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) { + if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) ha->tgt.dfs_naqp = debugfs_create_file("naqp", 0400, ha->dfs_dir, vha, &dfs_naqp_ops); - if (!ha->tgt.dfs_naqp) { - ql_log(ql_log_warn, vha, 0xd011, - "Unable to create debugFS naqp node.\n"); - goto out; - } - } out: return 0; } diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index 50c1e6c62e31..d53cd7875a85 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -1515,13 +1515,31 @@ struct qla_flt_header { #define FLT_REG_VPD_SEC_27XX_2 0xD8 #define FLT_REG_VPD_SEC_27XX_3 0xDA +/* 28xx */ +#define FLT_REG_AUX_IMG_PRI_28XX 0x125 +#define FLT_REG_AUX_IMG_SEC_28XX 0x126 +#define FLT_REG_VPD_SEC_28XX_0 0x10C +#define FLT_REG_VPD_SEC_28XX_1 0x10E +#define FLT_REG_VPD_SEC_28XX_2 0x110 +#define FLT_REG_VPD_SEC_28XX_3 0x112 +#define FLT_REG_NVRAM_SEC_28XX_0 0x10D +#define FLT_REG_NVRAM_SEC_28XX_1 0x10F +#define FLT_REG_NVRAM_SEC_28XX_2 0x111 +#define FLT_REG_NVRAM_SEC_28XX_3 0x113 + struct qla_flt_region { - uint32_t code; + uint16_t code; + uint8_t attribute; + uint8_t reserved; uint32_t size; uint32_t start; uint32_t end; }; +#define FLT_REGION_SIZE 16 +#define FLT_MAX_REGIONS 0xFF +#define FLT_REGIONS_SIZE (FLT_REGION_SIZE * FLT_MAX_REGIONS) + /* Flash NPIV Configuration Table ********************************************/ struct qla_npiv_header { @@ -1711,6 +1729,10 @@ struct access_chip_rsp_84xx { #define LR_DIST_FW_SHIFT (LR_DIST_FW_POS - LR_DIST_NV_POS) #define LR_DIST_FW_FIELD(x) ((x) << LR_DIST_FW_SHIFT & 0xf000) +/* FAC semaphore defines */ +#define FAC_SEMAPHORE_UNLOCK 0 +#define FAC_SEMAPHORE_LOCK 1 + struct nvram_81xx { /* NVRAM header. */ uint8_t id[4]; @@ -1757,7 +1779,7 @@ struct nvram_81xx { uint16_t reserved_6_3[14]; /* Offset 192. */ - uint8_t min_link_speed; + uint8_t min_supported_speed; uint8_t reserved_7_0; uint16_t reserved_7[31]; @@ -2005,6 +2027,8 @@ struct ex_init_cb_81xx { #define FARX_ACCESS_FLASH_CONF_81XX 0x7FFD0000 #define FARX_ACCESS_FLASH_DATA_81XX 0x7F800000 +#define FARX_ACCESS_FLASH_CONF_28XX 0x7FFD0000 +#define FARX_ACCESS_FLASH_DATA_28XX 0x7F7D0000 /* FCP priority config defines *************************************/ /* operations */ @@ -2079,6 +2103,7 @@ struct qla_fcp_prio_cfg { #define FA_NPIV_CONF1_ADDR_81 0xD2000 /* 83XX Flash locations -- occupies second 8MB region. 
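*/

The reworked flash-layout addresses below are written as (byte_offset/4) because the 24xx-style flash helpers such as qla24xx_read_flash_data take 32-bit-word addresses rather than byte offsets; note that (0x3F1000/4) is exactly the old literal 0xFC400, so for the 83xx value only the notation changes. A trivial sketch of the conversion:

    #include <stdint.h>

    /* Flash words are 32 bits wide, so a byte offset taken from the
     * layout table must be divided by four before being handed to a
     * word-addressed reader. */
    static inline uint32_t flash_byte_to_dword_addr(uint32_t byte_off)
    {
        return byte_off / 4;    /* e.g. 0x3F1000 -> 0xFC400 */
    }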
-#define FA_FLASH_LAYOUT_ADDR_83 0xFC400 +#define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4) +#define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4) #endif diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index ba31488d76d2..858e26bab17f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -18,14 +18,14 @@ extern int qla2100_pci_config(struct scsi_qla_host *); extern int qla2300_pci_config(struct scsi_qla_host *); extern int qla24xx_pci_config(scsi_qla_host_t *); extern int qla25xx_pci_config(scsi_qla_host_t *); -extern void qla2x00_reset_chip(struct scsi_qla_host *); -extern void qla24xx_reset_chip(struct scsi_qla_host *); +extern int qla2x00_reset_chip(struct scsi_qla_host *); +extern int qla24xx_reset_chip(struct scsi_qla_host *); extern int qla2x00_chip_diag(struct scsi_qla_host *); extern int qla24xx_chip_diag(struct scsi_qla_host *); extern void qla2x00_config_rings(struct scsi_qla_host *); extern void qla24xx_config_rings(struct scsi_qla_host *); -extern void qla2x00_reset_adapter(struct scsi_qla_host *); -extern void qla24xx_reset_adapter(struct scsi_qla_host *); +extern int qla2x00_reset_adapter(struct scsi_qla_host *); +extern int qla24xx_reset_adapter(struct scsi_qla_host *); extern int qla2x00_nvram_config(struct scsi_qla_host *); extern int qla24xx_nvram_config(struct scsi_qla_host *); extern int qla81xx_nvram_config(struct scsi_qla_host *); @@ -93,7 +93,6 @@ extern int qla2xxx_mctp_dump(scsi_qla_host_t *); extern int qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *); extern int qla2x00_init_rings(scsi_qla_host_t *); -extern uint8_t qla27xx_find_valid_image(struct scsi_qla_host *); extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, int, int, bool); extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); @@ -108,6 +107,11 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); int qla24xx_detect_sfp(scsi_qla_host_t *vha); int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); +extern void qla28xx_get_aux_images(struct scsi_qla_host *, + struct active_regions *); +extern void qla27xx_get_active_image(struct scsi_qla_host *, + struct active_regions *); + void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *, @@ -118,6 +122,7 @@ int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport); int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *); void qla_rscn_replay(fc_port_t *fcport); +extern bool qla24xx_risc_firmware_invalid(uint32_t *); /* * Global Data in qla_os.c source file.
@@ -466,6 +471,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int); extern int qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t); +extern int qla81xx_fac_semaphore_access(scsi_qla_host_t *, int); + extern int qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *); @@ -511,6 +518,14 @@ extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *); extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t); int qla24xx_res_count_wait(struct scsi_qla_host *, uint16_t *, int); +extern int qla28xx_secure_flash_update(scsi_qla_host_t *, uint16_t, uint16_t, + uint32_t, dma_addr_t, uint32_t); + +extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t, + uint32_t *); +extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t, + uint32_t); + /* * Global Function Prototypes in qla_isr.c source file. */ @@ -542,19 +557,20 @@ fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); */ extern void qla2x00_release_nvram_protection(scsi_qla_host_t *); extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, - uint32_t, uint32_t); -extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); -extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); -extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); -extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); -extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); -extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t, - uint32_t); + uint32_t, uint32_t); +extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern int qla2x00_write_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern int qla24xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); +extern int qla25xx_write_nvram_data(scsi_qla_host_t *, void *, uint32_t, + uint32_t); + extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t); bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t); bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t); @@ -574,18 +590,18 @@ extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *); extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t, uint32_t, uint16_t *); -extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, +extern void *qla2x00_read_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); -extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *, +extern int qla2x00_write_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); -extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, +extern void *qla24xx_read_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); -extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, +extern int qla24xx_write_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); -extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, +extern void *qla25xx_read_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); -extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *, - uint8_t *, 
uint32_t, uint32_t); +extern void *qla8044_read_optrom_data(struct scsi_qla_host *, + void *, uint32_t, uint32_t); extern void qla8044_watchdog(struct scsi_qla_host *vha); extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *); @@ -610,20 +626,13 @@ extern void qla82xx_fw_dump(scsi_qla_host_t *, int); extern void qla8044_fw_dump(scsi_qla_host_t *, int); extern void qla27xx_fwdump(scsi_qla_host_t *, int); -extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *); +extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *); extern int qla27xx_fwdt_template_valid(void *); extern ulong qla27xx_fwdt_template_size(void *); -extern const void *qla27xx_fwdt_template_default(void); -extern ulong qla27xx_fwdt_template_default_size(void); - -extern void qla2x00_dump_regs(scsi_qla_host_t *); -extern void qla2x00_dump_buffer(uint8_t *, uint32_t); -extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); -extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); -extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, - uint8_t *, uint32_t); -extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); +extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); +extern void ql_dump_regs(uint, scsi_qla_host_t *, uint); +extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, void *, uint); /* * Global Function Prototypes in qla_gs.c source file. */ @@ -722,7 +731,7 @@ extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); /* qlafx00 related functions */ extern int qlafx00_pci_config(struct scsi_qla_host *); extern int qlafx00_initialize_adapter(struct scsi_qla_host *); -extern void qlafx00_soft_reset(scsi_qla_host_t *); +extern int qlafx00_soft_reset(scsi_qla_host_t *); extern int qlafx00_chip_diag(scsi_qla_host_t *); extern void qlafx00_config_rings(struct scsi_qla_host *); extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *); @@ -765,16 +774,16 @@ extern int qla82xx_pci_region_offset(struct pci_dev *, int); extern int qla82xx_iospace_config(struct qla_hw_data *); /* Initialization related functions */ -extern void qla82xx_reset_chip(struct scsi_qla_host *); +extern int qla82xx_reset_chip(struct scsi_qla_host *); extern void qla82xx_config_rings(struct scsi_qla_host *); extern void qla82xx_watchdog(scsi_qla_host_t *); extern int qla82xx_start_firmware(scsi_qla_host_t *); /* Firmware and flash related functions */ extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *); -extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *, +extern void *qla82xx_read_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); -extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *, +extern int qla82xx_write_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); /* Mailbox related functions */ @@ -870,7 +879,7 @@ extern void qla8044_clear_drv_active(struct qla_hw_data *); void qla8044_get_minidump(struct scsi_qla_host *vha); int qla8044_collect_md_data(struct scsi_qla_host *vha); extern int qla8044_md_get_template(scsi_qla_host_t *); -extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *, +extern int qla8044_write_optrom_data(struct scsi_qla_host *, void *, uint32_t, uint32_t); extern irqreturn_t qla8044_intr_handler(int, void *); extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t); diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 4721b318d71d..5e28f546e50f 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ 
b/drivers/scsi/qla2xxx/qla_gs.c @@ -152,8 +152,8 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt, vha->d_id.b.area, vha->d_id.b.al_pa, comp_status, ct_rsp->header.response); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, - 0x2078, (uint8_t *)&ct_rsp->header, - sizeof(struct ct_rsp_hdr)); + 0x2078, ct_rsp, + offsetof(typeof(*ct_rsp), rsp)); rval = QLA_INVALID_COMMAND; } else rval = QLA_SUCCESS; @@ -1794,7 +1794,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) if (IS_CNA_CAPABLE(ha)) eiter->a.sup_speed = cpu_to_be32( FDMI_PORT_SPEED_10GB); - else if (IS_QLA27XX(ha)) + else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) eiter->a.sup_speed = cpu_to_be32( FDMI_PORT_SPEED_32GB| FDMI_PORT_SPEED_16GB| @@ -2373,7 +2373,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) if (IS_CNA_CAPABLE(ha)) eiter->a.sup_speed = cpu_to_be32( FDMI_PORT_SPEED_10GB); - else if (IS_QLA27XX(ha)) + else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) eiter->a.sup_speed = cpu_to_be32( FDMI_PORT_SPEED_32GB| FDMI_PORT_SPEED_16GB| @@ -2783,6 +2783,31 @@ qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd, return &p->p.req; } +static uint16_t +qla2x00_port_speed_capability(uint16_t speed) +{ + switch (speed) { + case BIT_15: + return PORT_SPEED_1GB; + case BIT_14: + return PORT_SPEED_2GB; + case BIT_13: + return PORT_SPEED_4GB; + case BIT_12: + return PORT_SPEED_10GB; + case BIT_11: + return PORT_SPEED_8GB; + case BIT_10: + return PORT_SPEED_16GB; + case BIT_8: + return PORT_SPEED_32GB; + case BIT_7: + return PORT_SPEED_64GB; + default: + return PORT_SPEED_UNKNOWN; + } +} + /** * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query. * @vha: HA context @@ -2855,31 +2880,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list) } rval = QLA_FUNCTION_FAILED; } else { - /* Save port-speed */ - switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) { - case BIT_15: - list[i].fp_speed = PORT_SPEED_1GB; - break; - case BIT_14: - list[i].fp_speed = PORT_SPEED_2GB; - break; - case BIT_13: - list[i].fp_speed = PORT_SPEED_4GB; - break; - case BIT_12: - list[i].fp_speed = PORT_SPEED_10GB; - break; - case BIT_11: - list[i].fp_speed = PORT_SPEED_8GB; - break; - case BIT_10: - list[i].fp_speed = PORT_SPEED_16GB; - break; - case BIT_8: - list[i].fp_speed = PORT_SPEED_32GB; - break; - } - + list->fp_speed = qla2x00_port_speed_capability( + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); ql_dbg(ql_dbg_disc, vha, 0x205b, "GPSC ext entry - fpn " "%8phN speeds=%04x speed=%04x.\n", @@ -3048,29 +3050,8 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) goto done; } } else { - switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) { - case BIT_15: - fcport->fp_speed = PORT_SPEED_1GB; - break; - case BIT_14: - fcport->fp_speed = PORT_SPEED_2GB; - break; - case BIT_13: - fcport->fp_speed = PORT_SPEED_4GB; - break; - case BIT_12: - fcport->fp_speed = PORT_SPEED_10GB; - break; - case BIT_11: - fcport->fp_speed = PORT_SPEED_8GB; - break; - case BIT_10: - fcport->fp_speed = PORT_SPEED_16GB; - break; - case BIT_8: - fcport->fp_speed = PORT_SPEED_32GB; - break; - } + fcport->fp_speed = qla2x00_port_speed_capability( + be16_to_cpu(ct_rsp->rsp.gpsc.speed)); ql_dbg(ql_dbg_disc, vha, 0x2054, "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n", diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 7679285d7c95..1351e03f0da8 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2104,6 +2104,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) int rval; struct qla_hw_data *ha = vha->hw; struct req_que *req = 
ha->req_q_map[0]; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; memset(&vha->qla_stats, 0, sizeof(vha->qla_stats)); memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat)); @@ -2138,6 +2139,15 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) ha->isp_ops->reset_chip(vha); + /* Check for secure flash support */ + if (IS_QLA28XX(ha)) { + if (RD_REG_DWORD(&reg->mailbox12) & BIT_0) { + ql_log(ql_log_info, vha, 0xffff, "Adapter is Secure\n"); + ha->flags.secure_adapter = 1; + } + } + + rval = qla2xxx_get_flash_info(vha); if (rval) { ql_log(ql_log_fatal, vha, 0x004f, @@ -2454,7 +2464,7 @@ qla2x00_isp_firmware(scsi_qla_host_t *vha) * * Returns 0 on success. */ -void +int qla2x00_reset_chip(scsi_qla_host_t *vha) { unsigned long flags = 0; @@ -2462,9 +2472,10 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t cnt; uint16_t cmd; + int rval = QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(ha->pdev))) - return; + return rval; ha->isp_ops->disable_intrs(ha); @@ -2590,6 +2601,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) } spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; } /** @@ -2830,14 +2843,15 @@ acquired: * * Returns 0 on success. */ -void +int qla24xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + int rval = QLA_FUNCTION_FAILED; if (pci_channel_offline(ha->pdev) && ha->flags.pci_channel_io_perm_failure) { - return; + return rval; } ha->isp_ops->disable_intrs(ha); @@ -2845,7 +2859,9 @@ qla24xx_reset_chip(scsi_qla_host_t *vha) qla25xx_manipulate_risc_semaphore(vha); /* Perform RISC reset. */ - qla24xx_reset_risc(vha); + rval = qla24xx_reset_risc(vha); + + return rval; } /** @@ -3020,7 +3036,7 @@ qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) if (IS_FWI2_CAPABLE(ha)) { /* Allocate memory for Fibre Channel Event Buffer. */ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && - !IS_QLA27XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto try_eft; if (ha->fce) @@ -3091,12 +3107,15 @@ eft_err: void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) { + int rval; uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, eft_size, fce_size, mq_size; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; struct qla2xxx_fw_dump *fw_dump; + dma_addr_t tc_dma; + void *tc; dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; req_q_size = rsp_q_size = 0; @@ -3108,7 +3127,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x11000 + 1) * sizeof(uint16_t); } else if (IS_FWI2_CAPABLE(ha)) { - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); else if (IS_QLA81XX(ha)) fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); @@ -3120,7 +3139,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mem_size = (ha->fw_memory_size - 0x100000 + 1) * sizeof(uint32_t); if (ha->mqenable) { - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && + !IS_QLA28XX(ha)) mq_size = sizeof(struct qla2xxx_mq_chain); /* * Allocate maximum buffer size for all queues. @@ -3135,25 +3155,56 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) mq_size += ha->tgt.atio_q_length * sizeof(request_t); /* Allocate memory for Fibre Channel Event Buffer.
*/ if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && - !IS_QLA27XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto try_eft; fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; try_eft: + if (ha->eft) + dma_free_coherent(&ha->pdev->dev, + EFT_SIZE, ha->eft, ha->eft_dma); + + /* Allocate memory for Extended Trace Buffer. */ + tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, + GFP_KERNEL); + if (!tc) { + ql_log(ql_log_warn, vha, 0x00c1, + "Unable to allocate (%d KB) for EFT.\n", + EFT_SIZE / 1024); + goto allocate; + } + + rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); + if (rval) { + ql_log(ql_log_warn, vha, 0x00c2, + "Unable to initialize EFT (%d).\n", rval); + dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, + tc_dma); + } ql_dbg(ql_dbg_init, vha, 0x00c3, "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); eft_size = EFT_SIZE; } - if (IS_QLA27XX(ha)) { - if (!ha->fw_dump_template) { - ql_log(ql_log_warn, vha, 0x00ba, - "Failed missing fwdump template\n"); - return; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + struct fwdt *fwdt = ha->fwdt; + uint j; + + for (j = 0; j < 2; j++, fwdt++) { + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0x00ba, + "-> fwdt%u no template\n", j); + continue; + } + ql_dbg(ql_dbg_init, vha, 0x00fa, + "-> fwdt%u calculating fwdump size...\n", j); + fwdt->dump_size = qla27xx_fwdt_calculate_dump_size( + vha, fwdt->template); + ql_dbg(ql_dbg_init, vha, 0x00fa, + "-> fwdt%u calculated fwdump size = %#lx bytes\n", + j, fwdt->dump_size); + dump_size += fwdt->dump_size; } - dump_size = qla27xx_fwdt_calculate_dump_size(vha); - ql_dbg(ql_dbg_init, vha, 0x00fa, - "-> allocating fwdump (%x bytes)...\n", dump_size); goto allocate; } @@ -3188,7 +3239,7 @@ allocate: "Allocated (%d KB) for firmware dump.\n", dump_size / 1024); - if (IS_QLA27XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) return; ha->fw_dump->signature[0] = 'Q'; @@ -3498,7 +3549,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS) { qla24xx_detect_sfp(vha); - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && + if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) && (ha->zio_mode == QLA_ZIO_MODE_6)) qla27xx_set_zio_threshold(vha, ha->last_zio_threshold); @@ -3570,7 +3622,7 @@ enable_82xx_npiv: spin_unlock_irqrestore(&ha->hardware_lock, flags); } - if (IS_QLA27XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flags.fac_supported = 1; else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) { uint32_t size; @@ -3585,7 +3637,8 @@ enable_82xx_npiv: ha->fw_major_version, ha->fw_minor_version, ha->fw_subminor_version); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { ha->flags.fac_supported = 0; rval = QLA_SUCCESS; } @@ -3647,8 +3700,7 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha) ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0115, "Serial link options.\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0109, - (uint8_t *)&ha->fw_seriallink_options, - sizeof(ha->fw_seriallink_options)); + ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options)); ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING; if (ha->fw_seriallink_options[3] & BIT_2) { @@ -3738,7 +3790,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) /* Move PUREX, ABTS RX & RIDA to ATIOQ */ if (ql2xmvasynctoatio && - (IS_QLA83XX(ha) || IS_QLA27XX(ha))) { + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) ha->fw_options[2] |= BIT_11; @@ -3746,7 +3798,8 @@ 
qla24xx_update_fw_options(scsi_qla_host_t *vha) ha->fw_options[2] &= ~BIT_11; } - if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { /* * Tell FW to track each exchange to prevent * driver from using stale exchange. @@ -3843,7 +3896,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha) if (IS_SHADOW_REG_CAPABLE(ha)) icb->firmware_options_2 |= cpu_to_le32(BIT_30|BIT_29); - if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { icb->qos = cpu_to_le16(QLA_DEFAULT_QUE_QOS); icb->rid = cpu_to_le16(rid); if (ha->flags.msix_enabled) { @@ -4266,11 +4320,14 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, { char *st, *en; uint16_t index; + uint64_t zero[2] = { 0 }; struct qla_hw_data *ha = vha->hw; int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha); - if (memcmp(model, BINZERO, len) != 0) { + if (len > sizeof(zero)) + len = sizeof(zero); + if (memcmp(model, &zero, len) != 0) { strncpy(ha->model_number, model, len); st = en = ha->model_number; en += len - 1; @@ -4357,7 +4414,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) rval = QLA_SUCCESS; /* Determine NVRAM starting address. */ - ha->nvram_size = sizeof(nvram_t); + ha->nvram_size = sizeof(*nv); ha->nvram_base = 0; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) if ((RD_REG_WORD(&reg->ctrl_status) >> 14) == 1) ha->nvram_base = 0x80; @@ -4371,16 +4428,15 @@ ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010f, "Contents of NVRAM.\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0110, - (uint8_t *)nv, ha->nvram_size); + nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ - if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || - nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { + if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || + nv->nvram_version < 1) { /* Reset NVRAM data. */ ql_log(ql_log_warn, vha, 0x0064, - "Inconsistent NVRAM " - "detected: checksum=0x%x id=%c version=0x%x.\n", - chksum, nv->id[0], nv->nvram_version); + "Inconsistent NVRAM detected: checksum=%#x id=%.4s version=%#x.\n", + chksum, nv->id, nv->nvram_version); ql_log(ql_log_warn, vha, 0x0065, "Falling back to " "functioning (yet invalid -- WWPN) defaults.\n"); @@ -4943,8 +4999,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) ql_dbg(ql_dbg_disc, vha, 0x2011, "Entries in ID list (%d).\n", entries); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2075, - (uint8_t *)ha->gid_list, - entries * sizeof(struct gid_list_info)); + ha->gid_list, entries * sizeof(*ha->gid_list)); if (entries == 0) { spin_lock_irqsave(&vha->work_lock, flags); @@ -6632,6 +6687,14 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); + if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { + ha->flags.chip_reset_done = 1; + vha->flags.online = 1; + status = 0; + clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); + return status; + } + if (IS_QLA8031(ha)) { ql_dbg(ql_dbg_p3p, vha, 0xb05c, "Clearing fcoe driver presence.\n"); @@ -6872,7 +6935,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) * Input: * ha = adapter block pointer. */ -void +int qla2x00_reset_adapter(scsi_qla_host_t *vha) { unsigned long flags = 0; @@ -6888,17 +6951,20 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha) WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC); RD_REG_WORD(&reg->hccr); /* PCI Posting.
*/ spin_unlock_irqrestore(&ha->hardware_lock, flags); + + return QLA_SUCCESS; } -void +int qla24xx_reset_adapter(scsi_qla_host_t *vha) { unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + int rval = QLA_SUCCESS; if (IS_P3P_TYPE(ha)) - return; + return rval; vha->flags.online = 0; ha->isp_ops->disable_intrs(ha); @@ -6912,6 +6978,8 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha) if (IS_NOPOLLING_TYPE(ha)) ha->isp_ops->enable_intrs(ha); + + return rval; } /* On sparc systems, obtain port and node WWN from firmware @@ -6962,34 +7030,33 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) ha->vpd_base = FA_NVRAM_VPD1_ADDR; } - ha->nvram_size = sizeof(struct nvram_24xx); + ha->nvram_size = sizeof(*nv); ha->vpd_size = FA_NVRAM_VPD_SIZE; /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; - ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, + ha->isp_ops->read_nvram(vha, ha->vpd, ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); /* Get NVRAM data into cache and calculate checksum. */ dptr = (uint32_t *)nv; - ha->isp_ops->read_nvram(vha, (uint8_t *)dptr, ha->nvram_base, - ha->nvram_size); + ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) chksum += le32_to_cpu(*dptr); ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x006a, "Contents of NVRAM\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010d, - (uint8_t *)nv, ha->nvram_size); + nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ - if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' - || nv->id[3] != ' ' || - nv->nvram_version < cpu_to_le16(ICB_VERSION)) { + if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || + le16_to_cpu(nv->nvram_version) < ICB_VERSION) { /* Reset NVRAM data. 
*/ ql_log(ql_log_warn, vha, 0x006b, - "Inconsistent NVRAM detected: checksum=0x%x id=%c " - "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); + "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", + chksum, nv->id, nv->nvram_version); + ql_dump_buffer(ql_dbg_init, vha, 0x006b, nv, sizeof(*nv)); ql_log(ql_log_warn, vha, 0x006c, "Falling back to functioning (yet invalid -- WWPN) " "defaults.\n"); @@ -7198,128 +7265,311 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) return (rval); } -uint8_t qla27xx_find_valid_image(struct scsi_qla_host *vha) +static void +qla27xx_print_image(struct scsi_qla_host *vha, char *name, + struct qla27xx_image_status *image_status) +{ + ql_dbg(ql_dbg_init, vha, 0x018b, + "%s %s: mask=%#02x gen=%#04x ver=%u.%u map=%#01x sum=%#08x sig=%#08x\n", + name, "status", + image_status->image_status_mask, + le16_to_cpu(image_status->generation), + image_status->ver_major, + image_status->ver_minor, + image_status->bitmap, + le32_to_cpu(image_status->checksum), + le32_to_cpu(image_status->signature)); +} + +static bool +qla28xx_check_aux_image_status_signature( + struct qla27xx_image_status *image_status) +{ + ulong signature = le32_to_cpu(image_status->signature); + + return signature != QLA28XX_AUX_IMG_STATUS_SIGN; +} + +static bool +qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status) +{ + ulong signature = le32_to_cpu(image_status->signature); + + return + signature != QLA27XX_IMG_STATUS_SIGN && + signature != QLA28XX_IMG_STATUS_SIGN; +} + +static ulong +qla27xx_image_status_checksum(struct qla27xx_image_status *image_status) +{ + uint32_t *p = (void *)image_status; + uint n = sizeof(*image_status) / sizeof(*p); + uint32_t sum = 0; + + for ( ; n--; p++) + sum += le32_to_cpup(p); + + return sum; +} + +static inline uint +qla28xx_component_bitmask(struct qla27xx_image_status *aux, uint bitmask) +{ + return aux->bitmap & bitmask ? 
+ QLA27XX_SECONDARY_IMAGE : QLA27XX_PRIMARY_IMAGE; +} + +static void +qla28xx_component_status( + struct active_regions *active_regions, struct qla27xx_image_status *aux) +{ + active_regions->aux.board_config = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_BOARD_CONFIG); + + active_regions->aux.vpd_nvram = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_VPD_NVRAM); + + active_regions->aux.npiv_config_0_1 = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_0_1); + + active_regions->aux.npiv_config_2_3 = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3); +} + +static int +qla27xx_compare_image_generation( + struct qla27xx_image_status *pri_image_status, + struct qla27xx_image_status *sec_image_status) +{ + /* calculate generation delta as uint16 (this accounts for wrap) */ + int16_t delta = + le16_to_cpu(pri_image_status->generation) - + le16_to_cpu(sec_image_status->generation); + + ql_dbg(ql_dbg_init, NULL, 0x0180, "generation delta = %d\n", delta); + + return delta; +} + +void +qla28xx_get_aux_images( + struct scsi_qla_host *vha, struct active_regions *active_regions) { - struct qla27xx_image_status pri_image_status, sec_image_status; - uint8_t valid_pri_image, valid_sec_image; - uint32_t *wptr; - uint32_t cnt, chksum, size; struct qla_hw_data *ha = vha->hw; + struct qla27xx_image_status pri_aux_image_status, sec_aux_image_status; + bool valid_pri_image = false, valid_sec_image = false; + bool active_pri_image = false, active_sec_image = false; - valid_pri_image = valid_sec_image = 1; - ha->active_image = 0; - size = sizeof(struct qla27xx_image_status) / sizeof(uint32_t); + if (!ha->flt_region_aux_img_status_pri) { + ql_dbg(ql_dbg_init, vha, 0x018a, "Primary aux image not addressed\n"); + goto check_sec_image; + } - if (!ha->flt_region_img_status_pri) { - valid_pri_image = 0; + qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status, + ha->flt_region_aux_img_status_pri, + sizeof(pri_aux_image_status) >> 2); + qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); + + if (qla28xx_check_aux_image_status_signature(&pri_aux_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Primary aux image signature (%#x) not valid\n", + le32_to_cpu(pri_aux_image_status.signature)); + goto check_sec_image; + } + + if (qla27xx_image_status_checksum(&pri_aux_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Primary aux image checksum failed\n"); goto check_sec_image; } - qla24xx_read_flash_data(vha, (uint32_t *)(&pri_image_status), - ha->flt_region_img_status_pri, size); + valid_pri_image = true; + + if (pri_aux_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Primary aux image is active\n"); + active_pri_image = true; + } + +check_sec_image: + if (!ha->flt_region_aux_img_status_sec) { + ql_dbg(ql_dbg_init, vha, 0x018a, + "Secondary aux image not addressed\n"); + goto check_valid_image; + } + + qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status, + ha->flt_region_aux_img_status_sec, + sizeof(sec_aux_image_status) >> 2); + qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); - if (pri_image_status.signature != QLA27XX_IMG_STATUS_SIGN) { + if (qla28xx_check_aux_image_status_signature(&sec_aux_image_status)) { ql_dbg(ql_dbg_init, vha, 0x018b, - "Primary image signature (0x%x) not valid\n", - pri_image_status.signature); - valid_pri_image = 0; + "Secondary aux image signature (%#x) not valid\n", + le32_to_cpu(sec_aux_image_status.signature)); + goto check_valid_image; + } + + if 
(qla27xx_image_status_checksum(&sec_aux_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Secondary aux image checksum failed\n"); + goto check_valid_image; + } + + valid_sec_image = true; + + if (sec_aux_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Secondary aux image is active\n"); + active_sec_image = true; + } + +check_valid_image: + if (valid_pri_image && active_pri_image && + valid_sec_image && active_sec_image) { + if (qla27xx_compare_image_generation(&pri_aux_image_status, + &sec_aux_image_status) >= 0) { + qla28xx_component_status(active_regions, + &pri_aux_image_status); + } else { + qla28xx_component_status(active_regions, + &sec_aux_image_status); + } + } else if (valid_pri_image && active_pri_image) { + qla28xx_component_status(active_regions, &pri_aux_image_status); + } else if (valid_sec_image && active_sec_image) { + qla28xx_component_status(active_regions, &sec_aux_image_status); + } + + ql_dbg(ql_dbg_init, vha, 0x018f, + "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n", + active_regions->aux.board_config, + active_regions->aux.vpd_nvram, + active_regions->aux.npiv_config_0_1, + active_regions->aux.npiv_config_2_3); +} + +void +qla27xx_get_active_image(struct scsi_qla_host *vha, + struct active_regions *active_regions) +{ + struct qla_hw_data *ha = vha->hw; + struct qla27xx_image_status pri_image_status, sec_image_status; + bool valid_pri_image = false, valid_sec_image = false; + bool active_pri_image = false, active_sec_image = false; + + if (!ha->flt_region_img_status_pri) { + ql_dbg(ql_dbg_init, vha, 0x018a, "Primary image not addressed\n"); goto check_sec_image; } - wptr = (uint32_t *)(&pri_image_status); - cnt = size; + qla24xx_read_flash_data(vha, (void *)(&pri_image_status), + ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2); + qla27xx_print_image(vha, "Primary image", &pri_image_status); - for (chksum = 0; cnt--; wptr++) - chksum += le32_to_cpu(*wptr); + if (qla27xx_check_image_status_signature(&pri_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Primary image signature (%#x) not valid\n", + le32_to_cpu(pri_image_status.signature)); + goto check_sec_image; + } - if (chksum) { + if (qla27xx_image_status_checksum(&pri_image_status)) { ql_dbg(ql_dbg_init, vha, 0x018c, - "Checksum validation failed for primary image (0x%x)\n", - chksum); - valid_pri_image = 0; + "Primary image checksum failed\n"); + goto check_sec_image; + } + + valid_pri_image = true; + + if (pri_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Primary image is active\n"); + active_pri_image = true; } check_sec_image: if (!ha->flt_region_img_status_sec) { - valid_sec_image = 0; + ql_dbg(ql_dbg_init, vha, 0x018a, "Secondary image not addressed\n"); goto check_valid_image; } qla24xx_read_flash_data(vha, (uint32_t *)(&sec_image_status), - ha->flt_region_img_status_sec, size); + ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2); + qla27xx_print_image(vha, "Secondary image", &sec_image_status); - if (sec_image_status.signature != QLA27XX_IMG_STATUS_SIGN) { - ql_dbg(ql_dbg_init, vha, 0x018d, - "Secondary image signature(0x%x) not valid\n", - sec_image_status.signature); - valid_sec_image = 0; + if (qla27xx_check_image_status_signature(&sec_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018b, + "Secondary image signature (%#x) not valid\n", + le32_to_cpu(sec_image_status.signature)); goto check_valid_image; } - wptr = (uint32_t *)(&sec_image_status); - cnt = size; - for (chksum = 0; cnt--; wptr++) - 
chksum += le32_to_cpu(*wptr); - if (chksum) { - ql_dbg(ql_dbg_init, vha, 0x018e, - "Checksum validation failed for secondary image (0x%x)\n", - chksum); - valid_sec_image = 0; + if (qla27xx_image_status_checksum(&sec_image_status)) { + ql_dbg(ql_dbg_init, vha, 0x018c, + "Secondary image checksum failed\n"); + goto check_valid_image; + } + + valid_sec_image = true; + + if (sec_image_status.image_status_mask & 1) { + ql_dbg(ql_dbg_init, vha, 0x018d, + "Secondary image is active\n"); + active_sec_image = true; } check_valid_image: - if (valid_pri_image && (pri_image_status.image_status_mask & 0x1)) - ha->active_image = QLA27XX_PRIMARY_IMAGE; - if (valid_sec_image && (sec_image_status.image_status_mask & 0x1)) { - if (!ha->active_image || - pri_image_status.generation_number < - sec_image_status.generation_number) - ha->active_image = QLA27XX_SECONDARY_IMAGE; + if (valid_pri_image && active_pri_image) + active_regions->global = QLA27XX_PRIMARY_IMAGE; + + if (valid_sec_image && active_sec_image) { + if (!active_regions->global || + qla27xx_compare_image_generation( + &pri_image_status, &sec_image_status) < 0) { + active_regions->global = QLA27XX_SECONDARY_IMAGE; + } } - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x018f, "%s image\n", - ha->active_image == 0 ? "default bootld and fw" : - ha->active_image == 1 ? "primary" : - ha->active_image == 2 ? "secondary" : - "Invalid"); + ql_dbg(ql_dbg_init, vha, 0x018f, "active image %s (%u)\n", + active_regions->global == QLA27XX_DEFAULT_IMAGE ? + "default (boot/fw)" : + active_regions->global == QLA27XX_PRIMARY_IMAGE ? + "primary" : + active_regions->global == QLA27XX_SECONDARY_IMAGE ? + "secondary" : "invalid", + active_regions->global); +} - return ha->active_image; +bool qla24xx_risc_firmware_invalid(uint32_t *dword) +{ + return + !(dword[4] | dword[5] | dword[6] | dword[7]) || + !(~dword[4] | ~dword[5] | ~dword[6] | ~dword[7]); } static int qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, uint32_t faddr) { - int rval = QLA_SUCCESS; - int segments, fragment; - uint32_t *dcode, dlen; - uint32_t risc_addr; - uint32_t risc_size; - uint32_t i; + int rval; + uint templates, segments, fragment; + ulong i; + uint j; + ulong dlen; + uint32_t *dcode; + uint32_t risc_addr, risc_size, risc_attr = 0; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; + struct fwdt *fwdt = ha->fwdt; ql_dbg(ql_dbg_init, vha, 0x008b, "FW: Loading firmware from flash (%x).\n", faddr); - rval = QLA_SUCCESS; - - segments = FA_RISC_CODE_SEGMENTS; - dcode = (uint32_t *)req->ring; - *srisc_addr = 0; - - if (IS_QLA27XX(ha) && - qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) - faddr = ha->flt_region_fw_sec; - - /* Validate firmware image by checking version. */ - qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); - for (i = 0; i < 4; i++) - dcode[i] = be32_to_cpu(dcode[i]); - if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && - dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || - (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && - dcode[3] == 0)) { + dcode = (void *)req->ring; + qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x008c, "Unable to verify the integrity of flash firmware " "image.\n"); @@ -7330,34 +7580,36 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, return QLA_FUNCTION_FAILED; } - while (segments && rval == QLA_SUCCESS) { - /* Read segment's load information. 
*/ - qla24xx_read_flash_data(vha, dcode, faddr, 4); - + dcode = (void *)req->ring; + *srisc_addr = 0; + segments = FA_RISC_CODE_SEGMENTS; + for (j = 0; j < segments; j++) { + ql_dbg(ql_dbg_init, vha, 0x008d, + "-> Loading segment %u...\n", j); + qla24xx_read_flash_data(vha, dcode, faddr, 10); risc_addr = be32_to_cpu(dcode[2]); - *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; risc_size = be32_to_cpu(dcode[3]); + if (!*srisc_addr) { + *srisc_addr = risc_addr; + risc_attr = be32_to_cpu(dcode[9]); + } - fragment = 0; - while (risc_size > 0 && rval == QLA_SUCCESS) { - dlen = (uint32_t)(ha->fw_transfer_size >> 2); + dlen = ha->fw_transfer_size >> 2; + for (fragment = 0; risc_size; fragment++) { if (dlen > risc_size) dlen = risc_size; ql_dbg(ql_dbg_init, vha, 0x008e, - "Loading risc segment@ risc addr %x " - "number of dwords 0x%x offset 0x%x.\n", - risc_addr, dlen, faddr); - + "-> Loading fragment %u: %#x <- %#x (%#lx dwords)...\n", + fragment, risc_addr, faddr, dlen); qla24xx_read_flash_data(vha, dcode, faddr, dlen); for (i = 0; i < dlen; i++) dcode[i] = swab32(dcode[i]); - rval = qla2x00_load_ram(vha, req->dma, risc_addr, - dlen); + rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval) { ql_log(ql_log_fatal, vha, 0x008f, - "Failed to load segment %d of firmware.\n", + "-> Failed load firmware fragment %u.\n", fragment); return QLA_FUNCTION_FAILED; } @@ -7365,107 +7617,82 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, faddr += dlen; risc_addr += dlen; risc_size -= dlen; - fragment++; } - - /* Next segment. */ - segments--; } - if (!IS_QLA27XX(ha)) - return rval; + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_SUCCESS; - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - - ql_dbg(ql_dbg_init, vha, 0x0161, - "Loading fwdump template from %x\n", faddr); - qla24xx_read_flash_data(vha, dcode, faddr, 7); - risc_size = be32_to_cpu(dcode[2]); - ql_dbg(ql_dbg_init, vha, 0x0162, - "-> array size %x dwords\n", risc_size); - if (risc_size == 0 || risc_size == ~0) - goto default_template; - - dlen = (risc_size - 8) * sizeof(*dcode); - ql_dbg(ql_dbg_init, vha, 0x0163, - "-> template allocating %x bytes...\n", dlen); - ha->fw_dump_template = vmalloc(dlen); - if (!ha->fw_dump_template) { - ql_log(ql_log_warn, vha, 0x0164, - "Failed fwdump template allocate %x bytes.\n", risc_size); - goto default_template; - } - - faddr += 7; - risc_size -= 8; - dcode = ha->fw_dump_template; - qla24xx_read_flash_data(vha, dcode, faddr, risc_size); - for (i = 0; i < risc_size; i++) - dcode[i] = le32_to_cpu(dcode[i]); - - if (!qla27xx_fwdt_template_valid(dcode)) { - ql_log(ql_log_warn, vha, 0x0165, - "Failed fwdump template validate\n"); - goto default_template; - } - - dlen = qla27xx_fwdt_template_size(dcode); - ql_dbg(ql_dbg_init, vha, 0x0166, - "-> template size %x bytes\n", dlen); - if (dlen > risc_size * sizeof(*dcode)) { - ql_log(ql_log_warn, vha, 0x0167, - "Failed fwdump template exceeds array by %zx bytes\n", - (size_t)(dlen - risc_size * sizeof(*dcode))); - goto default_template; - } - ha->fw_dump_template_len = dlen; - return rval; + templates = (risc_attr & BIT_9) ? 
2 : 1; + ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates); + for (j = 0; j < templates; j++, fwdt++) { + if (fwdt->template) + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + dcode = (void *)req->ring; + qla24xx_read_flash_data(vha, dcode, faddr, 7); + risc_size = be32_to_cpu(dcode[2]); + ql_dbg(ql_dbg_init, vha, 0x0161, + "-> fwdt%u template array at %#x (%#x dwords)\n", + j, faddr, risc_size); + if (!risc_size || !~risc_size) { + ql_dbg(ql_dbg_init, vha, 0x0162, + "-> fwdt%u failed to read array\n", j); + goto failed; + } -default_template: - ql_log(ql_log_warn, vha, 0x0168, "Using default fwdump template\n"); - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - - dlen = qla27xx_fwdt_template_default_size(); - ql_dbg(ql_dbg_init, vha, 0x0169, - "-> template allocating %x bytes...\n", dlen); - ha->fw_dump_template = vmalloc(dlen); - if (!ha->fw_dump_template) { - ql_log(ql_log_warn, vha, 0x016a, - "Failed fwdump template allocate %x bytes.\n", risc_size); - goto failed_template; - } - - dcode = ha->fw_dump_template; - risc_size = dlen / sizeof(*dcode); - memcpy(dcode, qla27xx_fwdt_template_default(), dlen); - for (i = 0; i < risc_size; i++) - dcode[i] = be32_to_cpu(dcode[i]); - - if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { - ql_log(ql_log_warn, vha, 0x016b, - "Failed fwdump template validate\n"); - goto failed_template; - } - - dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); - ql_dbg(ql_dbg_init, vha, 0x016c, - "-> template size %x bytes\n", dlen); - ha->fw_dump_template_len = dlen; - return rval; + /* skip header and ignore checksum */ + faddr += 7; + risc_size -= 8; + + ql_dbg(ql_dbg_init, vha, 0x0163, + "-> fwdt%u template allocate template %#x words...\n", + j, risc_size); + fwdt->template = vmalloc(risc_size * sizeof(*dcode)); + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0x0164, + "-> fwdt%u failed allocate template.\n", j); + goto failed; + } -failed_template: - ql_log(ql_log_warn, vha, 0x016d, "Failed default fwdump template\n"); - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - return rval; + dcode = fwdt->template; + qla24xx_read_flash_data(vha, dcode, faddr, risc_size); + + if (!qla27xx_fwdt_template_valid(dcode)) { + ql_log(ql_log_warn, vha, 0x0165, + "-> fwdt%u failed template validate\n", j); + goto failed; + } + + dlen = qla27xx_fwdt_template_size(dcode); + ql_dbg(ql_dbg_init, vha, 0x0166, + "-> fwdt%u template size %#lx bytes (%#lx words)\n", + j, dlen, dlen / sizeof(*dcode)); + if (dlen > risc_size * sizeof(*dcode)) { + ql_log(ql_log_warn, vha, 0x0167, + "-> fwdt%u template exceeds array (%-lu bytes)\n", + j, dlen - risc_size * sizeof(*dcode)); + goto failed; + } + + fwdt->length = dlen; + ql_dbg(ql_dbg_init, vha, 0x0168, + "-> fwdt%u loaded template ok\n", j); + + faddr += risc_size + 1; + } + + return QLA_SUCCESS; + +failed: + if (fwdt->template) + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + return QLA_SUCCESS; } #define QLA_FW_URL "http://ldriver.qlogic.com/firmware/" @@ -7573,94 +7800,73 @@ static int qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; - int segments, fragment; - uint32_t *dcode, dlen; - uint32_t risc_addr; - uint32_t risc_size; - uint32_t i; + uint templates, segments, fragment; + uint32_t *dcode; + ulong dlen; + uint32_t risc_addr, risc_size, risc_attr = 0; + ulong i; + uint j; 
struct fw_blob *blob; - const uint32_t *fwcode; - uint32_t fwclen; + uint32_t *fwcode; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; + struct fwdt *fwdt = ha->fwdt; + + ql_dbg(ql_dbg_init, vha, 0x0090, + "-> FW: Loading via request-firmware.\n"); - /* Load firmware blob. */ blob = qla2x00_request_firmware(vha); if (!blob) { - ql_log(ql_log_warn, vha, 0x0090, - "Firmware image unavailable.\n"); - ql_log(ql_log_warn, vha, 0x0091, - "Firmware images can be retrieved from: " - QLA_FW_URL ".\n"); + ql_log(ql_log_warn, vha, 0x0092, + "-> Firmware file not found.\n"); return QLA_FUNCTION_FAILED; } - ql_dbg(ql_dbg_init, vha, 0x0092, - "FW: Loading via request-firmware.\n"); - - rval = QLA_SUCCESS; - - segments = FA_RISC_CODE_SEGMENTS; - dcode = (uint32_t *)req->ring; - *srisc_addr = 0; - fwcode = (uint32_t *)blob->fw->data; - fwclen = 0; - - /* Validate firmware image by checking version. */ - if (blob->fw->size < 8 * sizeof(uint32_t)) { + fwcode = (void *)blob->fw->data; + dcode = fwcode; + if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x0093, "Unable to verify integrity of firmware image (%zd).\n", blob->fw->size); - return QLA_FUNCTION_FAILED; - } - for (i = 0; i < 4; i++) - dcode[i] = be32_to_cpu(fwcode[i + 4]); - if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && - dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || - (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && - dcode[3] == 0)) { - ql_log(ql_log_fatal, vha, 0x0094, - "Unable to verify integrity of firmware image (%zd).\n", - blob->fw->size); ql_log(ql_log_fatal, vha, 0x0095, "Firmware data: %08x %08x %08x %08x.\n", dcode[0], dcode[1], dcode[2], dcode[3]); return QLA_FUNCTION_FAILED; } - while (segments && rval == QLA_SUCCESS) { + dcode = (void *)req->ring; + *srisc_addr = 0; + segments = FA_RISC_CODE_SEGMENTS; + for (j = 0; j < segments; j++) { + ql_dbg(ql_dbg_init, vha, 0x0096, + "-> Loading segment %u...\n", j); risc_addr = be32_to_cpu(fwcode[2]); - *srisc_addr = *srisc_addr == 0 ? risc_addr : *srisc_addr; risc_size = be32_to_cpu(fwcode[3]); - /* Validate firmware image size. */ - fwclen += risc_size * sizeof(uint32_t); - if (blob->fw->size < fwclen) { - ql_log(ql_log_fatal, vha, 0x0096, - "Unable to verify integrity of firmware image " - "(%zd).\n", blob->fw->size); - return QLA_FUNCTION_FAILED; + if (!*srisc_addr) { + *srisc_addr = risc_addr; + risc_attr = be32_to_cpu(fwcode[9]); } - fragment = 0; - while (risc_size > 0 && rval == QLA_SUCCESS) { - dlen = (uint32_t)(ha->fw_transfer_size >> 2); + dlen = ha->fw_transfer_size >> 2; + for (fragment = 0; risc_size; fragment++) { if (dlen > risc_size) dlen = risc_size; ql_dbg(ql_dbg_init, vha, 0x0097, - "Loading risc segment@ risc addr %x " - "number of dwords 0x%x.\n", risc_addr, dlen); + "-> Loading fragment %u: %#x <- %#x (%#lx words)...\n", + fragment, risc_addr, + (uint32_t)(fwcode - (typeof(fwcode))blob->fw->data), + dlen); for (i = 0; i < dlen; i++) dcode[i] = swab32(fwcode[i]); - rval = qla2x00_load_ram(vha, req->dma, risc_addr, - dlen); + rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval) { ql_log(ql_log_fatal, vha, 0x0098, - "Failed to load segment %d of firmware.\n", + "-> Failed load firmware fragment %u.\n", fragment); return QLA_FUNCTION_FAILED; } @@ -7668,106 +7874,82 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) fwcode += dlen; risc_addr += dlen; risc_size -= dlen; - fragment++; } - - /* Next segment. 
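*/

/*
 * The flash and blob loaders above now share the same shape: each
 * firmware segment is pushed into RISC memory in fragments of at most
 * ha->fw_transfer_size >> 2 dwords, byte-swapped on the way into the
 * staging buffer.  A minimal userspace sketch of that chunking loop;
 * load_ram() is a stand-in for qla2x00_load_ram(), and staging[]
 * models the req->ring bounce buffer (assumed large enough here):
 */

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int load_ram(uint32_t risc_addr, const uint32_t *buf, size_t dwords)
{
        printf("fragment @ %#x: %zu dwords\n", risc_addr, dwords);
        return 0;       /* 0 == success, as with QLA_SUCCESS */
}

static int load_segment(const uint32_t *fw, uint32_t risc_addr,
                        size_t risc_size, size_t max_dwords)
{
        uint32_t staging[256];  /* assumes max_dwords <= 256 for the sketch */

        while (risc_size) {
                size_t dlen = risc_size < max_dwords ? risc_size : max_dwords;
                size_t i;

                for (i = 0; i < dlen; i++)
                        staging[i] = __builtin_bswap32(fw[i]);
                if (load_ram(risc_addr, staging, dlen))
                        return -1;
                fw += dlen;
                risc_addr += dlen;
                risc_size -= dlen;
        }
        return 0;
}

int main(void)
{
        uint32_t fw[10] = { 0 };

        return load_segment(fw, 0x100000, 10, 4);  /* 3 fragments: 4+4+2 */
}

/*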
*/ - segments--; } - if (!IS_QLA27XX(ha)) - return rval; + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return QLA_SUCCESS; - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - - ql_dbg(ql_dbg_init, vha, 0x171, - "Loading fwdump template from %x\n", - (uint32_t)((void *)fwcode - (void *)blob->fw->data)); - risc_size = be32_to_cpu(fwcode[2]); - ql_dbg(ql_dbg_init, vha, 0x172, - "-> array size %x dwords\n", risc_size); - if (risc_size == 0 || risc_size == ~0) - goto default_template; - - dlen = (risc_size - 8) * sizeof(*fwcode); - ql_dbg(ql_dbg_init, vha, 0x0173, - "-> template allocating %x bytes...\n", dlen); - ha->fw_dump_template = vmalloc(dlen); - if (!ha->fw_dump_template) { - ql_log(ql_log_warn, vha, 0x0174, - "Failed fwdump template allocate %x bytes.\n", risc_size); - goto default_template; - } - - fwcode += 7; - risc_size -= 8; - dcode = ha->fw_dump_template; - for (i = 0; i < risc_size; i++) - dcode[i] = le32_to_cpu(fwcode[i]); - - if (!qla27xx_fwdt_template_valid(dcode)) { - ql_log(ql_log_warn, vha, 0x0175, - "Failed fwdump template validate\n"); - goto default_template; - } - - dlen = qla27xx_fwdt_template_size(dcode); - ql_dbg(ql_dbg_init, vha, 0x0176, - "-> template size %x bytes\n", dlen); - if (dlen > risc_size * sizeof(*fwcode)) { - ql_log(ql_log_warn, vha, 0x0177, - "Failed fwdump template exceeds array by %zx bytes\n", - (size_t)(dlen - risc_size * sizeof(*fwcode))); - goto default_template; - } - ha->fw_dump_template_len = dlen; - return rval; + templates = (risc_attr & BIT_9) ? 2 : 1; + ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates); + for (j = 0; j < templates; j++, fwdt++) { + if (fwdt->template) + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + risc_size = be32_to_cpu(fwcode[2]); + ql_dbg(ql_dbg_init, vha, 0x0171, + "-> fwdt%u template array at %#x (%#x dwords)\n", + j, (uint32_t)((void *)fwcode - (void *)blob->fw->data), + risc_size); + if (!risc_size || !~risc_size) { + ql_dbg(ql_dbg_init, vha, 0x0172, + "-> fwdt%u failed to read array\n", j); + goto failed; + } -default_template: - ql_log(ql_log_warn, vha, 0x0178, "Using default fwdump template\n"); - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - - dlen = qla27xx_fwdt_template_default_size(); - ql_dbg(ql_dbg_init, vha, 0x0179, - "-> template allocating %x bytes...\n", dlen); - ha->fw_dump_template = vmalloc(dlen); - if (!ha->fw_dump_template) { - ql_log(ql_log_warn, vha, 0x017a, - "Failed fwdump template allocate %x bytes.\n", risc_size); - goto failed_template; - } - - dcode = ha->fw_dump_template; - risc_size = dlen / sizeof(*fwcode); - fwcode = qla27xx_fwdt_template_default(); - for (i = 0; i < risc_size; i++) - dcode[i] = be32_to_cpu(fwcode[i]); - - if (!qla27xx_fwdt_template_valid(ha->fw_dump_template)) { - ql_log(ql_log_warn, vha, 0x017b, - "Failed fwdump template validate\n"); - goto failed_template; - } - - dlen = qla27xx_fwdt_template_size(ha->fw_dump_template); - ql_dbg(ql_dbg_init, vha, 0x017c, - "-> template size %x bytes\n", dlen); - ha->fw_dump_template_len = dlen; - return rval; + /* skip header and ignore checksum */ + fwcode += 7; + risc_size -= 8; + + ql_dbg(ql_dbg_init, vha, 0x0173, + "-> fwdt%u template allocate template %#x words...\n", + j, risc_size); + fwdt->template = vmalloc(risc_size * sizeof(*dcode)); + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0x0174, + "-> fwdt%u failed allocate template.\n", 
j); + goto failed; + } -failed_template: - ql_log(ql_log_warn, vha, 0x017d, "Failed default fwdump template\n"); - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; - return rval; + dcode = fwdt->template; + for (i = 0; i < risc_size; i++) + dcode[i] = fwcode[i]; + + if (!qla27xx_fwdt_template_valid(dcode)) { + ql_log(ql_log_warn, vha, 0x0175, + "-> fwdt%u failed template validate\n", j); + goto failed; + } + + dlen = qla27xx_fwdt_template_size(dcode); + ql_dbg(ql_dbg_init, vha, 0x0176, + "-> fwdt%u template size %#lx bytes (%#lx words)\n", + j, dlen, dlen / sizeof(*dcode)); + if (dlen > risc_size * sizeof(*dcode)) { + ql_log(ql_log_warn, vha, 0x0177, + "-> fwdt%u template exceeds array (%-lu bytes)\n", + j, dlen - risc_size * sizeof(*dcode)); + goto failed; + } + + fwdt->length = dlen; + ql_dbg(ql_dbg_init, vha, 0x0178, + "-> fwdt%u loaded template ok\n", j); + + fwcode += risc_size + 1; + } + + return QLA_SUCCESS; + +failed: + if (fwdt->template) + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + + return QLA_SUCCESS; } int @@ -7796,32 +7978,50 @@ qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; struct qla_hw_data *ha = vha->hw; + struct active_regions active_regions = { }; if (ql2xfwloadbin == 2) goto try_blob_fw; - /* - * FW Load priority: + /* FW Load priority: * 1) Firmware residing in flash. * 2) Firmware via request-firmware interface (.bin file). - * 3) Golden-Firmware residing in flash -- limited operation. + * 3) Golden-Firmware residing in flash -- (limited operation). */ + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + goto try_primary_fw; + + qla27xx_get_active_image(vha, &active_regions); + + if (active_regions.global != QLA27XX_SECONDARY_IMAGE) + goto try_primary_fw; + + ql_dbg(ql_dbg_init, vha, 0x008b, + "Loading secondary firmware image.\n"); + rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec); + if (!rval) + return rval; + +try_primary_fw: + ql_dbg(ql_dbg_init, vha, 0x008b, + "Loading primary firmware image.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw); - if (rval == QLA_SUCCESS) + if (!rval) return rval; try_blob_fw: rval = qla24xx_load_risc_blob(vha, srisc_addr); - if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw) + if (!rval || !ha->flt_region_gold_fw) return rval; ql_log(ql_log_info, vha, 0x0099, "Attempting to fallback to golden firmware.\n"); rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw); - if (rval != QLA_SUCCESS) + if (rval) return rval; - ql_log(ql_log_info, vha, 0x009a, "Update operational firmware.\n"); + ql_log(ql_log_info, vha, 0x009a, "Need firmware flash update.\n"); ha->flags.running_gold_fw = 1; return rval; } @@ -7990,25 +8190,48 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) uint32_t chksum; uint16_t cnt; struct qla_hw_data *ha = vha->hw; + uint32_t faddr; + struct active_regions active_regions = { }; rval = QLA_SUCCESS; icb = (struct init_cb_81xx *)ha->init_cb; nv = ha->nvram; /* Determine NVRAM starting address. 
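*/

/*
 * qla81xx_load_risc() above now walks a fixed fallback chain: the
 * secondary flash image (only when the active-image query reports it
 * current), then the primary flash image, then a request_firmware()
 * blob, then the golden flash image as a last, limited-operation
 * resort.  A simplified sketch of that ordering; the ql2xfwloadbin
 * module-parameter shortcut is omitted, and try_load() is a stub
 * standing in for the qla24xx_load_risc_*() calls:
 */

#include <stdbool.h>

enum fw_source {
        FW_FLASH_SECONDARY,
        FW_FLASH_PRIMARY,
        FW_BLOB,
        FW_FLASH_GOLDEN,
};

static bool try_load(enum fw_source src)
{
        (void)src;
        return false;   /* stub: real code loads and verifies the image */
}

static int load_firmware(bool secondary_active, bool have_golden_region)
{
        if (secondary_active && try_load(FW_FLASH_SECONDARY))
                return 0;
        if (try_load(FW_FLASH_PRIMARY))
                return 0;
        if (try_load(FW_BLOB))
                return 0;
        /* golden firmware keeps the board reachable for a flash update */
        if (have_golden_region && try_load(FW_FLASH_GOLDEN))
                return 0;
        return -1;
}

/*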
*/ - ha->nvram_size = sizeof(struct nvram_81xx); + ha->nvram_size = sizeof(*nv); ha->vpd_size = FA_NVRAM_VPD_SIZE; if (IS_P3P_TYPE(ha) || IS_QLA8031(ha)) ha->vpd_size = FA_VPD_SIZE_82XX; + if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) + qla28xx_get_aux_images(vha, &active_regions); + /* Get VPD data into cache */ ha->vpd = ha->nvram + VPD_OFFSET; - ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, - ha->vpd_size); + + faddr = ha->flt_region_vpd; + if (IS_QLA28XX(ha)) { + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_vpd_sec; + ql_dbg(ql_dbg_init, vha, 0x0110, + "Loading %s nvram image.\n", + active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? + "primary" : "secondary"); + } + qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2); /* Get NVRAM data into cache and calculate checksum. */ - ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2, - ha->nvram_size); + faddr = ha->flt_region_nvram; + if (IS_QLA28XX(ha)) { + if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_nvram_sec; + } + ql_dbg(ql_dbg_init, vha, 0x0110, + "Loading %s nvram image.\n", + active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? + "primary" : "secondary"); + qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2); + dptr = (uint32_t *)nv; for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) chksum += le32_to_cpu(*dptr); @@ -8016,17 +8239,16 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x0111, "Contents of NVRAM:\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0112, - (uint8_t *)nv, ha->nvram_size); + nv, ha->nvram_size); /* Bad NVRAM data, set defaults parameters. */ - if (chksum || nv->id[0] != 'I' || nv->id[1] != 'S' || nv->id[2] != 'P' - || nv->id[3] != ' ' || - nv->nvram_version < cpu_to_le16(ICB_VERSION)) { + if (chksum || memcmp("ISP ", nv->id, sizeof(nv->id)) || + le16_to_cpu(nv->nvram_version) < ICB_VERSION) { /* Reset NVRAM data. */ ql_log(ql_log_info, vha, 0x0073, - "Inconsistent NVRAM detected: checksum=0x%x id=%c " - "version=0x%x.\n", chksum, nv->id[0], - le16_to_cpu(nv->nvram_version)); + "Inconsistent NVRAM checksum=%#x id=%.4s version=%#x.\n", + chksum, nv->id, le16_to_cpu(nv->nvram_version)); + ql_dump_buffer(ql_dbg_init, vha, 0x0073, nv, sizeof(*nv)); ql_log(ql_log_info, vha, 0x0074, "Falling back to functioning (yet invalid -- WWPN) " "defaults.\n"); @@ -8215,7 +8437,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) ha->login_retry_count = ql2xloginretrycount; /* if not running MSI-X we need handshaking on interrupts */ - if (!vha->hw->flags.msix_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha))) + if (!vha->hw->flags.msix_enabled && + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) icb->firmware_options_2 |= cpu_to_le32(BIT_22); /* Enable ZIO. 
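*/

/*
 * The NVRAM sanity test above condenses to three checks: the 32-bit
 * sum over the whole image must be zero, the id field must spell
 * "ISP ", and the layout version must be at least ICB_VERSION.  A
 * minimal sketch; the ICB_VERSION value is a placeholder, and on a
 * big-endian host each word would need le32_to_cpu() first:
 */

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ICB_VERSION 1   /* placeholder for the driver's real constant */

static bool nvram_valid(const uint32_t *nv_words, size_t nv_size_bytes,
                        const char id[4], uint16_t nvram_version)
{
        uint32_t chksum = 0;
        size_t i;

        for (i = 0; i < nv_size_bytes / 4; i++)
                chksum += nv_words[i];  /* valid image sums to zero */

        return chksum == 0 &&
               memcmp(id, "ISP ", 4) == 0 &&
               nvram_version >= ICB_VERSION;
}

/*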
*/ @@ -8248,12 +8471,6 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* N2N: driver will initiate Login instead of FW */ icb->firmware_options_3 |= BIT_8; - if (IS_QLA27XX(ha)) { - icb->firmware_options_3 |= BIT_8; - ql_dbg(ql_log_info, vha, 0x0075, - "Enabling direct connection.\n"); - } - if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); @@ -8661,7 +8878,7 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->msix->in_use = 1; list_add_tail(&qpair->qp_list_elem, &vha->qp_list); qpair->pdev = ha->pdev; - if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) qpair->reqq_start_iocbs = qla_83xx_start_iocbs; mutex_unlock(&ha->mq_lock); diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index e1015d9482a5..7ff09b4b3098 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -467,7 +467,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) req->ring_ptr++; /* Set chip new ring index. */ - if (ha->mqenable || IS_QLA27XX(ha)) { + if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { WRT_REG_DWORD(req->req_q_in, req->ring_index); } else if (IS_QLA83XX(ha)) { WRT_REG_DWORD(req->req_q_in, req->ring_index); @@ -2330,7 +2330,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) if (req->cnt < req_cnt + 2) { if (qpair->use_shadow_reg) cnt = *req->out_ptr; - else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) + else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); else if (IS_P3P_TYPE(ha)) cnt = RD_REG_DWORD(&reg->isp82.req_q_out); diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3b5887832bc6..523598ebd937 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -708,12 +708,15 @@ skip_rio: break; case MBA_SYSTEM_ERR: /* System Error */ - mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) ? + mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) ?
RD_REG_WORD(&reg24->mailbox7) : 0; ql_log(ql_log_warn, vha, 0x5003, "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); - + ha->fw_dump_mpi = + (IS_QLA27XX(ha) || IS_QLA28XX(ha)) && + RD_REG_WORD(&reg24->mailbox7) & BIT_8; ha->isp_ops->fw_dump(vha, 1); ha->flags.fw_init_done = 0; QLA_FW_STOPPED(ha); @@ -1372,7 +1375,7 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, le16_to_cpu(mbx->status_flags)); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029, - (uint8_t *)mbx, sizeof(*mbx)); + mbx, sizeof(*mbx)); goto logio_done; } @@ -1516,7 +1519,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, bsg_reply->reply_payload_rcv_len = 0; } ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035, - (uint8_t *)pkt, sizeof(*pkt)); + pkt, sizeof(*pkt)); } else { res = DID_OK << 16; bsg_reply->reply_payload_rcv_len = @@ -1656,7 +1659,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), fw_status, sizeof(fw_status)); ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056, - (uint8_t *)pkt, sizeof(*pkt)); + pkt, sizeof(*pkt)); } else { res = DID_OK << 16; @@ -1700,7 +1703,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, fcport->d_id.b.area, fcport->d_id.b.al_pa, logio->entry_status); ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, - (uint8_t *)logio, sizeof(*logio)); + logio, sizeof(*logio)); goto logio_done; } @@ -1846,8 +1849,8 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) } if (iocb->u.tmf.data != QLA_SUCCESS) - ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055, - (uint8_t *)sts, sizeof(*sts)); + ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, + sts, sizeof(*sts)); sp->done(sp, 0); } @@ -3014,7 +3017,8 @@ process_err: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; case ABTS_RECV_24XX: - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { /* ensure that the ATIO queue is empty */ qlt_handle_abts_recv(vha, rsp, (response_t *)pkt); @@ -3087,7 +3091,7 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && - !IS_QLA27XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return; rval = QLA_SUCCESS; @@ -3431,7 +3435,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) min_vecs++; } - if (USER_CTRL_IRQ(ha) || !shost_use_blk_mq(vha->host)) { + if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { /* user wants to control IRQ setting for target mode */ ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, ha->msix_count, PCI_IRQ_MSIX); @@ -3537,7 +3541,7 @@ msix_register_fail: } /* Enable MSI-X vector for response queue update for queue 0 */ - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ha->msixbase && ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || ql2xmqsupport)) @@ -3568,7 +3572,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) /* If possible, enable MSI-X.
*/ if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && - !IS_QLAFX00(ha) && !IS_QLA27XX(ha))) + !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))) goto skip_msi; if (ql2xenablemsix == 2) @@ -3607,7 +3611,7 @@ skip_msix: if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && - !IS_QLA27XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto skip_msi; ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 5400696e1f6b..9f2fb1028f61 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -634,14 +634,15 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, mcp->out_mb |= MBX_4; } - mcp->in_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1023, - "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, "Done %s.\n", __func__); @@ -656,7 +657,7 @@ static inline uint16_t qla25xx_set_sfp_lr_dist(struct qla_hw_data *ha) { uint16_t mb4 = BIT_0; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mb4 |= ha->long_range_distance << LR_DIST_FW_POS; return mb4; @@ -666,7 +667,7 @@ static inline uint16_t qla25xx_set_nvr_lr_dist(struct qla_hw_data *ha) { uint16_t mb4 = BIT_0; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { struct nvram_81xx *nv = ha->nvram; mb4 |= LR_DIST_FW_FIELD(nv->enhanced_features); @@ -711,7 +712,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[4] = 0; ha->flags.using_lr_setting = 0; if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || - IS_QLA27XX(ha)) { + IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ql2xautodetectsfp) { if (ha->flags.detected_lr_sfp) { mcp->mb[4] |= @@ -730,19 +731,20 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) } } - if (ql2xnvmeenable && IS_QLA27XX(ha)) + if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha))) mcp->mb[4] |= NVME_ENABLE_FLAG; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { struct nvram_81xx *nv = ha->nvram; /* set minimum speed if specified in nvram */ - if (nv->min_link_speed >= 2 && - nv->min_link_speed <= 5) { + if (nv->min_supported_speed >= 2 && + nv->min_supported_speed <= 5) { mcp->mb[4] |= BIT_4; - mcp->mb[11] = nv->min_link_speed; + mcp->mb[11] |= nv->min_supported_speed & 0xF; mcp->out_mb |= MBX_11; mcp->in_mb |= BIT_5; - vha->min_link_speed_feat = nv->min_link_speed; + vha->min_supported_speed = + nv->min_supported_speed; } } @@ -770,34 +772,39 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1026, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); - } else { - if (IS_FWI2_CAPABLE(ha)) { - ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; - ql_dbg(ql_dbg_mbx, vha, 0x119a, - "fw_ability_mask=%x.\n", ha->fw_ability_mask); - ql_dbg(ql_dbg_mbx, vha, 0x1027, - "exchanges=%x.\n", mcp->mb[1]); - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { - ha->max_speed_sup = mcp->mb[2] & BIT_0; - ql_dbg(ql_dbg_mbx, vha, 0x119b, - "Maximum speed supported=%s.\n", - ha->max_speed_sup ? 
"32Gps" : "16Gps"); - if (vha->min_link_speed_feat) { - ha->min_link_speed = mcp->mb[5]; - ql_dbg(ql_dbg_mbx, vha, 0x119c, - "Minimum speed set=%s.\n", - mcp->mb[5] == 5 ? "32Gps" : - mcp->mb[5] == 4 ? "16Gps" : - mcp->mb[5] == 3 ? "8Gps" : - mcp->mb[5] == 2 ? "4Gps" : - "unknown"); - } - } + return rval; + } + + if (!IS_FWI2_CAPABLE(ha)) + goto done; + + ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2]; + ql_dbg(ql_dbg_mbx, vha, 0x119a, + "fw_ability_mask=%x.\n", ha->fw_ability_mask); + ql_dbg(ql_dbg_mbx, vha, 0x1027, "exchanges=%x.\n", mcp->mb[1]); + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1); + ql_dbg(ql_dbg_mbx, vha, 0x119b, "max_supported_speed=%s.\n", + ha->max_supported_speed == 0 ? "16Gps" : + ha->max_supported_speed == 1 ? "32Gps" : + ha->max_supported_speed == 2 ? "64Gps" : "unknown"); + if (vha->min_supported_speed) { + ha->min_supported_speed = mcp->mb[5] & + (BIT_0 | BIT_1 | BIT_2); + ql_dbg(ql_dbg_mbx, vha, 0x119c, + "min_supported_speed=%s.\n", + ha->min_supported_speed == 6 ? "64Gps" : + ha->min_supported_speed == 5 ? "32Gps" : + ha->min_supported_speed == 4 ? "16Gps" : + ha->min_supported_speed == 3 ? "8Gps" : + ha->min_supported_speed == 2 ? "4Gps" : "unknown"); } - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, - "Done.\n"); } +done: + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, + "Done %s.\n", __func__); + return rval; } @@ -1053,10 +1060,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8; if (IS_FWI2_CAPABLE(ha)) mcp->in_mb |= MBX_17|MBX_16|MBX_15; - if (IS_QLA27XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_25|MBX_24|MBX_23|MBX_22|MBX_21|MBX_20|MBX_19|MBX_18| - MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8; + MBX_14|MBX_13|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7; mcp->flags = 0; mcp->tov = MBX_TOV_SECONDS; @@ -1122,7 +1129,10 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) } } - if (IS_QLA27XX(ha)) { + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->serdes_version[0] = mcp->mb[7] & 0xff; + ha->serdes_version[1] = mcp->mb[8] >> 8; + ha->serdes_version[2] = mcp->mb[8] & 0xff; ha->mpi_version[0] = mcp->mb[10] & 0xff; ha->mpi_version[1] = mcp->mb[11] >> 8; ha->mpi_version[2] = mcp->mb[11] & 0xff; @@ -1133,6 +1143,13 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20]; ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22]; ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24]; + if (IS_QLA28XX(ha)) { + if (mcp->mb[16] & BIT_10) { + ql_log(ql_log_info, vha, 0xffff, + "FW support secure flash updates\n"); + ha->flags.secure_fw = 1; + } + } } failed: @@ -1638,7 +1655,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; if (IS_FWI2_CAPABLE(vha->hw)) mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; - if (IS_QLA27XX(vha->hw)) + if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) mcp->in_mb |= MBX_15; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; @@ -1692,7 +1709,7 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, } } - if (IS_QLA27XX(vha->hw)) + if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) vha->bbcr = mcp->mb[15]; } @@ -1808,7 +1825,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) } /* 1 and 2 should normally be captured. */ mcp->in_mb = MBX_2|MBX_1|MBX_0; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) /* mb3 is additional info about the installed SFP. 
*/ mcp->in_mb |= MBX_3; mcp->buf_size = size; @@ -1819,10 +1836,20 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x104d, - "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x,.\n", + "Failed=%x mb[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); + if (ha->init_cb) { + ql_dbg(ql_dbg_mbx, vha, 0x104d, "init_cb:\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, + 0x0104d, ha->init_cb, sizeof(*ha->init_cb)); + } + if (ha->ex_init_cb && ha->ex_init_cb->ex_version) { + ql_dbg(ql_dbg_mbx, vha, 0x104d, "ex_init_cb:\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, + 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb)); + } } else { - if (IS_QLA27XX(ha)) { + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (mcp->mb[2] == 6 || mcp->mb[3] == 2) ql_dbg(ql_dbg_mbx, vha, 0x119d, "Invalid SFP/Validation Failed\n"); @@ -2076,7 +2103,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); } else { - if (IS_QLA27XX(ha)) { + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (mcp->mb[2] == 6 || mcp->mb[3] == 2) ql_dbg(ql_dbg_mbx, vha, 0x119e, "Invalid SFP/Validation Failed\n"); @@ -2859,7 +2886,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha) mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; mcp->out_mb = MBX_0; mcp->in_mb = MBX_11|MBX_10|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; - if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) || IS_QLA27XX(vha->hw)) + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || + IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_12; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; @@ -2884,7 +2912,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha) ha->orig_fw_iocb_count = mcp->mb[10]; if (ha->flags.npiv_supported) ha->max_npiv_vports = mcp->mb[11]; - if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) ha->fw_max_fcf_count = mcp->mb[12]; } @@ -3323,7 +3352,7 @@ qla2x00_write_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t data) mbx_cmd_t *mcp = &mc; if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && - !IS_QLA27XX(vha->hw)) + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1182, @@ -3362,7 +3391,7 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data) mbx_cmd_t *mcp = &mc; if (!IS_QLA25XX(vha->hw) && !IS_QLA2031(vha->hw) && - !IS_QLA27XX(vha->hw)) + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1185, @@ -3631,7 +3660,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma, "Entered %s.\n", __func__); if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && - !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) + !IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && + !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(vha->hw->pdev))) @@ -3744,7 +3774,7 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. 
*/ - if (mb != NULL) { + if (mb) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; @@ -3779,7 +3809,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, mcp->mb[0] = MBC_PORT_PARAMS; mcp->mb[1] = loop_id; mcp->mb[2] = BIT_0; - mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0); + mcp->mb[3] = port_speed & 0x3F; mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_1|MBX_0; @@ -3788,7 +3818,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id, rval = qla2x00_mailbox_command(vha, mcp); /* Return mailbox statuses. */ - if (mb != NULL) { + if (mb) { mb[0] = mcp->mb[0]; mb[1] = mcp->mb[1]; mb[3] = mcp->mb[3]; @@ -4230,7 +4260,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111c, "Dump of Verify Request.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111e, - (uint8_t *)mn, sizeof(*mn)); + mn, sizeof(*mn)); rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120); if (rval != QLA_SUCCESS) { @@ -4242,7 +4272,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status) ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1110, "Dump of Verify Response.\n"); ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1118, - (uint8_t *)mn, sizeof(*mn)); + mn, sizeof(*mn)); status[0] = le16_to_cpu(mn->p.rsp.comp_status); status[1] = status[0] == CS_VCS_CHIP_FAILURE ? @@ -4318,7 +4348,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) mcp->mb[12] = req->qos; mcp->mb[11] = req->vp_idx; mcp->mb[13] = req->rid; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->mb[15] = 0; mcp->mb[4] = req->id; @@ -4332,9 +4362,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) mcp->flags = MBX_DMA_OUT; mcp->tov = MBX_TOV_SECONDS * 2; - if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) mcp->in_mb |= MBX_1; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { mcp->out_mb |= MBX_15; /* debug q create issue in SR-IOV */ mcp->in_mb |= MBX_9 | MBX_8 | MBX_7; @@ -4343,7 +4374,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(req->options & BIT_0)) { WRT_REG_DWORD(req->req_q_in, 0); - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) WRT_REG_DWORD(req->req_q_out, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -4387,7 +4418,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) mcp->mb[5] = rsp->length; mcp->mb[14] = rsp->msix->entry; mcp->mb[13] = rsp->rid; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->mb[15] = 0; mcp->mb[4] = rsp->id; @@ -4404,7 +4435,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) if (IS_QLA81XX(ha)) { mcp->out_mb |= MBX_12|MBX_11|MBX_10; mcp->in_mb |= MBX_1; - } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { mcp->out_mb |= MBX_15|MBX_12|MBX_11|MBX_10; mcp->in_mb |= MBX_1; /* debug q create issue in SR-IOV */ @@ -4414,7 +4445,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { WRT_REG_DWORD(rsp->rsp_q_out, 0); - if 
(!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) WRT_REG_DWORD(rsp->rsp_q_in, 0); } @@ -4472,7 +4503,7 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size) "Entered %s.\n", __func__); if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && - !IS_QLA27XX(vha->hw)) + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; @@ -4504,7 +4535,7 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable) mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && - !IS_QLA27XX(vha->hw)) + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df, @@ -4539,7 +4570,7 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) mbx_cmd_t *mcp = &mc; if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw) && - !IS_QLA27XX(vha->hw)) + !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, @@ -4570,6 +4601,42 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish) } int +qla81xx_fac_semaphore_access(scsi_qla_host_t *vha, int lock) +{ + int rval = QLA_SUCCESS; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + struct qla_hw_data *ha = vha->hw; + + if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return rval; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; + mcp->mb[1] = (lock ? FAC_OPT_CMD_LOCK_SEMAPHORE : + FAC_OPT_CMD_UNLOCK_SEMAPHORE); + mcp->out_mb = MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e3, + "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", + rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4, + "Done %s.\n", __func__); + } + + return rval; +} + +int qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha) { int rval = 0; @@ -4818,10 +4885,10 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp, if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x10e9, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); - if (mcp->mb[0] == MBS_COMMAND_ERROR && - mcp->mb[1] == 0x22) + if (mcp->mb[0] == MBS_COMMAND_ERROR && mcp->mb[1] == 0x22) { /* sfp is not there */ rval = QLA_INTERFACE_ERROR; + } } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, "Done %s.\n", __func__); @@ -5161,13 +5228,14 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) mcp->mb[3] = MSW(data); mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; - mcp->in_mb = MBX_0; + mcp->in_mb = MBX_1|MBX_0; mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1101, - "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + "Failed=%x mb[0]=%x mb[1]=%x.\n", + rval, mcp->mb[0], mcp->mb[1]); } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, "Done %s.\n", __func__); @@ -5278,7 +5346,7 @@ qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode) mcp->out_mb = MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_4|MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; @@ -5316,7 +5384,7 @@ 
qla2x00_get_data_rate(scsi_qla_host_t *vha) mcp->mb[1] = QLA_GET_DATA_RATE; mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_2|MBX_1|MBX_0; - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) mcp->in_mb |= MBX_3; mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; @@ -5346,7 +5414,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb) "Entered %s.\n", __func__); if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) && - !IS_QLA27XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_FUNCTION_FAILED; mcp->mb[0] = MBC_GET_PORT_CONFIG; mcp->out_mb = MBX_0; @@ -5842,7 +5910,7 @@ qla83xx_wr_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130, @@ -5917,7 +5985,7 @@ qla83xx_rd_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t *data) struct qla_hw_data *ha = vha->hw; unsigned long retry_max_time = jiffies + (2 * HZ); - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x114b, "Entered %s.\n", __func__); @@ -5967,7 +6035,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) mbx_cmd_t *mcp = &mc; struct qla_hw_data *ha = vha->hw; - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); @@ -6101,7 +6169,8 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, mbx_cmd_t *mcp = &mc; dma_addr_t dd_dma; - if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw)) + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && + !IS_QLA28XX(vha->hw)) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x119f, @@ -6507,3 +6576,101 @@ int qla24xx_res_count_wait(struct scsi_qla_host *vha, done: return rval; } + +int qla28xx_secure_flash_update(scsi_qla_host_t *vha, uint16_t opts, + uint16_t region, uint32_t len, dma_addr_t sfub_dma_addr, + uint32_t sfub_len) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + mcp->mb[0] = MBC_SECURE_FLASH_UPDATE; + mcp->mb[1] = opts; + mcp->mb[2] = region; + mcp->mb[3] = MSW(len); + mcp->mb[4] = LSW(len); + mcp->mb[5] = MSW(sfub_dma_addr); + mcp->mb[6] = LSW(sfub_dma_addr); + mcp->mb[7] = MSW(MSD(sfub_dma_addr)); + mcp->mb[8] = LSW(MSD(sfub_dma_addr)); + mcp->mb[9] = sfub_len; + mcp->out_mb = + MBX_9|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s(%ld): failed rval 0x%x, %x %x %x", + __func__, vha->host_no, rval, mcp->mb[0], mcp->mb[1], + mcp->mb[2]); + } + + return rval; +} + +int qla2xxx_write_remote_register(scsi_qla_host_t *vha, uint32_t addr, + uint32_t data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_WRITE_REMOTE_REG; + mcp->mb[1] = LSW(addr); + mcp->mb[2] = MSW(addr); + mcp->mb[3] = LSW(data); + mcp->mb[4] = MSW(data); + mcp->out_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e9, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } 
else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, + "Done %s.\n", __func__); + } + + return rval; +} + +int qla2xxx_read_remote_register(scsi_qla_host_t *vha, uint32_t addr, + uint32_t *data) +{ + int rval; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8, + "Entered %s.\n", __func__); + + mcp->mb[0] = MBC_READ_REMOTE_REG; + mcp->mb[1] = LSW(addr); + mcp->mb[2] = MSW(addr); + mcp->out_mb = MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + *data = (uint32_t)((((uint32_t)mcp->mb[4]) << 16) | mcp->mb[3]); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x10e9, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea, + "Done %s.\n", __func__); + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 60f964c53c01..8abd42795d28 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -629,17 +629,20 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha) * * Returns 0 on success. */ -void +int qlafx00_soft_reset(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; + int rval = QLA_FUNCTION_FAILED; if (unlikely(pci_channel_offline(ha->pdev) && ha->flags.pci_channel_io_perm_failure)) - return; + return rval; ha->isp_ops->disable_intrs(ha); qlafx00_soc_cpu_reset(vha); + + return QLA_SUCCESS; } /** @@ -1138,8 +1141,8 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha, ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088, "Listing Target bit map...\n"); - ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, - 0x2089, (uint8_t *)ha->gid_list, 32); + ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha, 0x2089, + ha->gid_list, 32); /* Allocate temporary rmtport for any new rmtports discovered. 
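*/

/*
 * The remote-register mailbox commands above split 32-bit addresses
 * and data across pairs of 16-bit mailbox registers with the driver's
 * LSW()/MSW() macros and stitch the reply back together from
 * mb[4]/mb[3].  A runnable roundtrip sketch; these macro definitions
 * mirror the driver's intent rather than quoting its headers:
 */

#include <stdint.h>
#include <stdio.h>

#define LSW(x)  ((uint16_t)(x))
#define MSW(x)  ((uint16_t)((uint32_t)(x) >> 16))

int main(void)
{
        uint32_t addr = 0xdeadbeef;
        uint16_t mb1 = LSW(addr), mb2 = MSW(addr);      /* outbound halves */
        uint32_t data = ((uint32_t)mb2 << 16) | mb1;    /* inbound rebuild */

        printf("%#x -> %#x/%#x -> %#x\n", addr, mb2, mb1, data);
        return 0;
}

/*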
*/ new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); @@ -1913,8 +1916,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) phost_info->domainname, phost_info->hostdriver); ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d, - (uint8_t *)phost_info, - sizeof(struct host_system_info)); + phost_info, sizeof(*phost_info)); } } @@ -1968,7 +1970,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) vha->d_id.b.al_pa = pinfo->port_id[2]; qlafx00_update_host_attr(vha, pinfo); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141, - (uint8_t *)pinfo, 16); + pinfo, 16); } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) { struct qlafx00_tgt_node_info *pinfo = (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; @@ -1976,12 +1978,12 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE); fcport->port_type = FCT_TARGET; ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144, - (uint8_t *)pinfo, 16); + pinfo, 16); } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) { struct qlafx00_tgt_node_info *pinfo = (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr; ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146, - (uint8_t *)pinfo, 16); + pinfo, 16); memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE); } else if (fx_type == FXDISC_ABORT_IOCTL) fdisc->u.fxiocb.result = @@ -2248,18 +2250,16 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); - memcpy(fw_sts_ptr, (uint8_t *)&fstatus, - sizeof(struct qla_mt_iocb_rsp_fx00)); + memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus)); bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x5080, - (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00)); + sp->vha, 0x5080, pkt, sizeof(*pkt)); ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->fcport->vha, 0x5074, - (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00)); + sp->vha, 0x5074, + fw_sts_ptr, sizeof(fstatus)); res = bsg_reply->result = DID_OK << 16; bsg_reply->reply_payload_rcv_len = @@ -2597,7 +2597,7 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) /* Move sense data. 
*/ ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e, - (uint8_t *)pkt, sizeof(sts_cont_entry_t)); + pkt, sizeof(*pkt)); memcpy(sense_ptr, pkt->data, sense_sz); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a, sense_ptr, sense_sz); @@ -3056,13 +3056,13 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, if (avail_dsds == 0 && cont == 1) { cont = 0; memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, - REQUEST_ENTRY_SIZE); + sizeof(lcont_pkt)); } } if (avail_dsds != 0 && cont == 1) { memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt, - REQUEST_ENTRY_SIZE); + sizeof(lcont_pkt)); } } @@ -3172,9 +3172,9 @@ qlafx00_start_scsi(srb_t *sp) lcmd_pkt.entry_status = (uint8_t) rsp->id; ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e, - (uint8_t *)cmd->cmnd, cmd->cmd_len); + cmd->cmnd, cmd->cmd_len); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032, - (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE); + &lcmd_pkt, sizeof(lcmd_pkt)); memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE); wmb(); @@ -3454,10 +3454,8 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) } ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, - sp->vha, 0x3047, - (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00)); + sp->vha, 0x3047, &fx_iocb, sizeof(fx_iocb)); - memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, - sizeof(struct fxdisc_entry_fx00)); + memcpy_toio((void __iomem *)pfxiocb, &fx_iocb, sizeof(fx_iocb)); wmb(); } diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 4d0aaae12253..77136571dcc9 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1757,11 +1757,13 @@ qla82xx_pci_config(scsi_qla_host_t *vha) * * Returns 0 on success. */ -void +int qla82xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; ha->isp_ops->disable_intrs(ha); + + return QLA_SUCCESS; } void qla82xx_config_rings(struct scsi_qla_host *vha) @@ -2658,8 +2660,8 @@ done: /* * Address and length are byte address */ -uint8_t * -qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +void * +qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { scsi_block_requests(vha->host); @@ -2767,15 +2769,14 @@ write_done: } int -qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +qla82xx_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; /* Suspend HBA. 
*/ scsi_block_requests(vha->host); - rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset, - length >> 2); + rval = qla82xx_write_flash_data(vha, buf, offset, length >> 2); scsi_unblock_requests(vha->host); /* Convert return ISP82xx to generic */ diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index fe856b602e03..691f81d7b1a3 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -559,12 +559,12 @@ exit_lock_error: /* * Address and length are byte address */ -uint8_t * -qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +void * +qla8044_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { scsi_block_requests(vha->host); - if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4) + if (qla8044_read_flash_data(vha, buf, offset, length / 4) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xb08d, "%s: Failed to read from flash\n", @@ -3797,7 +3797,7 @@ qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr, } int -qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +qla8044_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval = QLA_FUNCTION_FAILED, i, burst_iter_count; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 03029d167eb7..1c6189bc8d2c 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -42,7 +42,7 @@ static struct kmem_cache *ctx_cachep; /* * error level for logging */ -int ql_errlev = ql_log_all; +uint ql_errlev = 0x8001; static int ql2xenableclass2; module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR); @@ -426,7 +426,7 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req, qla_cpu_update(rsp->qpair, raw_smp_processor_id()); ha->base_qpair->pdev = ha->pdev; - if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) ha->base_qpair->reqq_start_iocbs = qla_83xx_start_iocbs; } @@ -2748,6 +2748,24 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) ha->device_type |= DT_T10_PI; ha->fw_srisc_address = RISC_START_ADDRESS_2400; break; + case PCI_DEVICE_ID_QLOGIC_ISP2081: + case PCI_DEVICE_ID_QLOGIC_ISP2089: + ha->isp_type |= DT_ISP2081; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; + case PCI_DEVICE_ID_QLOGIC_ISP2281: + case PCI_DEVICE_ID_QLOGIC_ISP2289: + ha->isp_type |= DT_ISP2281; + ha->device_type |= DT_ZIO_SUPPORTED; + ha->device_type |= DT_FWI2; + ha->device_type |= DT_IIDMA; + ha->device_type |= DT_T10_PI; + ha->fw_srisc_address = RISC_START_ADDRESS_2400; + break; } if (IS_QLA82XX(ha)) @@ -2755,7 +2773,8 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) else { /* Get adapter physical port no from interrupt pin register. 
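*/

/*
 * The hunk below widens an existing quirk: the adapter port number is
 * derived from the PCI interrupt-pin register, with the newer ISPs
 * (25xx/2031/27xx/28xx) numbering ports as pin - 1 while other parts
 * use the pin's parity.  A small sketch of the two derivations;
 * new_style is an illustrative flag standing in for the IS_QLAxxxx()
 * checks:
 */

#include <stdbool.h>
#include <stdint.h>

static uint8_t port_from_interrupt_pin(uint8_t pin, bool new_style)
{
        return new_style ? pin - 1 : !(pin & 1);
}

/*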
*/ pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no); - if (IS_QLA27XX(ha)) + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || + IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->port_no--; else ha->port_no = !(ha->port_no & 1); @@ -2852,7 +2871,11 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271 || - pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261) { + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2261 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; ql_dbg_pci(ql_dbg_init, pdev, 0x0007, @@ -2901,7 +2924,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Set EEH reset type to fundamental if required by hba */ if (IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha) || - IS_QLA83XX(ha) || IS_QLA27XX(ha)) + IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) pdev->needs_freset = 1; ha->prev_topology = 0; @@ -3080,6 +3103,23 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX; ha->nvram_conf_off = ~0; ha->nvram_data_off = ~0; + } else if (IS_QLA28XX(ha)) { + ha->portnum = PCI_FUNC(ha->pdev->devfn); + ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; + ha->mbx_count = MAILBOX_REGISTER_COUNT; + req_length = REQUEST_ENTRY_CNT_24XX; + rsp_length = RESPONSE_ENTRY_CNT_2300; + ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; + ha->max_loop_id = SNS_LAST_LOOP_ID_2300; + ha->init_cb_size = sizeof(struct mid_init_cb_81xx); + ha->gid_list_info_size = 8; + ha->optrom_size = OPTROM_SIZE_28XX; + ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX; + ha->isp_ops = &qla27xx_isp_ops; + ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_28XX; + ha->flash_data_off = FARX_ACCESS_FLASH_DATA_28XX; + ha->nvram_conf_off = ~0; + ha->nvram_data_off = ~0; } ql_dbg_pci(ql_dbg_init, pdev, 0x001e, @@ -3245,7 +3285,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) req->req_q_out = &ha->iobase->isp24.req_q_out; rsp->rsp_q_in = &ha->iobase->isp24.rsp_q_in; rsp->rsp_q_out = &ha->iobase->isp24.rsp_q_out; - if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { req->req_q_in = &ha->mqiobase->isp25mq.req_q_in; req->req_q_out = &ha->mqiobase->isp25mq.req_q_out; rsp->rsp_q_in = &ha->mqiobase->isp25mq.rsp_q_in; @@ -3579,7 +3620,8 @@ qla2x00_shutdown(struct pci_dev *pdev) if (ha->eft) qla2x00_disable_eft_trace(vha); - if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { if (ha->flags.fw_started) qla2x00_abort_isp_cleanup(vha); } else { @@ -3684,7 +3726,8 @@ qla2x00_unmap_iobases(struct qla_hw_data *ha) if (ha->mqiobase) iounmap(ha->mqiobase); - if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) && ha->msixbase) + if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) && + ha->msixbase) iounmap(ha->msixbase); } } @@ -3735,9 +3778,8 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); - qla2x00_wait_for_sess_deletion(base_vha); - - if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { + if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { if 
(ha->flags.fw_started) qla2x00_abort_isp_cleanup(base_vha); } else if (!IS_QLAFX00(ha)) { @@ -3751,6 +3793,8 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_try_to_stop_firmware(base_vha); } + + qla2x00_wait_for_sess_deletion(base_vha); /* * if UNLOAD flag is already set, then continue unload, @@ -4225,7 +4269,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ha->npiv_info = NULL; /* Get consistent memory allocated for EX-INIT-CB. */ - if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) { + if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { ha->ex_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &ha->ex_init_cb_dma); if (!ha->ex_init_cb) @@ -4267,8 +4312,20 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, goto fail_sfp_data; } + ha->flt = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, &ha->flt_dma, + GFP_KERNEL); + if (!ha->flt) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, + "Unable to allocate memory for FLT.\n"); + goto fail_flt_buffer; + } + return 0; +fail_flt_buffer: + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + ha->sfp_data, ha->sfp_data_dma); fail_sfp_data: kfree(ha->loop_id_map); fail_loop_id_map: @@ -4604,6 +4661,9 @@ qla2x00_free_exchoffld_buffer(struct qla_hw_data *ha) static void qla2x00_free_fw_dump(struct qla_hw_data *ha) { + struct fwdt *fwdt = ha->fwdt; + uint j; + if (ha->fce) dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce, ha->fce_dma); @@ -4614,8 +4674,6 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha) if (ha->fw_dump) vfree(ha->fw_dump); - if (ha->fw_dump_template) - vfree(ha->fw_dump_template); ha->fce = NULL; ha->fce_dma = 0; @@ -4626,8 +4684,13 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha) ha->fw_dump_reading = 0; ha->fw_dump = NULL; ha->fw_dump_len = 0; - ha->fw_dump_template = NULL; - ha->fw_dump_template_len = 0; + + for (j = 0; j < 2; j++, fwdt++) { + if (fwdt->template) + vfree(fwdt->template); + fwdt->template = NULL; + fwdt->length = 0; + } } /* @@ -4668,6 +4731,10 @@ qla2x00_mem_free(struct qla_hw_data *ha) dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, ha->sfp_data_dma); + if (ha->flt) + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + ha->flt, ha->flt_dma); + if (ha->ms_iocb) dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); @@ -6690,7 +6757,7 @@ qla2x00_timer(scsi_qla_host_t *vha) if (!vha->vp_idx && (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) && (ha->zio_mode == QLA_ZIO_MODE_6) && - (IS_QLA83XX(ha) || IS_QLA27XX(ha))) { + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { ql_log(ql_log_info, vha, 0x3002, "Sched: Set ZIO exchange threshold to %d.\n", ha->last_zio_threshold); @@ -6736,7 +6803,6 @@ qla2x00_timer(scsi_qla_host_t *vha) /* Firmware interface routines. 
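 * The firmware blob table below drops the fixed FW_BLOBS count in
 * favor of a terminating { .name = NULL } sentinel, so walkers no
 * longer need the array length. Sketch of the resulting idiom (the
 * same loop the release path below adopts):
 *
 *	struct fw_blob *blob;
 *
 *	for (blob = qla_fw_blobs; blob->name; blob++)
 *		release_firmware(blob->fw);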
*/ -#define FW_BLOBS 11 #define FW_ISP21XX 0 #define FW_ISP22XX 1 #define FW_ISP2300 2 @@ -6748,6 +6814,7 @@ qla2x00_timer(scsi_qla_host_t *vha) #define FW_ISP2031 8 #define FW_ISP8031 9 #define FW_ISP27XX 10 +#define FW_ISP28XX 11 #define FW_FILE_ISP21XX "ql2100_fw.bin" #define FW_FILE_ISP22XX "ql2200_fw.bin" @@ -6760,11 +6827,12 @@ qla2x00_timer(scsi_qla_host_t *vha) #define FW_FILE_ISP2031 "ql2600_fw.bin" #define FW_FILE_ISP8031 "ql8300_fw.bin" #define FW_FILE_ISP27XX "ql2700_fw.bin" +#define FW_FILE_ISP28XX "ql2800_fw.bin" static DEFINE_MUTEX(qla_fw_lock); -static struct fw_blob qla_fw_blobs[FW_BLOBS] = { +static struct fw_blob qla_fw_blobs[] = { { .name = FW_FILE_ISP21XX, .segs = { 0x1000, 0 }, }, { .name = FW_FILE_ISP22XX, .segs = { 0x1000, 0 }, }, { .name = FW_FILE_ISP2300, .segs = { 0x800, 0 }, }, @@ -6776,6 +6844,8 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = { { .name = FW_FILE_ISP2031, }, { .name = FW_FILE_ISP8031, }, { .name = FW_FILE_ISP27XX, }, + { .name = FW_FILE_ISP28XX, }, + { .name = NULL, }, }; struct fw_blob * @@ -6806,10 +6876,15 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) blob = &qla_fw_blobs[FW_ISP8031]; } else if (IS_QLA27XX(ha)) { blob = &qla_fw_blobs[FW_ISP27XX]; + } else if (IS_QLA28XX(ha)) { + blob = &qla_fw_blobs[FW_ISP28XX]; } else { return NULL; } + if (!blob->name) + return NULL; + mutex_lock(&qla_fw_lock); if (blob->fw) goto out; @@ -6819,7 +6894,6 @@ qla2x00_request_firmware(scsi_qla_host_t *vha) "Failed to load firmware image (%s).\n", blob->name); blob->fw = NULL; blob = NULL; - goto out; } out: @@ -6830,11 +6904,11 @@ out: static void qla2x00_release_firmware(void) { - int idx; + struct fw_blob *blob; mutex_lock(&qla_fw_lock); - for (idx = 0; idx < FW_BLOBS; idx++) - release_firmware(qla_fw_blobs[idx].fw); + for (blob = qla_fw_blobs; blob->name; blob++) + release_firmware(blob->fw); mutex_unlock(&qla_fw_lock); } @@ -7182,7 +7256,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost) int rc; scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; - if (USER_CTRL_IRQ(vha->hw)) + if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) rc = blk_mq_map_queues(&shost->tag_set); else rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0); @@ -7221,6 +7295,11 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2261) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2061) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2081) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 2a3055c799fb..320c25f3a79a 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -429,66 +429,64 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) static inline uint32_t flash_conf_addr(struct qla_hw_data *ha, uint32_t faddr) { - return ha->flash_conf_off | faddr; + return ha->flash_conf_off + faddr; } static inline uint32_t flash_data_addr(struct qla_hw_data *ha, uint32_t faddr) { - return ha->flash_data_off | faddr; + return ha->flash_data_off + faddr; } static inline uint32_t nvram_conf_addr(struct qla_hw_data 
*ha, uint32_t naddr) { - return ha->nvram_conf_off | naddr; + return ha->nvram_conf_off + naddr; } static inline uint32_t nvram_data_addr(struct qla_hw_data *ha, uint32_t naddr) { - return ha->nvram_data_off | naddr; + return ha->nvram_data_off + naddr; } -static uint32_t -qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr) +static int +qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data) { - int rval; - uint32_t cnt, data; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 30000; WRT_REG_DWORD(®->flash_addr, addr & ~FARX_DATA_FLAG); - /* Wait for READ cycle to complete. */ - rval = QLA_SUCCESS; - for (cnt = 3000; - (RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG) == 0 && - rval == QLA_SUCCESS; cnt--) { - if (cnt) - udelay(10); - else - rval = QLA_FUNCTION_TIMEOUT; + + while (cnt--) { + if (RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG) { + *data = RD_REG_DWORD(®->flash_data); + return QLA_SUCCESS; + } + udelay(10); cond_resched(); } - /* TODO: What happens if we time out? */ - data = 0xDEADDEAD; - if (rval == QLA_SUCCESS) - data = RD_REG_DWORD(®->flash_data); - - return data; + ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090, + "Flash read dword at %x timeout.\n", addr); + *data = 0xDEADDEAD; + return QLA_FUNCTION_TIMEOUT; } uint32_t * qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { - uint32_t i; + ulong i; struct qla_hw_data *ha = vha->hw; /* Dword reads to flash. */ - for (i = 0; i < dwords; i++, faddr++) - dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, - flash_data_addr(ha, faddr))); + faddr = flash_data_addr(ha, faddr); + for (i = 0; i < dwords; i++, faddr++, dwptr++) { + if (qla24xx_read_flash_dword(ha, faddr, dwptr)) + break; + cpu_to_le32s(dwptr); + } return dwptr; } @@ -496,35 +494,37 @@ qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, static int qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data) { - int rval; - uint32_t cnt; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 500000; WRT_REG_DWORD(®->flash_data, data); - RD_REG_DWORD(®->flash_data); /* PCI Posting. */ WRT_REG_DWORD(®->flash_addr, addr | FARX_DATA_FLAG); - /* Wait for Write cycle to complete. */ - rval = QLA_SUCCESS; - for (cnt = 500000; (RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG) && - rval == QLA_SUCCESS; cnt--) { - if (cnt) - udelay(10); - else - rval = QLA_FUNCTION_TIMEOUT; + + while (cnt--) { + if (!(RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG)) + return QLA_SUCCESS; + udelay(10); cond_resched(); } - return rval; + + ql_log(ql_log_warn, pci_get_drvdata(ha->pdev), 0x7090, + "Flash write dword at %x timeout.\n", addr); + return QLA_FUNCTION_TIMEOUT; } static void qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id, uint8_t *flash_id) { - uint32_t ids; + uint32_t faddr, ids = 0; - ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x03ab)); - *man_id = LSB(ids); - *flash_id = MSB(ids); + *man_id = *flash_id = 0; + + faddr = flash_conf_addr(ha, 0x03ab); + if (!qla24xx_read_flash_dword(ha, faddr, &ids)) { + *man_id = LSB(ids); + *flash_id = MSB(ids); + } /* Check if man_id and flash_id are valid. */ if (ids != 0xDEADDEAD && (*man_id == 0 || *flash_id == 0)) { @@ -534,9 +534,11 @@ qla24xx_get_flash_manufacturer(struct qla_hw_data *ha, uint8_t *man_id, * Example: ATMEL 0x00 01 45 1F * Extract MFG and Dev ID from last two bytes. 
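 * Worked example for the ATMEL part above: the dword reads back as
 * ids == 0x0001451f, so LSB(ids) yields man_id 0x1f and MSB(ids)
 * yields flash_id 0x45 (illustrative values; the real IDs depend on
 * the fitted flash device).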
*/ - ids = qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x009f)); - *man_id = LSB(ids); - *flash_id = MSB(ids); + faddr = flash_conf_addr(ha, 0x009f); + if (!qla24xx_read_flash_dword(ha, faddr, &ids)) { + *man_id = LSB(ids); + *flash_id = MSB(ids); + } } } @@ -545,12 +547,12 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) { const char *loc, *locations[] = { "DEF", "PCI" }; uint32_t pcihdr, pcids; - uint32_t *dcode; - uint8_t *buf, *bcode, last_image; uint16_t cnt, chksum, *wptr; - struct qla_flt_location *fltl; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; + struct qla_flt_location *fltl = (void *)req->ring; + uint32_t *dcode = (void *)req->ring; + uint8_t *buf = (void *)req->ring, *bcode, last_image; /* * FLT-location structure resides after the last PCI region. @@ -571,12 +573,13 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { *start = FA_FLASH_LAYOUT_ADDR_83; goto end; + } else if (IS_QLA28XX(ha)) { + *start = FA_FLASH_LAYOUT_ADDR_28; + goto end; } + /* Begin with first PCI expansion ROM header. */ - buf = (uint8_t *)req->ring; - dcode = (uint32_t *)req->ring; pcihdr = 0; - last_image = 1; do { /* Verify PCI expansion ROM header. */ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); @@ -601,22 +604,19 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) } while (!last_image); /* Now verify FLT-location structure. */ - fltl = (struct qla_flt_location *)req->ring; - qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, - sizeof(struct qla_flt_location) >> 2); - if (fltl->sig[0] != 'Q' || fltl->sig[1] != 'F' || - fltl->sig[2] != 'L' || fltl->sig[3] != 'T') + qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, sizeof(*fltl) >> 2); + if (memcmp(fltl->sig, "QFLT", 4)) goto end; - wptr = (uint16_t *)req->ring; - cnt = sizeof(struct qla_flt_location) >> 1; + wptr = (void *)req->ring; + cnt = sizeof(*fltl) / sizeof(*wptr); for (chksum = 0; cnt--; wptr++) chksum += le16_to_cpu(*wptr); if (chksum) { ql_log(ql_log_fatal, vha, 0x0045, "Inconsistent FLTL detected: checksum=0x%x.\n", chksum); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e, - buf, sizeof(struct qla_flt_location)); + fltl, sizeof(*fltl)); return QLA_FUNCTION_FAILED; } @@ -634,7 +634,7 @@ end: static void qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) { - const char *loc, *locations[] = { "DEF", "FLT" }; + const char *locations[] = { "DEF", "FLT" }, *loc = locations[1]; const uint32_t def_fw[] = { FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR, FA_RISC_CODE_ADDR_81 }; const uint32_t def_boot[] = @@ -664,20 +664,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) const uint32_t fcp_prio_cfg1[] = { FA_FCP_PRIO1_ADDR, FA_FCP_PRIO1_ADDR_25, 0 }; - uint32_t def; - uint16_t *wptr; - uint16_t cnt, chksum; - uint32_t start; - struct qla_flt_header *flt; - struct qla_flt_region *region; - struct qla_hw_data *ha = vha->hw; - struct req_que *req = ha->req_q_map[0]; - def = 0; - if (IS_QLA25XX(ha)) - def = 1; - else if (IS_QLA81XX(ha)) - def = 2; + struct qla_hw_data *ha = vha->hw; + uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0; + struct qla_flt_header *flt = (void *)ha->flt; + struct qla_flt_region *region = (void *)&flt[1]; + uint16_t *wptr, cnt, chksum; + uint32_t start; /* Assign FCP prio region since older adapters may not have FLT, or FCP prio region in it's FLT. 
@@ -686,12 +679,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; ha->flt_region_flt = flt_addr; - wptr = (uint16_t *)req->ring; - flt = (struct qla_flt_header *)req->ring; - region = (struct qla_flt_region *)&flt[1]; - ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring, - flt_addr << 2, OPTROM_BURST_SIZE); - if (*wptr == cpu_to_le16(0xffff)) + wptr = (uint16_t *)ha->flt; + qla24xx_read_flash_data(vha, (void *)flt, flt_addr, + (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE) >> 2); + + if (le16_to_cpu(*wptr) == 0xffff) goto no_flash_data; if (flt->version != cpu_to_le16(1)) { ql_log(ql_log_warn, vha, 0x0047, @@ -701,7 +693,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) goto no_flash_data; } - cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1; + cnt = (sizeof(*flt) + le16_to_cpu(flt->length)) / sizeof(*wptr); for (chksum = 0; cnt--; wptr++) chksum += le16_to_cpu(*wptr); if (chksum) { @@ -712,18 +704,20 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) goto no_flash_data; } - loc = locations[1]; - cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); + cnt = le16_to_cpu(flt->length) / sizeof(*region); for ( ; cnt; cnt--, region++) { /* Store addresses as DWORD offsets. */ start = le32_to_cpu(region->start) >> 2; ql_dbg(ql_dbg_init, vha, 0x0049, - "FLT[%02x]: start=0x%x " - "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff, - start, le32_to_cpu(region->end) >> 2, - le32_to_cpu(region->size)); - - switch (le32_to_cpu(region->code) & 0xff) { + "FLT[%#x]: start=%#x end=%#x size=%#x.\n", + le16_to_cpu(region->code), start, + le32_to_cpu(region->end) >> 2, + le32_to_cpu(region->size) >> 2); + if (region->attribute) + ql_log(ql_dbg_init, vha, 0xffff, + "Region %x is secure\n", region->code); + + switch (le16_to_cpu(region->code)) { case FLT_REG_FCOE_FW: if (!IS_QLA8031(ha)) break; @@ -753,13 +747,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_vpd = start; break; case FLT_REG_VPD_2: - if (!IS_QLA27XX(ha)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 2) ha->flt_region_vpd = start; break; case FLT_REG_VPD_3: - if (!IS_QLA27XX(ha)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 3) ha->flt_region_vpd = start; @@ -777,13 +771,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_2: - if (!IS_QLA27XX(ha)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 2) ha->flt_region_nvram = start; break; case FLT_REG_NVRAM_3: - if (!IS_QLA27XX(ha)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) break; if (ha->port_no == 3) ha->flt_region_nvram = start; @@ -847,36 +841,74 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_nvram = start; break; case FLT_REG_IMG_PRI_27XX: - if (IS_QLA27XX(ha)) + if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: - if (IS_QLA27XX(ha)) + if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) ha->flt_region_img_status_sec = start; break; case FLT_REG_FW_SEC_27XX: - if (IS_QLA27XX(ha)) + if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) ha->flt_region_fw_sec = start; break; case FLT_REG_BOOTLOAD_SEC_27XX: - if (IS_QLA27XX(ha)) + if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) ha->flt_region_boot_sec = start; break; + case FLT_REG_AUX_IMG_PRI_28XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_aux_img_status_pri = start; + break; + 
case FLT_REG_AUX_IMG_SEC_28XX: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + ha->flt_region_aux_img_status_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_0: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 0) + ha->flt_region_nvram_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_1: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 1) + ha->flt_region_nvram_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_2: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 2) + ha->flt_region_nvram_sec = start; + break; + case FLT_REG_NVRAM_SEC_28XX_3: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 3) + ha->flt_region_nvram_sec = start; + break; case FLT_REG_VPD_SEC_27XX_0: - if (IS_QLA27XX(ha)) - ha->flt_region_vpd_sec = start; + case FLT_REG_VPD_SEC_28XX_0: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->flt_region_vpd_nvram_sec = start; + if (ha->port_no == 0) + ha->flt_region_vpd_sec = start; + } break; case FLT_REG_VPD_SEC_27XX_1: - if (IS_QLA27XX(ha)) - ha->flt_region_vpd_sec = start; + case FLT_REG_VPD_SEC_28XX_1: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 1) + ha->flt_region_vpd_sec = start; break; case FLT_REG_VPD_SEC_27XX_2: - if (IS_QLA27XX(ha)) - ha->flt_region_vpd_sec = start; + case FLT_REG_VPD_SEC_28XX_2: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 2) + ha->flt_region_vpd_sec = start; break; case FLT_REG_VPD_SEC_27XX_3: - if (IS_QLA27XX(ha)) - ha->flt_region_vpd_sec = start; + case FLT_REG_VPD_SEC_28XX_3: + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + if (ha->port_no == 3) + ha->flt_region_vpd_sec = start; break; } } @@ -912,22 +944,19 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) #define FLASH_BLK_SIZE_32K 0x8000 #define FLASH_BLK_SIZE_64K 0x10000 const char *loc, *locations[] = { "MID", "FDT" }; + struct qla_hw_data *ha = vha->hw; + struct req_que *req = ha->req_q_map[0]; uint16_t cnt, chksum; - uint16_t *wptr; - struct qla_fdt_layout *fdt; + uint16_t *wptr = (void *)req->ring; + struct qla_fdt_layout *fdt = (void *)req->ring; uint8_t man_id, flash_id; uint16_t mid = 0, fid = 0; - struct qla_hw_data *ha = vha->hw; - struct req_que *req = ha->req_q_map[0]; - wptr = (uint16_t *)req->ring; - fdt = (struct qla_fdt_layout *)req->ring; - ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring, - ha->flt_region_fdt << 2, OPTROM_BURST_SIZE); - if (*wptr == cpu_to_le16(0xffff)) + qla24xx_read_flash_data(vha, (void *)fdt, ha->flt_region_fdt, + OPTROM_BURST_DWORDS); + if (le16_to_cpu(*wptr) == 0xffff) goto no_flash_data; - if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' || - fdt->sig[3] != 'D') + if (memcmp(fdt->sig, "QLID", 4)) goto no_flash_data; for (cnt = 0, chksum = 0; cnt < sizeof(*fdt) >> 1; cnt++, wptr++) @@ -938,7 +967,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) " checksum=0x%x id=%c version0x%x.\n", chksum, fdt->sig[0], le16_to_cpu(fdt->version)); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113, - (uint8_t *)fdt, sizeof(*fdt)); + fdt, sizeof(*fdt)); goto no_flash_data; } @@ -1019,8 +1048,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha) return; wptr = (uint32_t *)req->ring; - ha->isp_ops->read_optrom(vha, (uint8_t *)req->ring, - QLA82XX_IDC_PARAM_ADDR , 8); + ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8); if (*wptr == cpu_to_le32(0xffffffff)) { ha->fcoe_dev_init_timeout = QLA82XX_ROM_DEV_INIT_TIMEOUT; @@ -1045,7 +1073,8 @@ qla2xxx_get_flash_info(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) && - 
!IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLA27XX(ha)) + !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return QLA_SUCCESS; ret = qla2xxx_find_flt_start(vha, &flt_addr); @@ -1081,8 +1110,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) if (IS_QLA8044(ha)) return; - ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, - ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); + ha->isp_ops->read_optrom(vha, &hdr, ha->flt_region_npiv_conf << 2, + sizeof(struct qla_npiv_header)); if (hdr.version == cpu_to_le16(0xffff)) return; if (hdr.version != cpu_to_le16(1)) { @@ -1101,8 +1130,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) return; } - ha->isp_ops->read_optrom(vha, (uint8_t *)data, - ha->flt_region_npiv_conf << 2, NPIV_CONFIG_SIZE); + ha->isp_ops->read_optrom(vha, data, ha->flt_region_npiv_conf << 2, + NPIV_CONFIG_SIZE); cnt = (sizeof(hdr) + le16_to_cpu(hdr.entries) * sizeof(*entry)) >> 1; for (wptr = data, chksum = 0; cnt--; wptr++) @@ -1139,10 +1168,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) vid.node_name = wwn_to_u64(entry->node_name); ql_dbg(ql_dbg_user, vha, 0x7093, - "NPIV[%02x]: wwpn=%llx " - "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt, - (unsigned long long)vid.port_name, - (unsigned long long)vid.node_name, + "NPIV[%02x]: wwpn=%llx wwnn=%llx vf_id=%#x Q_qos=%#x F_qos=%#x.\n", + cnt, vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id), entry->q_qos, entry->f_qos); @@ -1150,10 +1177,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) vport = fc_vport_create(vha->host, 0, &vid); if (!vport) ql_log(ql_log_warn, vha, 0x7094, - "NPIV-Config Failed to create vport [%02x]: " - "wwpn=%llx wwnn=%llx.\n", cnt, - (unsigned long long)vid.port_name, - (unsigned long long)vid.node_name); + "NPIV-Config Failed to create vport [%02x]: wwpn=%llx wwnn=%llx.\n", + cnt, vid.port_name, vid.node_name); } } done: @@ -1188,9 +1213,10 @@ done: static int qla24xx_protect_flash(scsi_qla_host_t *vha) { - uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + ulong cnt = 300; + uint32_t faddr, dword; if (ha->flags.fac_supported) return qla81xx_fac_do_write_enable(vha, 0); @@ -1199,11 +1225,14 @@ qla24xx_protect_flash(scsi_qla_host_t *vha) goto skip_wrt_protect; /* Enable flash write-protection and wait for completion. */ - qla24xx_write_flash_dword(ha, flash_conf_addr(ha, 0x101), - ha->fdt_wrt_disable); - for (cnt = 300; cnt && - qla24xx_read_flash_dword(ha, flash_conf_addr(ha, 0x005)) & BIT_0; - cnt--) { + faddr = flash_conf_addr(ha, 0x101); + qla24xx_write_flash_dword(ha, faddr, ha->fdt_wrt_disable); + faddr = flash_conf_addr(ha, 0x5); + while (cnt--) { + if (!qla24xx_read_flash_dword(ha, faddr, &dword)) { + if (!(dword & BIT_0)) + break; + } udelay(10); } @@ -1211,7 +1240,6 @@ skip_wrt_protect: /* Disable flash write. */ WRT_REG_DWORD(®->ctrl_status, RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE); - RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ return QLA_SUCCESS; } @@ -1239,107 +1267,103 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { int ret; - uint32_t liter; - uint32_t sec_mask, rest_addr; - uint32_t fdata; + ulong liter; + ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */ + uint32_t sec_mask, rest_addr, fdata; dma_addr_t optrom_dma; void *optrom = NULL; struct qla_hw_data *ha = vha->hw; - /* Prepare burst-capable write on supported ISPs. 
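 * Shape of the rewritten loop: erase each sector as its boundary is
 * reached, then push up to OPTROM_BURST_DWORDS per qla2x00_load_ram()
 * call, trimming the final burst to whatever remains; only a failed
 * burst falls back to per-dword programming (27xx/28xx abort
 * instead). With rest_addr = (fdt_block_size >> 2) - 1, e.g. 0x3fff
 * for a 64 KB block, (faddr & rest_addr) == 0 holds exactly once per
 * erase sector.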
*/ - if ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || - IS_QLA27XX(ha)) && - !(faddr & 0xfff) && dwords > OPTROM_BURST_DWORDS) { - optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, - &optrom_dma, GFP_KERNEL); - if (!optrom) { - ql_log(ql_log_warn, vha, 0x7095, - "Unable to allocate " - "memory for optrom burst write (%x KB).\n", - OPTROM_BURST_SIZE / 1024); - } - } + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + goto next; - rest_addr = (ha->fdt_block_size >> 2) - 1; - sec_mask = ~rest_addr; + /* Allocate dma buffer for burst write */ + optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, + &optrom_dma, GFP_KERNEL); + if (!optrom) { + ql_log(ql_log_warn, vha, 0x7095, + "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE); + } +next: + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Unprotect flash...\n"); ret = qla24xx_unprotect_flash(vha); - if (ret != QLA_SUCCESS) { + if (ret) { ql_log(ql_log_warn, vha, 0x7096, - "Unable to unprotect flash for update.\n"); + "Failed to unprotect flash.\n"); goto done; } + rest_addr = (ha->fdt_block_size >> 2) - 1; + sec_mask = ~rest_addr; for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { fdata = (faddr & sec_mask) << 2; /* Are we at the beginning of a sector? */ - if ((faddr & rest_addr) == 0) { - /* Do sector unprotect. */ - if (ha->fdt_unprotect_sec_cmd) - qla24xx_write_flash_dword(ha, - ha->fdt_unprotect_sec_cmd, - (fdata & 0xff00) | ((fdata << 16) & - 0xff0000) | ((fdata >> 16) & 0xff)); + if (!(faddr & rest_addr)) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Erase sector %#x...\n", faddr); + ret = qla24xx_erase_sector(vha, fdata); - if (ret != QLA_SUCCESS) { + if (ret) { ql_dbg(ql_dbg_user, vha, 0x7007, - "Unable to erase erase sector: address=%x.\n", - faddr); + "Failed to erase sector %x.\n", faddr); break; } } - /* Go with burst-write. */ - if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) { - /* Copy data to DMA'ble buffer. 
*/ - memcpy(optrom, dwptr, OPTROM_BURST_SIZE); + if (optrom) { + /* If smaller than a burst remaining */ + if (dwords - liter < dburst) + dburst = dwords - liter; + /* Copy to dma buffer */ + memcpy(optrom, dwptr, dburst << 2); + + /* Burst write */ + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Write burst (%#lx dwords)...\n", dburst); ret = qla2x00_load_ram(vha, optrom_dma, - flash_data_addr(ha, faddr), - OPTROM_BURST_DWORDS); - if (ret != QLA_SUCCESS) { - ql_log(ql_log_warn, vha, 0x7097, - "Unable to burst-write optrom segment " - "(%x/%x/%llx).\n", ret, - flash_data_addr(ha, faddr), - (unsigned long long)optrom_dma); - ql_log(ql_log_warn, vha, 0x7098, - "Reverting to slow-write.\n"); - - dma_free_coherent(&ha->pdev->dev, - OPTROM_BURST_SIZE, optrom, optrom_dma); - optrom = NULL; - } else { - liter += OPTROM_BURST_DWORDS - 1; - faddr += OPTROM_BURST_DWORDS - 1; - dwptr += OPTROM_BURST_DWORDS - 1; + flash_data_addr(ha, faddr), dburst); + if (!ret) { + liter += dburst - 1; + faddr += dburst - 1; + dwptr += dburst - 1; continue; } + + ql_log(ql_log_warn, vha, 0x7097, + "Failed burst-write at %x (%p/%#llx)...\n", + flash_data_addr(ha, faddr), optrom, + (u64)optrom_dma); + + dma_free_coherent(&ha->pdev->dev, + OPTROM_BURST_SIZE, optrom, optrom_dma); + optrom = NULL; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + break; + ql_log(ql_log_warn, vha, 0x7098, + "Reverting to slow write...\n"); } + /* Slow write */ ret = qla24xx_write_flash_dword(ha, flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); - if (ret != QLA_SUCCESS) { + if (ret) { ql_dbg(ql_dbg_user, vha, 0x7006, - "Unable to program flash address=%x data=%x.\n", - faddr, *dwptr); + "Failed slow write %x (%x)\n", faddr, *dwptr); break; } - - /* Do sector protect. */ - if (ha->fdt_unprotect_sec_cmd && - ((faddr & rest_addr) == rest_addr)) - qla24xx_write_flash_dword(ha, - ha->fdt_protect_sec_cmd, - (fdata & 0xff00) | ((fdata << 16) & - 0xff0000) | ((fdata >> 16) & 0xff)); } + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Protect flash...\n"); ret = qla24xx_protect_flash(vha); - if (ret != QLA_SUCCESS) + if (ret) ql_log(ql_log_warn, vha, 0x7099, - "Unable to protect flash after update.\n"); + "Failed to protect flash\n"); done: if (optrom) dma_free_coherent(&ha->pdev->dev, @@ -1349,7 +1373,7 @@ done: } uint8_t * -qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, +qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { uint32_t i; @@ -1368,27 +1392,30 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t bytes) } uint8_t * -qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, +qla24xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { - uint32_t i; - uint32_t *dwptr; struct qla_hw_data *ha = vha->hw; + uint32_t *dwptr = buf; + uint32_t i; if (IS_P3P_TYPE(ha)) return buf; /* Dword reads to flash.
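 * qla24xx_read_flash_dword() now returns a status and passes the
 * data back through a pointer, so the loop below stops at the first
 * timeout and leaves the rest of the caller's buffer untouched;
 * cpu_to_le32s() then byteswaps each fetched dword in place.
 * Per-dword idiom, roughly:
 *
 *	if (qla24xx_read_flash_dword(ha, naddr, dwptr))
 *		break;	(timed out; *dwptr holds 0xDEADDEAD)
 *	cpu_to_le32s(dwptr);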
*/ - dwptr = (uint32_t *)buf; - for (i = 0; i < bytes >> 2; i++, naddr++) - dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, - nvram_data_addr(ha, naddr))); + naddr = nvram_data_addr(ha, naddr); + bytes >>= 2; + for (i = 0; i < bytes; i++, naddr++, dwptr++) { + if (qla24xx_read_flash_dword(ha, naddr, dwptr)) + break; + cpu_to_le32s(dwptr); + } return buf; } int -qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, +qla2x00_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { int ret, stat; @@ -1422,14 +1449,14 @@ qla2x00_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, } int -qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, +qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { - int ret; - uint32_t i; - uint32_t *dwptr; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + uint32_t *dwptr = buf; + uint32_t i; + int ret; ret = QLA_SUCCESS; @@ -1446,11 +1473,10 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0); /* Dword writes to flash. */ - dwptr = (uint32_t *)buf; - for (i = 0; i < bytes >> 2; i++, naddr++, dwptr++) { - ret = qla24xx_write_flash_dword(ha, - nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr)); - if (ret != QLA_SUCCESS) { + naddr = nvram_data_addr(ha, naddr); + bytes >>= 2; + for (i = 0; i < bytes; i++, naddr++, dwptr++) { + if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) { ql_dbg(ql_dbg_user, vha, 0x709a, "Unable to program nvram address=%x data=%x.\n", naddr, *dwptr); @@ -1470,31 +1496,34 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, } uint8_t * -qla25xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, +qla25xx_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { - uint32_t i; - uint32_t *dwptr; struct qla_hw_data *ha = vha->hw; + uint32_t *dwptr = buf; + uint32_t i; /* Dword reads to flash. 
*/ - dwptr = (uint32_t *)buf; - for (i = 0; i < bytes >> 2; i++, naddr++) - dwptr[i] = cpu_to_le32(qla24xx_read_flash_dword(ha, - flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr))); + naddr = flash_data_addr(ha, ha->flt_region_vpd_nvram | naddr); + bytes >>= 2; + for (i = 0; i < bytes; i++, naddr++, dwptr++) { + if (qla24xx_read_flash_dword(ha, naddr, dwptr)) + break; + + cpu_to_le32s(dwptr); + } return buf; } +#define RMW_BUFFER_SIZE (64 * 1024) int -qla25xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr, +qla25xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { struct qla_hw_data *ha = vha->hw; -#define RMW_BUFFER_SIZE (64 * 1024) - uint8_t *dbuf; + uint8_t *dbuf = vmalloc(RMW_BUFFER_SIZE); - dbuf = vmalloc(RMW_BUFFER_SIZE); if (!dbuf) return QLA_MEMORY_ALLOC_FAILED; ha->isp_ops->read_optrom(vha, dbuf, ha->flt_region_vpd_nvram << 2, @@ -1728,7 +1757,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha) { uint32_t led_select_value = 0; - if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) goto out; if (ha->port_no == 0) @@ -1749,13 +1778,14 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha) uint16_t orig_led_cfg[6]; uint32_t led_10_value, led_43_value; - if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha)) + if (!IS_QLA83XX(ha) && !IS_QLA81XX(ha) && !IS_QLA27XX(ha) && + !IS_QLA28XX(ha)) return; if (!ha->beacon_blink_led) return; - if (IS_QLA27XX(ha)) { + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { qla2x00_write_ram_word(vha, 0x1003, 0x40000230); qla2x00_write_ram_word(vha, 0x1004, 0x40000230); } else if (IS_QLA2031(ha)) { @@ -1845,7 +1875,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) return QLA_FUNCTION_FAILED; } - if (IS_QLA2031(ha) || IS_QLA27XX(ha)) + if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) goto skip_gpio; spin_lock_irqsave(&ha->hardware_lock, flags); @@ -1885,7 +1915,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) ha->beacon_blink_led = 0; - if (IS_QLA2031(ha) || IS_QLA27XX(ha)) + if (IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) goto set_fw_options; if (IS_QLA8031(ha) || IS_QLA81XX(ha)) @@ -2314,8 +2344,8 @@ qla2x00_resume_hba(struct scsi_qla_host *vha) scsi_unblock_requests(vha->host); } -uint8_t * -qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +void * +qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { uint32_t addr, midpoint; @@ -2349,12 +2379,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, } int -qla2x00_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; - uint8_t man_id, flash_id, sec_number, data; + uint8_t man_id, flash_id, sec_number, *data; uint16_t wd; uint32_t addr, liter, sec_mask, rest_addr; struct qla_hw_data *ha = vha->hw; @@ -2483,7 +2513,7 @@ update_flash: for (addr = offset, liter = 0; liter < length; liter++, addr++) { - data = buf[liter]; + data = buf + liter; /* Are we at the beginning of a sector? 
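 * rest_addr here is the flash part's sector size in bytes minus one
 * (set per man_id/flash_id earlier in this function), so the test
 * fires exactly once per erase sector: with rest_addr = 0x1fff, for
 * example, at addr 0x0000, 0x2000, 0x4000, ...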
*/ if ((addr & rest_addr) == 0) { if (IS_QLA2322(ha) || IS_QLA6322(ha)) { @@ -2551,7 +2581,7 @@ update_flash: } } - if (qla2x00_program_flash_address(ha, addr, data, + if (qla2x00_program_flash_address(ha, addr, *data, man_id, flash_id)) { rval = QLA_FUNCTION_FAILED; break; @@ -2567,8 +2597,8 @@ update_flash: return rval; } -uint8_t * -qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +void * +qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { struct qla_hw_data *ha = vha->hw; @@ -2578,7 +2608,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Go with read. */ - qla24xx_read_flash_data(vha, (uint32_t *)buf, offset >> 2, length >> 2); + qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2); /* Resume HBA. */ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); @@ -2587,8 +2617,340 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, return buf; } +static int +qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf, + uint32_t len, uint32_t buf_size_without_sfub, uint8_t *sfub_buf) +{ + uint32_t *p, check_sum = 0; + int i; + + p = buf + buf_size_without_sfub; + + /* Extract SFUB from end of file */ + memcpy(sfub_buf, (uint8_t *)p, + sizeof(struct secure_flash_update_block)); + + for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++) + check_sum += p[i]; + + check_sum = (~check_sum) + 1; + + if (check_sum != p[i]) { + ql_log(ql_log_warn, vha, 0x7097, + "SFUB checksum failed, 0x%x, 0x%x\n", + check_sum, p[i]); + return QLA_COMMAND_ERROR; + } + + return QLA_SUCCESS; +} + +static int +qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start, + struct qla_flt_region *region) +{ + struct qla_hw_data *ha = vha->hw; + struct qla_flt_header *flt; + struct qla_flt_region *flt_reg; + uint16_t cnt; + int rval = QLA_FUNCTION_FAILED; + + if (!ha->flt) + return QLA_FUNCTION_FAILED; + + flt = (struct qla_flt_header *)ha->flt; + flt_reg = (struct qla_flt_region *)&flt[1]; + cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); + + for (; cnt; cnt--, flt_reg++) { + if (flt_reg->start == start) { + memcpy((uint8_t *)region, flt_reg, + sizeof(struct qla_flt_region)); + rval = QLA_SUCCESS; + break; + } + } + + return rval; +} + +static int +qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, + uint32_t dwords) +{ + struct qla_hw_data *ha = vha->hw; + ulong liter; + ulong dburst = OPTROM_BURST_DWORDS; /* burst size in dwords */ + uint32_t sec_mask, rest_addr, fdata; + void *optrom = NULL; + dma_addr_t optrom_dma; + int rval; + struct secure_flash_update_block *sfub; + dma_addr_t sfub_dma; + uint32_t offset = faddr << 2; + uint32_t buf_size_without_sfub = 0; + struct qla_flt_region region; + bool reset_to_rom = false; + uint32_t risc_size, risc_attr = 0; + uint32_t *fw_array = NULL; + + /* Retrieve region info - must be a start address passed in */ + rval = qla28xx_get_flash_region(vha, offset, ®ion); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Invalid address %x - not a region start address\n", + offset); + goto done; + } + + /* Allocate dma buffer for burst write */ + optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, + &optrom_dma, GFP_KERNEL); + if (!optrom) { + ql_log(ql_log_warn, vha, 0x7095, + "Failed allocate burst (%x bytes)\n", OPTROM_BURST_SIZE); + rval = QLA_COMMAND_ERROR; + goto done; + } + + /* + * If adapter 
supports secure flash and region is secure + * extract secure flash update block (SFUB) and verify + */ + if (ha->flags.secure_adapter && region.attribute) { + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Region %x is secure\n", region.code); + + if (region.code == FLT_REG_FW || + region.code == FLT_REG_FW_SEC_27XX) { + fw_array = dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); + risc_attr = be32_to_cpu(fw_array[9]); + + buf_size_without_sfub = risc_size; + fw_array += risc_size; + + /* 2nd fw array */ + risc_size = be32_to_cpu(fw_array[3]); + + buf_size_without_sfub += risc_size; + fw_array += risc_size; + + /* 1st dump template */ + risc_size = be32_to_cpu(fw_array[2]); + + /* skip header and ignore checksum */ + buf_size_without_sfub += risc_size; + fw_array += risc_size; + + if (risc_attr & BIT_9) { + /* 2nd dump template */ + risc_size = be32_to_cpu(fw_array[2]); + + /* skip header and ignore checksum */ + buf_size_without_sfub += risc_size; + fw_array += risc_size; + } + } else { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Secure region %x not supported\n", + region.code); + rval = QLA_COMMAND_ERROR; + goto done; + } + + sfub = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct secure_flash_update_block), &sfub_dma, + GFP_KERNEL); + if (!sfub) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to allocate memory for SFUB\n"); + rval = QLA_COMMAND_ERROR; + goto done; + } + + rval = qla28xx_extract_sfub_and_verify(vha, dwptr, dwords, + buf_size_without_sfub, (uint8_t *)sfub); + + if (rval != QLA_SUCCESS) + goto done; + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "SFUB extract and verify successful\n"); + } + + rest_addr = (ha->fdt_block_size >> 2) - 1; + sec_mask = ~rest_addr; + + /* Lock semaphore */ + rval = qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_LOCK); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to lock flash semaphore."); + goto done; + } + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Unprotect flash...\n"); + rval = qla24xx_unprotect_flash(vha); + if (rval) { + qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); + ql_log(ql_log_warn, vha, 0x7096, "Failed unprotect flash\n"); + goto done; + } + + for (liter = 0; liter < dwords; liter++, faddr++) { + fdata = (faddr & sec_mask) << 2; + + /* If start of sector */ + if (!(faddr & rest_addr)) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Erase sector %#x...\n", faddr); + rval = qla24xx_erase_sector(vha, fdata); + if (rval) { + ql_dbg(ql_dbg_user, vha, 0x7007, + "Failed erase sector %#x\n", faddr); + goto write_protect; + } + } + } + + if (ha->flags.secure_adapter) { + /* + * If adapter supports secure flash but FW doesn't, + * disable write protect, release semaphore and reset + * chip to execute ROM code in order to update region securely + */ + if (!ha->flags.secure_fw) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Disable Write and Release Semaphore."); + rval = qla24xx_protect_flash(vha); + if (rval != QLA_SUCCESS) { + qla81xx_fac_semaphore_access(vha, + FAC_SEMAPHORE_UNLOCK); + ql_log(ql_log_warn, vha, 0xffff, + "Unable to protect flash."); + goto done; + } + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Reset chip to ROM."); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + set_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + rval = qla2x00_wait_for_chip_reset(vha); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to reset to ROM code."); + goto done; + } + 
reset_to_rom = true; + ha->flags.fac_supported = 0; + + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Lock Semaphore"); + rval = qla2xxx_write_remote_register(vha, + FLASH_SEMAPHORE_REGISTER_ADDR, 0x00020002); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Unable to lock flash semaphore."); + goto done; + } + + /* Unprotect flash */ + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Enable Write."); + rval = qla2x00_write_ram_word(vha, 0x7ffd0101, 0); + if (rval) { + ql_log(ql_log_warn, vha, 0x7096, + "Failed unprotect flash\n"); + goto done; + } + } + + /* If region is secure, send Secure Flash MB Cmd */ + if (region.attribute && buf_size_without_sfub) { + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, + "Sending Secure Flash MB Cmd\n"); + rval = qla28xx_secure_flash_update(vha, 0, region.code, + buf_size_without_sfub, sfub_dma, + sizeof(struct secure_flash_update_block)); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0xffff, + "Secure Flash MB Cmd failed %x.", rval); + goto write_protect; + } + } + + } + + /* re-init flash offset */ + faddr = offset >> 2; + + for (liter = 0; liter < dwords; liter++, faddr++, dwptr++) { + fdata = (faddr & sec_mask) << 2; + + /* If smaller than a burst remaining */ + if (dwords - liter < dburst) + dburst = dwords - liter; + + /* Copy to dma buffer */ + memcpy(optrom, dwptr, dburst << 2); + + /* Burst write */ + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Write burst (%#lx dwords)...\n", dburst); + rval = qla2x00_load_ram(vha, optrom_dma, + flash_data_addr(ha, faddr), dburst); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7097, + "Failed burst write at %x (%p/%#llx)...\n", + flash_data_addr(ha, faddr), optrom, + (u64)optrom_dma); + break; + } + + liter += dburst - 1; + faddr += dburst - 1; + dwptr += dburst - 1; + continue; + } + +write_protect: + ql_log(ql_log_warn + ql_dbg_verbose, vha, 0x7095, + "Protect flash...\n"); + rval = qla24xx_protect_flash(vha); + if (rval) { + qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); + ql_log(ql_log_warn, vha, 0x7099, + "Failed protect flash\n"); + } + + if (reset_to_rom == true) { + /* Schedule DPC to restart the RISC */ + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + + rval = qla2x00_wait_for_hba_online(vha); + if (rval != QLA_SUCCESS) + ql_log(ql_log_warn, vha, 0xffff, + "Adapter did not come out of reset\n"); + } + +done: + if (optrom) + dma_free_coherent(&ha->pdev->dev, + OPTROM_BURST_SIZE, optrom, optrom_dma); + + return rval; +} + int -qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; @@ -2599,8 +2961,12 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Go with write. 
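 * ISP28xx writes are now routed through qla28xx_write_flash_data(),
 * which requires offset to be a region start address and drives the
 * secure-update sequence when the region demands it; all other ISPs
 * keep the plain qla24xx_write_flash_data() path.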
*/ - rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, offset >> 2, - length >> 2); + if (IS_QLA28XX(ha)) + rval = qla28xx_write_flash_data(vha, (uint32_t *)buf, + offset >> 2, length >> 2); + else + rval = qla24xx_write_flash_data(vha, (uint32_t *)buf, + offset >> 2, length >> 2); clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); scsi_unblock_requests(vha->host); @@ -2608,8 +2974,8 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, return rval; } -uint8_t * -qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, +void * +qla25xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, uint32_t offset, uint32_t length) { int rval; @@ -2620,7 +2986,7 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, struct qla_hw_data *ha = vha->hw; if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || - IS_QLA27XX(ha)) + IS_QLA27XX(ha) || IS_QLA28XX(ha)) goto try_fast; if (offset & 0xfff) goto slow_read; @@ -2628,6 +2994,8 @@ qla25xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf, goto slow_read; try_fast: + if (offset & 0xff) + goto slow_read; optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE, &optrom_dma, GFP_KERNEL); if (!optrom) { @@ -2874,7 +3242,7 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf) "Dumping fw " "ver from flash:.\n"); ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b, - (uint8_t *)dbyte, 8); + dbyte, 32); if ((dcode[0] == 0xffff && dcode[1] == 0xffff && dcode[2] == 0xffff && dcode[3] == 0xffff) || @@ -2905,8 +3273,8 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) { int ret = QLA_SUCCESS; uint32_t pcihdr, pcids; - uint32_t *dcode; - uint8_t *bcode; + uint32_t *dcode = mbuf; + uint8_t *bcode = mbuf; uint8_t code_type, last_image; struct qla_hw_data *ha = vha->hw; @@ -2918,17 +3286,14 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); - dcode = mbuf; - /* Begin with first PCI expansion ROM header. */ pcihdr = ha->flt_region_boot << 2; last_image = 1; do { /* Verify PCI expansion ROM header. */ - ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr, - 0x20 * 4); + ha->isp_ops->read_optrom(vha, dcode, pcihdr, 0x20 * 4); bcode = mbuf + (pcihdr % 4); - if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { + if (memcmp(bcode, "\x55\xaa", 2)) { /* No signature */ ql_log(ql_log_fatal, vha, 0x0154, "No matching ROM signature.\n"); @@ -2939,13 +3304,11 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) /* Locate PCI data structure. */ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]); - ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids, - 0x20 * 4); + ha->isp_ops->read_optrom(vha, dcode, pcids, 0x20 * 4); bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. */ - if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || - bcode[0x2] != 'I' || bcode[0x3] != 'R') { + if (memcmp(bcode, "PCIR", 4)) { /* Incorrect header. */ ql_log(ql_log_fatal, vha, 0x0155, "PCI data struct not found pcir_adr=%x.\n", pcids); @@ -2996,8 +3359,7 @@ qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) /* Read firmware image information. */ memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); dcode = mbuf; - ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2, - 0x20); + ha->isp_ops->read_optrom(vha, dcode, ha->flt_region_fw << 2, 0x20); bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. 
*/ @@ -3019,15 +3381,14 @@ int qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) { int ret = QLA_SUCCESS; - uint32_t pcihdr, pcids; - uint32_t *dcode; - uint8_t *bcode; + uint32_t pcihdr = 0, pcids = 0; + uint32_t *dcode = mbuf; + uint8_t *bcode = mbuf; uint8_t code_type, last_image; int i; struct qla_hw_data *ha = vha->hw; uint32_t faddr = 0; - - pcihdr = pcids = 0; + struct active_regions active_regions = { }; if (IS_P3P_TYPE(ha)) return ret; @@ -3040,18 +3401,19 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision)); memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); - dcode = mbuf; pcihdr = ha->flt_region_boot << 2; - if (IS_QLA27XX(ha) && - qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) - pcihdr = ha->flt_region_boot_sec << 2; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + qla27xx_get_active_image(vha, &active_regions); + if (active_regions.global == QLA27XX_SECONDARY_IMAGE) { + pcihdr = ha->flt_region_boot_sec << 2; + } + } - last_image = 1; do { /* Verify PCI expansion ROM header. */ qla24xx_read_flash_data(vha, dcode, pcihdr >> 2, 0x20); bcode = mbuf + (pcihdr % 4); - if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) { + if (memcmp(bcode, "\x55\xaa", 2)) { /* No signature */ ql_log(ql_log_fatal, vha, 0x0059, "No matching ROM signature.\n"); @@ -3066,11 +3428,11 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) bcode = mbuf + (pcihdr % 4); /* Validate signature of PCI data structure. */ - if (bcode[0x0] != 'P' || bcode[0x1] != 'C' || - bcode[0x2] != 'I' || bcode[0x3] != 'R') { + if (memcmp(bcode, "PCIR", 4)) { /* Incorrect header. */ ql_log(ql_log_fatal, vha, 0x005a, "PCI data struct not found pcir_adr=%x.\n", pcids); + ql_dump_buffer(ql_dbg_init, vha, 0x0059, dcode, 32); ret = QLA_FUNCTION_FAILED; break; } @@ -3117,30 +3479,24 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) /* Read firmware image information. 
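 * ISP27xx/28xx carry primary and secondary boot/firmware regions;
 * qla27xx_get_active_image() (replacing qla27xx_find_valid_image())
 * reports which image the adapter will actually boot, so the version
 * below is read from that region.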
*/ memset(ha->fw_revision, 0, sizeof(ha->fw_revision)); - dcode = mbuf; faddr = ha->flt_region_fw; - if (IS_QLA27XX(ha) && - qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE) - faddr = ha->flt_region_fw_sec; - - qla24xx_read_flash_data(vha, dcode, faddr + 4, 4); - for (i = 0; i < 4; i++) - dcode[i] = be32_to_cpu(dcode[i]); + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + qla27xx_get_active_image(vha, &active_regions); + if (active_regions.global == QLA27XX_SECONDARY_IMAGE) + faddr = ha->flt_region_fw_sec; + } - if ((dcode[0] == 0xffffffff && dcode[1] == 0xffffffff && - dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) || - (dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 && - dcode[3] == 0)) { + qla24xx_read_flash_data(vha, dcode, faddr, 8); + if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_warn, vha, 0x005f, "Unrecognized fw revision at %x.\n", ha->flt_region_fw * 4); + ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32); } else { - ha->fw_revision[0] = dcode[0]; - ha->fw_revision[1] = dcode[1]; - ha->fw_revision[2] = dcode[2]; - ha->fw_revision[3] = dcode[3]; + for (i = 0; i < 4; i++) + ha->fw_revision[i] = be32_to_cpu(dcode[4+i]); ql_dbg(ql_dbg_init, vha, 0x0060, - "Firmware revision %d.%d.%d (%x).\n", + "Firmware revision (flash) %u.%u.%u (%x).\n", ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2], ha->fw_revision[3]); } @@ -3152,20 +3508,17 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf) } memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version)); - dcode = mbuf; - ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, - ha->flt_region_gold_fw << 2, 32); - - if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF && - dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) { + faddr = ha->flt_region_gold_fw; + qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8); + if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_warn, vha, 0x0056, - "Unrecognized golden fw at 0x%x.\n", - ha->flt_region_gold_fw * 4); + "Unrecognized golden fw at %#x.\n", faddr); + ql_dump_buffer(ql_dbg_init, vha, 0x0056, dcode, 32); return ret; } - for (i = 4; i < 8; i++) - ha->gold_fw_version[i-4] = be32_to_cpu(dcode[i]); + for (i = 0; i < 4; i++) + ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]); return ret; } @@ -3237,7 +3590,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha) fcp_prio_addr = ha->flt_region_fcp_prio; /* first read the fcp priority data header from flash */ - ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg, + ha->isp_ops->read_optrom(vha, ha->fcp_prio_cfg, fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE); if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0)) @@ -3248,7 +3601,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha) len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE; max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE; - ha->isp_ops->read_optrom(vha, (uint8_t *)&ha->fcp_prio_cfg->entry[0], + ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0], fcp_prio_addr << 2, (len < max_len ? 
len : max_len)); /* revalidate the entire FCP priority config data, including entries */ diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index b47141e7725b..4b42a1ff60d8 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -6939,7 +6939,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); if (ha->flags.msix_enabled) { - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (IS_QLA2071(ha)) { /* 4 ports Baker: Enable Interrupt Handshake */ icb->msix_atio = 0; @@ -6954,7 +6954,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) } } else { /* INTx|MSI */ - if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { icb->msix_atio = 0; icb->firmware_options_2 |= BIT_26; ql_dbg(ql_dbg_init, vha, 0xf072, @@ -7203,7 +7203,8 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return; - if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha)) { + if ((ql2xenablemsix == 0) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || + IS_QLA28XX(ha)) { ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; } else { diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 9e52500caff0..de696a07532e 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -7,103 +7,9 @@ #include "qla_def.h" #include "qla_tmpl.h" -/* note default template is in big endian */ -static const uint32_t ql27xx_fwdt_default_template[] = { - 0x63000000, 0xa4000000, 0x7c050000, 0x00000000, - 0x30000000, 0x01000000, 0x00000000, 0xc0406eb4, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x04010000, 0x14000000, 0x00000000, - 0x02000000, 0x44000000, 0x09010000, 0x10000000, - 0x00000000, 0x02000000, 0x01010000, 0x1c000000, - 0x00000000, 0x02000000, 0x00600000, 0x00000000, - 0xc0000000, 0x01010000, 0x1c000000, 0x00000000, - 0x02000000, 0x00600000, 0x00000000, 0xcc000000, - 0x01010000, 0x1c000000, 0x00000000, 0x02000000, - 0x10600000, 0x00000000, 0xd4000000, 0x01010000, - 0x1c000000, 0x00000000, 0x02000000, 0x700f0000, - 0x00000060, 0xf0000000, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x00700000, 0x041000c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x10700000, 0x041000c0, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x40700000, 0x041000c0, - 0x01010000, 0x1c000000, 0x00000000, 0x02000000, - 0x007c0000, 0x01000000, 0xc0000000, 0x00010000, - 0x18000000, 0x00000000, 0x02000000, 0x007c0000, - 0x040300c4, 0x00010000, 0x18000000, 0x00000000, - 0x02000000, 0x007c0000, 0x040100c0, 0x01010000, - 0x1c000000, 0x00000000, 0x02000000, 0x007c0000, - 0x00000000, 0xc0000000, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x007c0000, 0x04200000, - 0x0b010000, 0x18000000, 0x00000000, 0x02000000, - 0x0c000000, 0x00000000, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000000b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 
0x000010b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000020b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000030b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000040b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000050b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000060b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000070b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000080b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x000090b0, 0x02010000, 0x20000000, - 0x00000000, 0x02000000, 0x700f0000, 0x040100fc, - 0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x0a000000, 0x040100c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x0a000000, 0x04200080, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x00be0000, 0x041000c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x10be0000, 0x041000c0, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x20be0000, 0x041000c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x30be0000, 0x041000c0, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x00b00000, 0x041000c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x10b00000, 0x041000c0, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x20b00000, 0x041000c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x30b00000, 0x041000c0, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x00300000, 0x041000c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x10300000, 0x041000c0, 0x00010000, 0x18000000, - 0x00000000, 0x02000000, 0x20300000, 0x041000c0, - 0x00010000, 0x18000000, 0x00000000, 0x02000000, - 0x30300000, 0x041000c0, 0x0a010000, 0x10000000, - 0x00000000, 0x02000000, 0x06010000, 0x1c000000, - 0x00000000, 0x02000000, 0x01000000, 0x00000200, - 0xff230200, 0x06010000, 0x1c000000, 0x00000000, - 0x02000000, 0x02000000, 0x00001000, 0x00000000, - 0x07010000, 0x18000000, 0x00000000, 0x02000000, - 0x00000000, 0x01000000, 0x07010000, 0x18000000, - 0x00000000, 0x02000000, 0x00000000, 0x02000000, - 0x07010000, 0x18000000, 0x00000000, 0x02000000, - 0x00000000, 0x03000000, 0x0d010000, 0x14000000, - 0x00000000, 0x02000000, 0x00000000, 0xff000000, - 0x10000000, 0x00000000, 0x00000080, -}; - -static inline void __iomem * -qla27xx_isp_reg(struct scsi_qla_host *vha) -{ - return &vha->hw->iobase->isp24; -} +#define ISPREG(vha) (&(vha)->hw->iobase->isp24) +#define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr) +#define IOBASE(vha) IOBAR(ISPREG(vha)) static inline void qla27xx_insert16(uint16_t value, void *buf, ulong *len) @@ -128,7 +34,6 @@ qla27xx_insert32(uint32_t value, void *buf, ulong *len) static inline void qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len) { - if (buf && mem && size) { buf += *len; memcpy(buf, mem, size); @@ -190,9 +95,9 @@ static inline void qla27xx_write_reg(__iomem struct device_reg_24xx *reg, uint offset, uint32_t data, void *buf) { - __iomem void *window = (void __iomem *)reg + offset; - if (buf) { + void __iomem *window = (void __iomem *)reg + offset; + WRT_REG_DWORD(window, data); } } @@ -205,7 +110,7 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg, void __iomem *window = (void __iomem *)reg + offset; void (*readn)(void __iomem*, void *, 
ulong *) = qla27xx_read_vector(width); - qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf); + qla27xx_write_reg(reg, IOBAR(reg), addr, buf); while (count--) { qla27xx_insert32(addr, buf, len); readn(window, buf, len); @@ -224,7 +129,7 @@ qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf) static inline struct qla27xx_fwdt_entry * qla27xx_next_entry(struct qla27xx_fwdt_entry *ent) { - return (void *)ent + ent->hdr.size; + return (void *)ent + le32_to_cpu(ent->hdr.size); } static struct qla27xx_fwdt_entry * @@ -254,12 +159,14 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + ulong addr = le32_to_cpu(ent->t256.base_addr); + uint offset = ent->t256.pci_offset; + ulong count = le16_to_cpu(ent->t256.reg_count); + uint width = ent->t256.reg_width; ql_dbg(ql_dbg_misc, vha, 0xd200, "%s: rdio t1 [%lx]\n", __func__, *len); - qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset, - ent->t256.reg_count, ent->t256.reg_width, buf, len); + qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len); return qla27xx_next_entry(ent); } @@ -268,12 +175,14 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + ulong addr = le32_to_cpu(ent->t257.base_addr); + uint offset = ent->t257.pci_offset; + ulong data = le32_to_cpu(ent->t257.write_data); ql_dbg(ql_dbg_misc, vha, 0xd201, "%s: wrio t1 [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf); - qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf); + qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } @@ -282,13 +191,17 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + uint banksel = ent->t258.banksel_offset; + ulong bank = le32_to_cpu(ent->t258.bank); + ulong addr = le32_to_cpu(ent->t258.base_addr); + uint offset = ent->t258.pci_offset; + uint count = le16_to_cpu(ent->t258.reg_count); + uint width = ent->t258.reg_width; ql_dbg(ql_dbg_misc, vha, 0xd202, "%s: rdio t2 [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf); - qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset, - ent->t258.reg_count, ent->t258.reg_width, buf, len); + qla27xx_write_reg(ISPREG(vha), banksel, bank, buf); + qla27xx_read_window(ISPREG(vha), addr, offset, count, width, buf, len); return qla27xx_next_entry(ent); } @@ -297,13 +210,17 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + ulong addr = le32_to_cpu(ent->t259.base_addr); + uint banksel = ent->t259.banksel_offset; + ulong bank = le32_to_cpu(ent->t259.bank); + uint offset = ent->t259.pci_offset; + ulong data = le32_to_cpu(ent->t259.write_data); ql_dbg(ql_dbg_misc, vha, 0xd203, "%s: wrio t2 [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf); - qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf); - qla27xx_write_reg(reg, 
ent->t259.pci_offset, ent->t259.write_data, buf); + qla27xx_write_reg(ISPREG(vha), IOBASE(vha), addr, buf); + qla27xx_write_reg(ISPREG(vha), banksel, bank, buf); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } @@ -312,12 +229,12 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + uint offset = ent->t260.pci_offset; ql_dbg(ql_dbg_misc, vha, 0xd204, "%s: rdpci [%lx]\n", __func__, *len); - qla27xx_insert32(ent->t260.pci_offset, buf, len); - qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len); + qla27xx_insert32(offset, buf, len); + qla27xx_read_reg(ISPREG(vha), offset, buf, len); return qla27xx_next_entry(ent); } @@ -326,11 +243,12 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + uint offset = ent->t261.pci_offset; + ulong data = le32_to_cpu(ent->t261.write_data); ql_dbg(ql_dbg_misc, vha, 0xd205, "%s: wrpci [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } @@ -339,51 +257,50 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { + uint area = ent->t262.ram_area; + ulong start = le32_to_cpu(ent->t262.start_addr); + ulong end = le32_to_cpu(ent->t262.end_addr); ulong dwords; - ulong start; - ulong end; ql_dbg(ql_dbg_misc, vha, 0xd206, "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len); - start = ent->t262.start_addr; - end = ent->t262.end_addr; - if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) { + if (area == T262_RAM_AREA_CRITICAL_RAM) { ; - } else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) { + } else if (area == T262_RAM_AREA_EXTERNAL_RAM) { end = vha->hw->fw_memory_size; if (buf) - ent->t262.end_addr = end; - } else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) { + ent->t262.end_addr = cpu_to_le32(end); + } else if (area == T262_RAM_AREA_SHARED_RAM) { start = vha->hw->fw_shared_ram_start; end = vha->hw->fw_shared_ram_end; if (buf) { - ent->t262.start_addr = start; - ent->t262.end_addr = end; + ent->t262.start_addr = cpu_to_le32(start); + ent->t262.end_addr = cpu_to_le32(end); } - } else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) { + } else if (area == T262_RAM_AREA_DDR_RAM) { start = vha->hw->fw_ddr_ram_start; end = vha->hw->fw_ddr_ram_end; if (buf) { - ent->t262.start_addr = start; - ent->t262.end_addr = end; + ent->t262.start_addr = cpu_to_le32(start); + ent->t262.end_addr = cpu_to_le32(end); } - } else if (ent->t262.ram_area == T262_RAM_AREA_MISC) { + } else if (area == T262_RAM_AREA_MISC) { if (buf) { - ent->t262.start_addr = start; - ent->t262.end_addr = end; + ent->t262.start_addr = cpu_to_le32(start); + ent->t262.end_addr = cpu_to_le32(end); } } else { ql_dbg(ql_dbg_misc, vha, 0xd022, - "%s: unknown area %x\n", __func__, ent->t262.ram_area); + "%s: unknown area %x\n", __func__, area); qla27xx_skip_entry(ent, buf); goto done; } if (end < start || start == 0 || end == 0) { ql_dbg(ql_dbg_misc, vha, 0xd023, - "%s: unusable range (start=%x end=%x)\n", __func__, - ent->t262.end_addr, ent->t262.start_addr); + "%s: unusable range (start=%lx end=%lx)\n", + __func__, start, 
end); qla27xx_skip_entry(ent, buf); goto done; } @@ -402,13 +319,14 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { + uint type = ent->t263.queue_type; uint count = 0; uint i; uint length; - ql_dbg(ql_dbg_misc, vha, 0xd207, - "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len); - if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd207, + "%s: getq(%x) [%lx]\n", __func__, type, *len); + if (type == T263_QUEUE_TYPE_REQ) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; @@ -422,7 +340,7 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, count++; } } - } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { + } else if (type == T263_QUEUE_TYPE_RSP) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; @@ -450,7 +368,7 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, } } else { ql_dbg(ql_dbg_misc, vha, 0xd026, - "%s: unknown queue %x\n", __func__, ent->t263.queue_type); + "%s: unknown queue %x\n", __func__, type); qla27xx_skip_entry(ent, buf); } @@ -496,12 +414,10 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); - - ql_dbg(ql_dbg_misc, vha, 0xd209, + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd209, "%s: pause risc [%lx]\n", __func__, *len); if (buf) - qla24xx_pause_risc(reg, vha->hw); + qla24xx_pause_risc(ISPREG(vha), vha->hw); return qla27xx_next_entry(ent); } @@ -522,11 +438,12 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + uint offset = ent->t267.pci_offset; + ulong data = le32_to_cpu(ent->t267.data); ql_dbg(ql_dbg_misc, vha, 0xd20b, "%s: dis intr [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf); + qla27xx_write_reg(ISPREG(vha), offset, data, buf); return qla27xx_next_entry(ent); } @@ -622,17 +539,16 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); - ulong dwords = ent->t270.count; - ulong addr = ent->t270.addr; + ulong addr = le32_to_cpu(ent->t270.addr); + ulong dwords = le32_to_cpu(ent->t270.count); ql_dbg(ql_dbg_misc, vha, 0xd20e, "%s: rdremreg [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf); + qla27xx_write_reg(ISPREG(vha), IOBASE_ADDR, 0x40, buf); while (dwords--) { - qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf); + qla27xx_write_reg(ISPREG(vha), 0xc0, addr|0x80000000, buf); qla27xx_insert32(addr, buf, len); - qla27xx_read_reg(reg, 0xc4, buf, len); + qla27xx_read_reg(ISPREG(vha), 0xc4, buf, len); addr += sizeof(uint32_t); } @@ -643,15 +559,14 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); - ulong addr = ent->t271.addr; - ulong data = ent->t271.data; + ulong addr = le32_to_cpu(ent->t271.addr); + ulong data = le32_to_cpu(ent->t271.data); ql_dbg(ql_dbg_misc, vha, 0xd20f, "%s: wrremreg [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, 
IOBASE_ADDR, 0x40, buf); - qla27xx_write_reg(reg, 0xc4, data, buf); - qla27xx_write_reg(reg, 0xc0, addr, buf); + qla27xx_write_reg(ISPREG(vha), IOBASE(vha), 0x40, buf); + qla27xx_write_reg(ISPREG(vha), 0xc4, data, buf); + qla27xx_write_reg(ISPREG(vha), 0xc0, addr, buf); return qla27xx_next_entry(ent); } @@ -660,8 +575,8 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - ulong dwords = ent->t272.count; - ulong start = ent->t272.addr; + ulong dwords = le32_to_cpu(ent->t272.count); + ulong start = le32_to_cpu(ent->t272.addr); ql_dbg(ql_dbg_misc, vha, 0xd210, "%s: rdremram [%lx]\n", __func__, *len); @@ -680,8 +595,8 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - ulong dwords = ent->t273.count; - ulong addr = ent->t273.addr; + ulong dwords = le32_to_cpu(ent->t273.count); + ulong addr = le32_to_cpu(ent->t273.addr); uint32_t value; ql_dbg(ql_dbg_misc, vha, 0xd211, @@ -703,12 +618,13 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { + ulong type = ent->t274.queue_type; uint count = 0; uint i; - ql_dbg(ql_dbg_misc, vha, 0xd212, - "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len); - if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd212, + "%s: getqsh(%lx) [%lx]\n", __func__, type, *len); + if (type == T274_QUEUE_TYPE_REQ_SHAD) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; @@ -720,7 +636,7 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, count++; } } - } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { + } else if (type == T274_QUEUE_TYPE_RSP_SHAD) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; @@ -746,7 +662,7 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, } } else { ql_dbg(ql_dbg_misc, vha, 0xd02f, - "%s: unknown queue %x\n", __func__, ent->t274.queue_type); + "%s: unknown queue %lx\n", __func__, type); qla27xx_skip_entry(ent, buf); } @@ -765,23 +681,26 @@ qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { ulong offset = offsetof(typeof(*ent), t275.buffer); + ulong length = le32_to_cpu(ent->t275.length); + ulong size = le32_to_cpu(ent->hdr.size); + void *buffer = ent->t275.buffer; - ql_dbg(ql_dbg_misc, vha, 0xd213, - "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len); - if (!ent->t275.length) { + ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd213, + "%s: buffer(%lx) [%lx]\n", __func__, length, *len); + if (!length) { ql_dbg(ql_dbg_misc, vha, 0xd020, "%s: buffer zero length\n", __func__); qla27xx_skip_entry(ent, buf); goto done; } - if (offset + ent->t275.length > ent->hdr.size) { + if (offset + length > size) { + length = size - offset; ql_dbg(ql_dbg_misc, vha, 0xd030, - "%s: buffer overflow\n", __func__); - qla27xx_skip_entry(ent, buf); - goto done; + "%s: buffer overflow, truncate [%lx]\n", __func__, length); + ent->t275.length = cpu_to_le32(length); } - qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len); + qla27xx_insertbuf(buffer, length, buf, len); done: return qla27xx_next_entry(ent); } @@ -790,15 +709,22 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong 
*len) { - uint type = vha->hw->pdev->device >> 4 & 0xf; - uint func = vha->hw->port_no & 0x3; - ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214, "%s: cond [%lx]\n", __func__, *len); - if (type != ent->t276.cond1 || func != ent->t276.cond2) { - ent = qla27xx_next_entry(ent); - qla27xx_skip_entry(ent, buf); + if (buf) { + ulong cond1 = le32_to_cpu(ent->t276.cond1); + ulong cond2 = le32_to_cpu(ent->t276.cond2); + uint type = vha->hw->pdev->device >> 4 & 0xf; + uint func = vha->hw->port_no & 0x3; + + if (type != cond1 || func != cond2) { + struct qla27xx_fwdt_template *tmp = buf; + + tmp->count--; + ent = qla27xx_next_entry(ent); + qla27xx_skip_entry(ent, buf); + } } return qla27xx_next_entry(ent); @@ -808,13 +734,15 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + ulong cmd_addr = le32_to_cpu(ent->t277.cmd_addr); + ulong wr_cmd_data = le32_to_cpu(ent->t277.wr_cmd_data); + ulong data_addr = le32_to_cpu(ent->t277.data_addr); ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215, "%s: rdpep [%lx]\n", __func__, *len); - qla27xx_insert32(ent->t277.wr_cmd_data, buf, len); - qla27xx_write_reg(reg, ent->t277.cmd_addr, ent->t277.wr_cmd_data, buf); - qla27xx_read_reg(reg, ent->t277.data_addr, buf, len); + qla27xx_insert32(wr_cmd_data, buf, len); + qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf); + qla27xx_read_reg(ISPREG(vha), data_addr, buf, len); return qla27xx_next_entry(ent); } @@ -823,12 +751,15 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { - struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha); + ulong cmd_addr = le32_to_cpu(ent->t278.cmd_addr); + ulong wr_cmd_data = le32_to_cpu(ent->t278.wr_cmd_data); + ulong data_addr = le32_to_cpu(ent->t278.data_addr); + ulong wr_data = le32_to_cpu(ent->t278.wr_data); ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216, "%s: wrpep [%lx]\n", __func__, *len); - qla27xx_write_reg(reg, ent->t278.data_addr, ent->t278.wr_data, buf); - qla27xx_write_reg(reg, ent->t278.cmd_addr, ent->t278.wr_cmd_data, buf); + qla27xx_write_reg(ISPREG(vha), data_addr, wr_data, buf); + qla27xx_write_reg(ISPREG(vha), cmd_addr, wr_cmd_data, buf); return qla27xx_next_entry(ent); } @@ -837,8 +768,10 @@ static struct qla27xx_fwdt_entry * qla27xx_fwdt_entry_other(struct scsi_qla_host *vha, struct qla27xx_fwdt_entry *ent, void *buf, ulong *len) { + ulong type = le32_to_cpu(ent->hdr.type); + ql_dbg(ql_dbg_misc, vha, 0xd2ff, - "%s: type %x [%lx]\n", __func__, ent->hdr.type, *len); + "%s: other %lx [%lx]\n", __func__, type, *len); qla27xx_skip_entry(ent, buf); return qla27xx_next_entry(ent); @@ -893,36 +826,27 @@ static void qla27xx_walk_template(struct scsi_qla_host *vha, struct qla27xx_fwdt_template *tmp, void *buf, ulong *len) { - struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset; - ulong count = tmp->entry_count; + struct qla27xx_fwdt_entry *ent = (void *)tmp + + le32_to_cpu(tmp->entry_offset); + ulong type; + tmp->count = le32_to_cpu(tmp->entry_count); ql_dbg(ql_dbg_misc, vha, 0xd01a, - "%s: entry count %lx\n", __func__, count); - while (count--) { - ent = qla27xx_find_entry(ent->hdr.type)(vha, ent, buf, len); + "%s: entry count %u\n", __func__, tmp->count); + while (ent && tmp->count--) { + type = le32_to_cpu(ent->hdr.type); + ent = qla27xx_find_entry(type)(vha, ent, buf, len); if (!ent) break; } - if (count) 
+ if (tmp->count) ql_dbg(ql_dbg_misc, vha, 0xd018, - "%s: entry residual count (%lx)\n", __func__, count); + "%s: entry count residual=+%u\n", __func__, tmp->count); if (ent) ql_dbg(ql_dbg_misc, vha, 0xd019, - "%s: missing end entry (%lx)\n", __func__, count); - - if (buf && *len != vha->hw->fw_dump_len) - ql_dbg(ql_dbg_misc, vha, 0xd01b, - "%s: length=%#lx residual=%+ld\n", - __func__, *len, vha->hw->fw_dump_len - *len); - - if (buf) { - ql_log(ql_log_warn, vha, 0xd015, - "Firmware dump saved to temp buffer (%lu/%p)\n", - vha->host_no, vha->hw->fw_dump); - qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); - } + "%s: missing end entry\n", __func__); } static void @@ -945,8 +869,8 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) } static void -qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp, - struct scsi_qla_host *vha) +qla27xx_firmware_info(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp) { tmp->firmware_version[0] = vha->hw->fw_major_version; tmp->firmware_version[1] = vha->hw->fw_minor_version; @@ -963,19 +887,19 @@ ql27xx_edit_template(struct scsi_qla_host *vha, { qla27xx_time_stamp(tmp); qla27xx_driver_info(tmp); - qla27xx_firmware_info(tmp, vha); + qla27xx_firmware_info(vha, tmp); } static inline uint32_t qla27xx_template_checksum(void *p, ulong size) { - uint32_t *buf = p; + __le32 *buf = p; uint64_t sum = 0; size /= sizeof(*buf); - while (size--) - sum += *buf++; + for ( ; size--; buf++) + sum += le32_to_cpu(*buf); sum = (sum & 0xffffffff) + (sum >> 32); @@ -991,29 +915,29 @@ qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp) static inline int qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp) { - return tmp->template_type == TEMPLATE_TYPE_FWDUMP; + return le32_to_cpu(tmp->template_type) == TEMPLATE_TYPE_FWDUMP; } -static void -qla27xx_execute_fwdt_template(struct scsi_qla_host *vha) +static ulong +qla27xx_execute_fwdt_template(struct scsi_qla_host *vha, + struct qla27xx_fwdt_template *tmp, void *buf) { - struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template; - ulong len; + ulong len = 0; if (qla27xx_fwdt_template_valid(tmp)) { len = tmp->template_size; - tmp = memcpy(vha->hw->fw_dump, tmp, len); + tmp = memcpy(buf, tmp, len); ql27xx_edit_template(vha, tmp); - qla27xx_walk_template(vha, tmp, tmp, &len); - vha->hw->fw_dump_len = len; - vha->hw->fw_dumped = 1; + qla27xx_walk_template(vha, tmp, buf, &len); } + + return len; } ulong -qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha) +qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p) { - struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template; + struct qla27xx_fwdt_template *tmp = p; ulong len = 0; if (qla27xx_fwdt_template_valid(tmp)) { @@ -1032,18 +956,6 @@ qla27xx_fwdt_template_size(void *p) return tmp->template_size; } -ulong -qla27xx_fwdt_template_default_size(void) -{ - return sizeof(ql27xx_fwdt_default_template); -} - -const void * -qla27xx_fwdt_template_default(void) -{ - return ql27xx_fwdt_default_template; -} - int qla27xx_fwdt_template_valid(void *p) { @@ -1051,7 +963,8 @@ qla27xx_fwdt_template_valid(void *p) if (!qla27xx_verify_template_header(tmp)) { ql_log(ql_log_warn, NULL, 0xd01c, - "%s: template type %x\n", __func__, tmp->template_type); + "%s: template type %x\n", __func__, + le32_to_cpu(tmp->template_type)); return false; } @@ -1074,17 +987,41 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) spin_lock_irqsave(&vha->hw->hardware_lock, flags); #endif - if (!vha->hw->fw_dump) - ql_log(ql_log_warn, 
vha, 0xd01e, "fwdump buffer missing.\n"); - else if (!vha->hw->fw_dump_template) - ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n"); - else if (vha->hw->fw_dumped) - ql_log(ql_log_warn, vha, 0xd300, - "Firmware has been previously dumped (%p)," - " -- ignoring request\n", vha->hw->fw_dump); - else { - QLA_FW_STOPPED(vha->hw); - qla27xx_execute_fwdt_template(vha); + if (!vha->hw->fw_dump) { + ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n"); + } else if (vha->hw->fw_dumped) { + ql_log(ql_log_warn, vha, 0xd01f, + "-> Firmware already dumped (%p) -- ignoring request\n", + vha->hw->fw_dump); + } else { + struct fwdt *fwdt = vha->hw->fwdt; + uint j; + ulong len; + void *buf = vha->hw->fw_dump; + + for (j = 0; j < 2; j++, fwdt++, buf += len) { + ql_log(ql_log_warn, vha, 0xd011, + "-> fwdt%u running...\n", j); + if (!fwdt->template) { + ql_log(ql_log_warn, vha, 0xd012, + "-> fwdt%u no template\n", j); + break; + } + len = qla27xx_execute_fwdt_template(vha, + fwdt->template, buf); + if (len != fwdt->dump_size) { + ql_log(ql_log_warn, vha, 0xd013, + "-> fwdt%u fwdump residual=%+ld\n", + j, fwdt->dump_size - len); + } + } + vha->hw->fw_dump_len = buf - (void *)vha->hw->fw_dump; + vha->hw->fw_dumped = 1; + + ql_log(ql_log_warn, vha, 0xd015, + "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n", + vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags); + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } #ifndef __CHECKER__ diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index 5c2c2a8a19c4..d2a0014e8b21 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -11,12 +11,12 @@ #define IOBASE_ADDR offsetof(struct device_reg_24xx, iobase_addr) struct __packed qla27xx_fwdt_template { - uint32_t template_type; - uint32_t entry_offset; + __le32 template_type; + __le32 entry_offset; uint32_t template_size; - uint32_t reserved_1; + uint32_t count; /* borrow field for running/residual count */ - uint32_t entry_count; + __le32 entry_count; uint32_t template_version; uint32_t capture_timestamp; uint32_t template_checksum; @@ -65,8 +65,8 @@ struct __packed qla27xx_fwdt_template { struct __packed qla27xx_fwdt_entry { struct __packed { - uint32_t type; - uint32_t size; + __le32 type; + __le32 size; uint32_t reserved_1; uint8_t capture_flags; @@ -81,36 +81,36 @@ struct __packed qla27xx_fwdt_entry { } t255; struct __packed { - uint32_t base_addr; + __le32 base_addr; uint8_t reg_width; - uint16_t reg_count; + __le16 reg_count; uint8_t pci_offset; } t256; struct __packed { - uint32_t base_addr; - uint32_t write_data; + __le32 base_addr; + __le32 write_data; uint8_t pci_offset; uint8_t reserved[3]; } t257; struct __packed { - uint32_t base_addr; + __le32 base_addr; uint8_t reg_width; - uint16_t reg_count; + __le16 reg_count; uint8_t pci_offset; uint8_t banksel_offset; uint8_t reserved[3]; - uint32_t bank; + __le32 bank; } t258; struct __packed { - uint32_t base_addr; - uint32_t write_data; + __le32 base_addr; + __le32 write_data; uint8_t reserved[2]; uint8_t pci_offset; uint8_t banksel_offset; - uint32_t bank; + __le32 bank; } t259; struct __packed { @@ -121,14 +121,14 @@ struct __packed qla27xx_fwdt_entry { struct __packed { uint8_t pci_offset; uint8_t reserved[3]; - uint32_t write_data; + __le32 write_data; } t261; struct __packed { uint8_t ram_area; uint8_t reserved[3]; - uint32_t start_addr; - uint32_t end_addr; + __le32 start_addr; + __le32 end_addr; } t262; struct __packed { @@ -158,7 +158,7 @@ struct __packed 
qla27xx_fwdt_entry { struct __packed { uint8_t pci_offset; uint8_t reserved[3]; - uint32_t data; + __le32 data; } t267; struct __packed { @@ -173,23 +173,23 @@ struct __packed qla27xx_fwdt_entry { } t269; struct __packed { - uint32_t addr; - uint32_t count; + __le32 addr; + __le32 count; } t270; struct __packed { - uint32_t addr; - uint32_t data; + __le32 addr; + __le32 data; } t271; struct __packed { - uint32_t addr; - uint32_t count; + __le32 addr; + __le32 count; } t272; struct __packed { - uint32_t addr; - uint32_t count; + __le32 addr; + __le32 count; } t273; struct __packed { @@ -199,26 +199,26 @@ struct __packed qla27xx_fwdt_entry { } t274; struct __packed { - uint32_t length; + __le32 length; uint8_t buffer[]; } t275; struct __packed { - uint32_t cond1; - uint32_t cond2; + __le32 cond1; + __le32 cond2; } t276; struct __packed { - uint32_t cmd_addr; - uint32_t wr_cmd_data; - uint32_t data_addr; + __le32 cmd_addr; + __le32 wr_cmd_data; + __le32 data_addr; } t277; struct __packed { - uint32_t cmd_addr; - uint32_t wr_cmd_data; - uint32_t data_addr; - uint32_t wr_data; + __le32 cmd_addr; + __le32 wr_cmd_data; + __le32 data_addr; + __le32 wr_data; } t278; }; }; diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 0690dac24081..0f8cca27c183 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.00.00.14-k" +#define QLA2XXX_VERSION "10.01.00.15-k" #define QLA_DRIVER_MAJOR_VER 10 -#define QLA_DRIVER_MINOR_VER 0 +#define QLA_DRIVER_MINOR_VER 1 #define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 0 diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 4055cf8cc978..95fb01883b3c 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -26,8 +26,11 @@ */ #define MARVELL_PHY_ID_88E6390 0x01410f90 +#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4) + /* struct phy_device dev_flags definitions */ #define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 #define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002 +#define MARVELL_PHY_LED0_LINK_LED1_ACTIVE 0x00000004 #endif /* _MARVELL_PHY_H */
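Editor's note: the common thread in the qla_tmpl.c/qla_tmpl.h hunks above is byte order. The ISP27xx firmware-dump template is a little-endian blob handed to the driver by firmware, so its fields are retyped from uint32_t/uint16_t to __le32/__le16 and every access now goes through le32_to_cpu()/cpu_to_le32(); on little-endian hosts these conversions compile away, so the change is presumably aimed at big-endian hosts. qla27xx_template_checksum() keeps its existing shape (accumulate 32-bit words in a 64-bit sum, then fold the carries back in once) but now loads each word with le32_to_cpu(). The sketch below is a minimal, self-contained userspace illustration of that load-and-fold pattern, not driver code: get_le32() is a hypothetical stand-in for the kernel's le32_to_cpu(), folded_sum() mirrors only the visible part of the kernel function, and the sample buffer is invented for the demo.

	#include <stdint.h>
	#include <stdio.h>

	/* Portable little-endian 32-bit load; userspace stand-in for le32_to_cpu(). */
	static uint32_t get_le32(const unsigned char *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	/*
	 * Same shape as the patched qla27xx_template_checksum(): accumulate all
	 * 32-bit little-endian words in a 64-bit sum, then fold the carry bits
	 * above bit 31 back into the low word once.
	 */
	static uint32_t folded_sum(const unsigned char *buf, unsigned long size)
	{
		uint64_t sum = 0;

		for (size /= sizeof(uint32_t); size--; buf += sizeof(uint32_t))
			sum += get_le32(buf);

		sum = (sum & 0xffffffff) + (sum >> 32);
		return (uint32_t)sum;
	}

	int main(void)
	{
		/* Hypothetical 3-word template stored explicitly little-endian. */
		unsigned char tmpl[12] = {
			0x78, 0x56, 0x34, 0x12,	/* 0x12345678 */
			0x87, 0xa9, 0xcb, 0xed,	/* 0xedcba987 */
			0x00, 0x00, 0x00, 0x00,	/* pad word   */
		};

		/* 0x12345678 + 0xedcba987 == 0xffffffff, so the fold yields ~0. */
		printf("folded sum = %#x\n", (unsigned)folded_sum(tmpl, sizeof(tmpl)));
		return 0;
	}

Because a dump template can run to hundreds of kilobytes, reading each word through an explicit little-endian load keeps the checksum stable across host endianness, which is the point of the __le32 annotations throughout the new qla_tmpl.h.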