author    Takashi Iwai <tiwai@suse.de>    2018-07-17 12:36:13 +0200
committer Takashi Iwai <tiwai@suse.de>    2018-07-17 12:36:13 +0200
commit    f3f8e1c883eb9b02d323af42dbd0cae5b59ee6ad (patch)
tree      bb86dc3596747de4cf133da395228eb661209a38
parent    8d9a35bb53da0649ffa7100bcb608e0b797bfad9 (diff)
parent    6b2af6fd9a2b6a726f88607a476063fa1d6e4c72 (diff)
Merge branch 'users/tbogendoerfer/SLE15/for-next' into SLE15 (tag: rpm-4.12.14-25.3)
Pull RDMA fixes from Thomas Bogendoerfer
-rw-r--r--  patches.drivers/RDMA-bnxt_re-Fix-broken-RoCE-driver-due-to-recent-L2.patch | 379
-rw-r--r--  patches.drivers/RDMA-bnxt_re-Remove-redundant-bnxt_qplib_disable_nq-.patch | 27
-rw-r--r--  patches.drivers/bnxt_en-Add-BCM5745X-NPAR-device-IDs.patch | 42
-rw-r--r--  patches.drivers/bnxt_en-Add-IRQ-remapping-logic.patch | 150
-rw-r--r--  patches.drivers/bnxt_en-Add-TC-to-hardware-QoS-queue-mapping-logic.patch | 195
-rw-r--r--  patches.drivers/bnxt_en-Add-ULP-calls-to-stop-and-restart-IRQs.patch | 239
-rw-r--r--  patches.drivers/bnxt_en-Add-cache-line-size-setting-to-optimize-perf.patch | 58
-rw-r--r--  patches.drivers/bnxt_en-Add-extended-port-statistics-support.patch | 202
-rw-r--r--  patches.drivers/bnxt_en-Add-support-for-ndo_set_vf_trust.patch | 156
-rw-r--r--  patches.drivers/bnxt_en-Add-the-new-firmware-API-to-query-hardware-r.patch | 101
-rw-r--r--  patches.drivers/bnxt_en-Adjust-default-rings-for-multi-port-NICs.patch | 38
-rw-r--r--  patches.drivers/bnxt_en-Always-forward-VF-MAC-address-to-the-PF.patch | 52
-rw-r--r--  patches.drivers/bnxt_en-Change-IRQ-assignment-for-RDMA-driver.patch | 150
-rw-r--r--  patches.drivers/bnxt_en-Check-max_tx_scheduler_inputs-value-from-fir.patch | 101
-rw-r--r--  patches.drivers/bnxt_en-Check-the-lengths-of-encapsulated-firmware-r.patch | 74
-rw-r--r--  patches.drivers/bnxt_en-Check-unsupported-speeds-in-bnxt_update_link.patch | 31
-rw-r--r--  patches.drivers/bnxt_en-Display-function-level-rx-tx_discard_pkts-vi.patch | 96
-rw-r--r--  patches.drivers/bnxt_en-Do-not-allow-VF-to-read-EEPROM.patch | 31
-rw-r--r--  patches.drivers/bnxt_en-Do-not-set-firmware-time-from-VF-driver-on-o.patch | 29
-rw-r--r--  patches.drivers/bnxt_en-Don-t-reserve-rings-on-VF-when-min-rings-wer.patch | 78
-rw-r--r--  patches.drivers/bnxt_en-Eliminate-duplicate-barriers-on-weakly-order.patch | 59
-rw-r--r--  patches.drivers/bnxt_en-Expand-bnxt_check_rings-to-check-all-resourc.patch | 152
-rw-r--r--  patches.drivers/bnxt_en-Fix-NULL-pointer-dereference-at-bnxt_free_ir.patch | 35
-rw-r--r--  patches.drivers/bnxt_en-Fix-ethtool-x-crash-when-device-is-down.patch | 45
-rw-r--r--  patches.drivers/bnxt_en-Fix-firmware-message-delay-loop-regression.patch | 86
-rw-r--r--  patches.drivers/bnxt_en-Fix-regressions-when-setting-up-MQPRIO-TX-ri.patch | 55
-rw-r--r--  patches.drivers/bnxt_en-Fix-vnic-accounting-in-the-bnxt_check_rings-.patch | 160
-rw-r--r--  patches.drivers/bnxt_en-Forward-VF-MAC-address-to-the-PF.patch | 140
-rw-r--r--  patches.drivers/bnxt_en-Implement-new-method-for-the-PF-to-assign-SR.patch | 213
-rw-r--r--  patches.drivers/bnxt_en-Implement-new-method-to-reserve-rings.patch | 363
-rw-r--r--  patches.drivers/bnxt_en-Improve-resource-accounting-for-SRIOV.patch | 49
-rw-r--r--  patches.drivers/bnxt_en-Improve-ring-allocation-logic.patch | 150
-rw-r--r--  patches.drivers/bnxt_en-Improve-valid-bit-checking-in-firmware-respo.patch | 102
-rw-r--r--  patches.drivers/bnxt_en-Include-additional-hardware-port-statistics-.patch | 39
-rw-r--r--  patches.drivers/bnxt_en-Increase-RING_IDLE-minimum-threshold-to-50.patch | 29
-rw-r--r--  patches.drivers/bnxt_en-Need-to-include-RDMA-rings-in-bnxt_check_rin.patch | 30
-rw-r--r--  patches.drivers/bnxt_en-Pass-complete-VLAN-TCI-to-the-stack.patch | 49
-rw-r--r--  patches.drivers/bnxt_en-Read-phy-eeprom-A2h-address-only-when-optica.patch | 91
-rw-r--r--  patches.drivers/bnxt_en-Refactor-bnxt_close_nic.patch | 70
-rw-r--r--  patches.drivers/bnxt_en-Refactor-bnxt_need_reserve_rings.patch | 105
-rw-r--r--  patches.drivers/bnxt_en-Refactor-hardware-resource-data-structures.patch | 431
-rw-r--r--  patches.drivers/bnxt_en-Refactor-the-functions-to-reserve-hardware-r.patch | 140
-rw-r--r--  patches.drivers/bnxt_en-Remap-TC-to-hardware-queues-when-configuring.patch | 158
-rw-r--r--  patches.drivers/bnxt_en-Reserve-RSS-and-L2-contexts-for-VF.patch | 83
-rw-r--r--  patches.drivers/bnxt_en-Reserve-completion-rings-and-MSIX-for-bnxt_r.patch | 219
-rw-r--r--  patches.drivers/bnxt_en-Reserve-resources-for-RFS.patch | 56
-rw-r--r--  patches.drivers/bnxt_en-Reserve-rings-at-driver-open-if-none-was-res.patch | 75
-rw-r--r--  patches.drivers/bnxt_en-Reserve-rings-in-bnxt_set_channels-if-device.patch | 32
-rw-r--r--  patches.drivers/bnxt_en-Restore-MSIX-after-disabling-SRIOV.patch | 61
-rw-r--r--  patches.drivers/bnxt_en-Set-initial-default-RX-and-TX-ring-numbers-t.patch | 64
-rw-r--r--  patches.drivers/bnxt_en-Simplify-ring-alloc-free-error-messages.patch | 76
-rw-r--r--  patches.drivers/bnxt_en-Support-max-mtu-with-VF-reps.patch | 76
-rw-r--r--  patches.drivers/bnxt_en-Update-firmware-interface-to-1.9.0.patch | 12126
-rw-r--r--  patches.drivers/bnxt_en-Update-firmware-interface-to-1.9.1.15.patch | 568
-rw-r--r--  patches.drivers/bnxt_en-Use-a-dedicated-VNIC-mode-for-RDMA.patch | 70
-rw-r--r--  patches.drivers/bnxt_en-close-open-NIC-only-when-the-interface-is-in.patch | 57
-rw-r--r--  patches.drivers/bnxt_en-export-a-common-switchdev-PARENT_ID-for-all-.patch | 138
-rw-r--r--  patches.drivers/bnxt_en-reduce-timeout-on-initial-HWRM-calls.patch | 86
-rw-r--r--  patches.drivers/bpf-make-bnxt-compatible-w-bpf_xdp_adjust_tail.patch | 35
-rw-r--r--  patches.drivers/ethernet-broadcom-Use-zeroing-memory-allocator-than-.patch | 72
-rw-r--r--  series.conf | 60
61 files changed, 18934 insertions, 0 deletions
diff --git a/patches.drivers/RDMA-bnxt_re-Fix-broken-RoCE-driver-due-to-recent-L2.patch b/patches.drivers/RDMA-bnxt_re-Fix-broken-RoCE-driver-due-to-recent-L2.patch
new file mode 100644
index 0000000000..cdf732f797
--- /dev/null
+++ b/patches.drivers/RDMA-bnxt_re-Fix-broken-RoCE-driver-due-to-recent-L2.patch
@@ -0,0 +1,379 @@
+From: Devesh Sharma <devesh.sharma@broadcom.com>
+Date: Fri, 25 May 2018 12:01:21 -0400
+Subject: RDMA/bnxt_re: Fix broken RoCE driver due to recent L2 driver changes
+Patch-mainline: v4.17
+Git-commit: 6e04b103568983bd699fac96b80a9b96ede68118
+References: bsc#1086283 FATE#324874
+
+Recent changes in Broadcom's ethernet driver (L2 driver) broke
+RoCE functionality in terms of MSIx vector allocation and
+de-allocation.
+
+The L2 driver may initiate MSIx vector reallocation in response to
+requests coming from the administrator. In such cases the L2 driver
+needs to free up all the previously allocated MSIx vectors and then
+reallocate/initialize them.
+
+If the RoCE driver is loaded while such a reshuffle is attempted, the
+RoCE driver would still be holding the MSIx vectors, but the L2
+driver would attempt to free those in-use vectors, leading to a
+kernel crash.
+
+Make changes in the RoCE driver to fix the crashes described above.
+As part of the solution, the L2 driver tells the RoCE driver to
+release the MSIx vectors whenever there is a need. When the RoCE
+driver gets the message, it syncs up with all the running tasklets
+and IRQ handlers and releases the vectors. The L2 driver then sends
+one more message to the RoCE driver to resume the MSIx vectors. The
+L2 driver guarantees that the RoCE vectors do not change during the
+reshuffle.
+
+Fixes: ec86f14ea506 ("bnxt_en: Add ULP calls to stop and restart IRQs.")
+Fixes: 08654eb213a8 ("bnxt_en: Change IRQ assignment for RDMA driver.")
+Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 55 ++++++++++++++++
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 94 ++++++++++++++++++-----------
+ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 3
+ drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 61 +++++++++++++-----
+ drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 3
+ 5 files changed, 163 insertions(+), 53 deletions(-)
+
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -185,12 +185,65 @@ static void bnxt_re_shutdown(void *p)
+ bnxt_re_ib_unreg(rdev, false);
+ }
+
++static void bnxt_re_stop_irq(void *handle)
++{
++ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
++ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
++ struct bnxt_qplib_nq *nq;
++ int indx;
++
++ for (indx = BNXT_RE_NQ_IDX; indx < rdev->num_msix; indx++) {
++ nq = &rdev->nq[indx - 1];
++ bnxt_qplib_nq_stop_irq(nq, false);
++ }
++
++ bnxt_qplib_rcfw_stop_irq(rcfw, false);
++}
++
++static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
++{
++ struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
++ struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
++ struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
++ struct bnxt_qplib_nq *nq;
++ int indx, rc;
++
++ if (!ent) {
++ /* Not setting the f/w timeout bit in rcfw.
++ * During the driver unload the first command
++ * to f/w will timeout and that will set the
++ * timeout bit.
++ */
++ dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
++ return;
++ }
++
++ /* Vectors may change after restart, so update with new vectors
++ * in device structure.
++ */
++ for (indx = 0; indx < rdev->num_msix; indx++)
++ rdev->msix_entries[indx].vector = ent[indx].vector;
++
++ bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
++ false);
++ for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
++ nq = &rdev->nq[indx - 1];
++ rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
++ msix_ent[indx].vector, false);
++ if (rc)
++ dev_warn(rdev_to_dev(rdev),
++ "Failed to reinit NQ index %d\n", indx - 1);
++ }
++}
++
+ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = NULL,
+ .ulp_stop = bnxt_re_stop,
+ .ulp_start = bnxt_re_start,
+ .ulp_sriov_config = bnxt_re_sriov_config,
+- .ulp_shutdown = bnxt_re_shutdown
++ .ulp_shutdown = bnxt_re_shutdown,
++ .ulp_irq_stop = bnxt_re_stop_irq,
++ .ulp_irq_restart = bnxt_re_start_irq
+ };
+
+ /* RoCE -> Net driver */
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -341,22 +341,32 @@ static irqreturn_t bnxt_qplib_nq_irq(int
+ return IRQ_HANDLED;
+ }
+
++void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
++{
++ tasklet_disable(&nq->worker);
++ /* Mask h/w interrupt */
++ NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
++ /* Sync with last running IRQ handler */
++ synchronize_irq(nq->vector);
++ if (kill)
++ tasklet_kill(&nq->worker);
++ if (nq->requested) {
++ irq_set_affinity_hint(nq->vector, NULL);
++ free_irq(nq->vector, nq);
++ nq->requested = false;
++ }
++}
++
+ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
+ {
+ if (nq->cqn_wq) {
+ destroy_workqueue(nq->cqn_wq);
+ nq->cqn_wq = NULL;
+ }
++
+ /* Make sure the HW is stopped! */
+- synchronize_irq(nq->vector);
+- tasklet_disable(&nq->worker);
+- tasklet_kill(&nq->worker);
++ bnxt_qplib_nq_stop_irq(nq, true);
+
+- if (nq->requested) {
+- irq_set_affinity_hint(nq->vector, NULL);
+- free_irq(nq->vector, nq);
+- nq->requested = false;
+- }
+ if (nq->bar_reg_iomem)
+ iounmap(nq->bar_reg_iomem);
+ nq->bar_reg_iomem = NULL;
+@@ -366,6 +376,40 @@ void bnxt_qplib_disable_nq(struct bnxt_q
+ nq->vector = 0;
+ }
+
++int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
++ int msix_vector, bool need_init)
++{
++ int rc;
++
++ if (nq->requested)
++ return -EFAULT;
++
++ nq->vector = msix_vector;
++ if (need_init)
++ tasklet_init(&nq->worker, bnxt_qplib_service_nq,
++ (unsigned long)nq);
++ else
++ tasklet_enable(&nq->worker);
++
++ snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
++ rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
++ if (rc)
++ return rc;
++
++ cpumask_clear(&nq->mask);
++ cpumask_set_cpu(nq_indx, &nq->mask);
++ rc = irq_set_affinity_hint(nq->vector, &nq->mask);
++ if (rc) {
++ dev_warn(&nq->pdev->dev,
++ "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
++ nq->vector, nq_indx);
++ }
++ nq->requested = true;
++ NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
++
++ return rc;
++}
++
+ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ int nq_idx, int msix_vector, int bar_reg_offset,
+ int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+@@ -377,41 +421,17 @@ int bnxt_qplib_enable_nq(struct pci_dev
+ resource_size_t nq_base;
+ int rc = -1;
+
+- nq->pdev = pdev;
+- nq->vector = msix_vector;
+ if (cqn_handler)
+ nq->cqn_handler = cqn_handler;
+
+ if (srqn_handler)
+ nq->srqn_handler = srqn_handler;
+
+- tasklet_init(&nq->worker, bnxt_qplib_service_nq, (unsigned long)nq);
+-
+ /* Have a task to schedule CQ notifiers in post send case */
+ nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
+ if (!nq->cqn_wq)
+- goto fail;
+-
+- nq->requested = false;
+- memset(nq->name, 0, 32);
+- sprintf(nq->name, "bnxt_qplib_nq-%d", nq_idx);
+- rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
+- if (rc) {
+- dev_err(&nq->pdev->dev,
+- "Failed to request IRQ for NQ: %#x", rc);
+- goto fail;
+- }
++ return -ENOMEM;
+
+- cpumask_clear(&nq->mask);
+- cpumask_set_cpu(nq_idx, &nq->mask);
+- rc = irq_set_affinity_hint(nq->vector, &nq->mask);
+- if (rc) {
+- dev_warn(&nq->pdev->dev,
+- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+- nq->vector, nq_idx);
+- }
+-
+- nq->requested = true;
+ nq->bar_reg = NQ_CONS_PCI_BAR_REGION;
+ nq->bar_reg_off = bar_reg_offset;
+ nq_base = pci_resource_start(pdev, nq->bar_reg);
+@@ -424,7 +444,13 @@ int bnxt_qplib_enable_nq(struct pci_dev
+ rc = -ENOMEM;
+ goto fail;
+ }
+- NQ_DB_REARM(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
++
++ rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
++ if (rc) {
++ dev_err(&nq->pdev->dev,
++ "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
++ goto fail;
++ }
+
+ return 0;
+ fail:
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -467,7 +467,10 @@ struct bnxt_qplib_nq_work {
+ struct bnxt_qplib_cq *cq;
+ };
+
++void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
+ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
++int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
++ int msix_vector, bool need_init);
+ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ int nq_idx, int msix_vector, int bar_reg_offset,
+ int (*cqn_handler)(struct bnxt_qplib_nq *nq,
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -586,19 +586,29 @@ fail:
+ return -ENOMEM;
+ }
+
+-void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
++void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
+ {
+- unsigned long indx;
+-
+- /* Make sure the HW channel is stopped! */
+- synchronize_irq(rcfw->vector);
+ tasklet_disable(&rcfw->worker);
+- tasklet_kill(&rcfw->worker);
++ /* Mask h/w interrupts */
++ CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
++ rcfw->creq.max_elements);
++ /* Sync with last running IRQ-handler */
++ synchronize_irq(rcfw->vector);
++ if (kill)
++ tasklet_kill(&rcfw->worker);
+
+ if (rcfw->requested) {
+ free_irq(rcfw->vector, rcfw);
+ rcfw->requested = false;
+ }
++}
++
++void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
++{
++ unsigned long indx;
++
++ bnxt_qplib_rcfw_stop_irq(rcfw, true);
++
+ if (rcfw->cmdq_bar_reg_iomem)
+ iounmap(rcfw->cmdq_bar_reg_iomem);
+ rcfw->cmdq_bar_reg_iomem = NULL;
+@@ -618,6 +628,31 @@ void bnxt_qplib_disable_rcfw_channel(str
+ rcfw->vector = 0;
+ }
+
++int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
++ bool need_init)
++{
++ int rc;
++
++ if (rcfw->requested)
++ return -EFAULT;
++
++ rcfw->vector = msix_vector;
++ if (need_init)
++ tasklet_init(&rcfw->worker,
++ bnxt_qplib_service_creq, (unsigned long)rcfw);
++ else
++ tasklet_enable(&rcfw->worker);
++ rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
++ "bnxt_qplib_creq", rcfw);
++ if (rc)
++ return rc;
++ rcfw->requested = true;
++ CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
++ rcfw->creq.max_elements);
++
++ return 0;
++}
++
+ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
+ struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
+@@ -679,27 +714,17 @@ int bnxt_qplib_enable_rcfw_channel(struc
+ rcfw->creq_qp_event_processed = 0;
+ rcfw->creq_func_event_processed = 0;
+
+- rcfw->vector = msix_vector;
+ if (aeq_handler)
+ rcfw->aeq_handler = aeq_handler;
++ init_waitqueue_head(&rcfw->waitq);
+
+- tasklet_init(&rcfw->worker, bnxt_qplib_service_creq,
+- (unsigned long)rcfw);
+-
+- rcfw->requested = false;
+- rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
+- "bnxt_qplib_creq", rcfw);
++ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
+ if (rc) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
+ bnxt_qplib_disable_rcfw_channel(rcfw);
+ return rc;
+ }
+- rcfw->requested = true;
+-
+- init_waitqueue_head(&rcfw->waitq);
+-
+- CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, 0, rcfw->creq.max_elements);
+
+ init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
+ init.cmdq_size_cmdq_lvl = cpu_to_le16(
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+@@ -195,7 +195,10 @@ struct bnxt_qplib_rcfw {
+ void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
+ struct bnxt_qplib_rcfw *rcfw, int qp_tbl_sz);
++void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
+ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
++int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
++ bool need_init);
+ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
+ struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
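
The crux of the fix above is the ordering inside the new stop helpers. A minimal sketch of that quiesce sequence, distilled from the hunks above (names follow the patch; locking and error handling are trimmed, so this is illustrative rather than drop-in code):

```c
static void sketch_nq_quiesce(struct bnxt_qplib_nq *nq, bool kill)
{
	/* 1. Stop the tasklet from being scheduled again. */
	tasklet_disable(&nq->worker);
	/* 2. Mask the hardware interrupt via the doorbell register. */
	NQ_DB(nq->bar_reg_iomem, nq->hwq.cons, nq->hwq.max_elements);
	/* 3. Wait out any handler already running on another CPU. */
	synchronize_irq(nq->vector);
	/* 4. Kill the tasklet only on final teardown, not on a reshuffle. */
	if (kill)
		tasklet_kill(&nq->worker);
	/* 5. Only now is the vector safe for the L2 driver to free/reuse. */
	if (nq->requested) {
		free_irq(nq->vector, nq);
		nq->requested = false;
	}
}
```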
diff --git a/patches.drivers/RDMA-bnxt_re-Remove-redundant-bnxt_qplib_disable_nq-.patch b/patches.drivers/RDMA-bnxt_re-Remove-redundant-bnxt_qplib_disable_nq-.patch
new file mode 100644
index 0000000000..c8cd5616c4
--- /dev/null
+++ b/patches.drivers/RDMA-bnxt_re-Remove-redundant-bnxt_qplib_disable_nq-.patch
@@ -0,0 +1,27 @@
+From: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Date: Tue, 14 Nov 2017 18:20:56 +0530
+Subject: RDMA/bnxt_re: Remove redundant bnxt_qplib_disable_nq() call
+Patch-mainline: v4.16-rc1
+Git-commit: 302784729e7fb29d5888686fe83b42bb18f81ab8
+References: bsc#1086283 FATE#324874
+
+The bnxt_qplib_disable_nq() call is redundant, as the error path at
+the 'goto fail' target performs the same cleanup, so it is called
+twice. Remove it.
+
+Signed-off-by: Arvind Yadav <arvind.yadav.cs@gmail.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -415,7 +415,6 @@ int bnxt_qplib_enable_nq(struct pci_dev
+ if (rc) {
+ dev_err(&nq->pdev->dev,
+ "Failed to request IRQ for NQ: %#x", rc);
+- bnxt_qplib_disable_nq(nq);
+ goto fail;
+ }
+
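
The shape of the double teardown being removed can be sketched as follows (hedged illustration; it assumes, as in this kernel, that the `fail` label in bnxt_qplib_enable_nq() performs the same cleanup):

```c
/*
 *	rc = request_irq(nq->vector, bnxt_qplib_nq_irq, 0, nq->name, nq);
 *	if (rc) {
 *		bnxt_qplib_disable_nq(nq);	<- first teardown (removed)
 *		goto fail;
 *	}
 *	...
 * fail:
 *	bnxt_qplib_disable_nq(nq);		<- second teardown
 */
```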
diff --git a/patches.drivers/bnxt_en-Add-BCM5745X-NPAR-device-IDs.patch b/patches.drivers/bnxt_en-Add-BCM5745X-NPAR-device-IDs.patch
new file mode 100644
index 0000000000..e5a29d2841
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-BCM5745X-NPAR-device-IDs.patch
@@ -0,0 +1,42 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:13 -0500
+Subject: bnxt_en: Add BCM5745X NPAR device IDs
+Patch-mainline: v4.16-rc1
+Git-commit: 92abef361bd233ea2a99db9e9a637626f523f82e
+References: bsc#1086282 FATE#324873
+
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -107,6 +107,7 @@ enum board_idx {
+ BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
++ BCM5745x_NPAR,
+ BCM58802,
+ BCM58804,
+ BCM58808,
+@@ -147,6 +148,7 @@ static const struct {
+ [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
+ [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
++ [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
+ [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+ [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
+@@ -156,6 +158,8 @@ static const struct {
+ };
+
+ static const struct pci_device_id bnxt_pci_tbl[] = {
++ { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
++ { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
+ { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
diff --git a/patches.drivers/bnxt_en-Add-IRQ-remapping-logic.patch b/patches.drivers/bnxt_en-Add-IRQ-remapping-logic.patch
new file mode 100644
index 0000000000..953747a24b
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-IRQ-remapping-logic.patch
@@ -0,0 +1,150 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:18 -0400
+Subject: bnxt_en: Add IRQ remapping logic.
+Patch-mainline: v4.17-rc1
+Git-commit: e5811b8c09df9bc80eabc95339fceded23f16289
+References: bsc#1086282 FATE#324873
+
+Add remapping logic so that bnxt_en can use any arbitrary MSIX vectors.
+This will allow the driver to reserve one range of MSIX vectors to be
+used by both bnxt_en and bnxt_re. bnxt_en can now skip over the MSIX
+vectors used by bnxt_re.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 59 +++++++++++++++++++++---------
+ 1 file changed, 42 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2424,8 +2424,10 @@ static void bnxt_free_cp_rings(struct bn
+
+ static int bnxt_alloc_cp_rings(struct bnxt *bp)
+ {
+- int i, rc;
++ int i, rc, ulp_base_vec, ulp_msix;
+
++ ulp_msix = bnxt_get_ulp_msix_num(bp);
++ ulp_base_vec = bnxt_get_ulp_msix_base(bp);
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+@@ -2440,7 +2442,11 @@ static int bnxt_alloc_cp_rings(struct bn
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
+- ring->map_idx = i;
++
++ if (ulp_msix && i >= ulp_base_vec)
++ ring->map_idx = i + ulp_msix;
++ else
++ ring->map_idx = i;
+ }
+ return 0;
+ }
+@@ -3359,6 +3365,15 @@ static void bnxt_disable_int(struct bnxt
+ }
+ }
+
++static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
++{
++ struct bnxt_napi *bnapi = bp->bnapi[n];
++ struct bnxt_cp_ring_info *cpr;
++
++ cpr = &bnapi->cp_ring;
++ return cpr->cp_ring_struct.map_idx;
++}
++
+ static void bnxt_disable_int_sync(struct bnxt *bp)
+ {
+ int i;
+@@ -3366,8 +3381,11 @@ static void bnxt_disable_int_sync(struct
+ atomic_inc(&bp->intr_sem);
+
+ bnxt_disable_int(bp);
+- for (i = 0; i < bp->cp_nr_rings; i++)
+- synchronize_irq(bp->irq_tbl[i].vector);
++ for (i = 0; i < bp->cp_nr_rings; i++) {
++ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
++
++ synchronize_irq(bp->irq_tbl[map_idx].vector);
++ }
+ }
+
+ static void bnxt_enable_int(struct bnxt *bp)
+@@ -5769,6 +5787,7 @@ static void bnxt_setup_msix(struct bnxt
+ }
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
++ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+ char *attr;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+@@ -5778,9 +5797,9 @@ static void bnxt_setup_msix(struct bnxt
+ else
+ attr = "tx";
+
+- snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
+- i);
+- bp->irq_tbl[i].handler = bnxt_msix;
++ snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
++ attr, i);
++ bp->irq_tbl[map_idx].handler = bnxt_msix;
+ }
+ }
+
+@@ -6004,7 +6023,9 @@ static void bnxt_free_irq(struct bnxt *b
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+- irq = &bp->irq_tbl[i];
++ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
++
++ irq = &bp->irq_tbl[map_idx];
+ if (irq->requested) {
+ if (irq->have_cpumask) {
+ irq_set_affinity_hint(irq->vector, NULL);
+@@ -6023,14 +6044,25 @@ static int bnxt_request_irq(struct bnxt
+ int i, j, rc = 0;
+ unsigned long flags = 0;
+ #ifdef CONFIG_RFS_ACCEL
+- struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
++ struct cpu_rmap *rmap;
+ #endif
+
++ rc = bnxt_setup_int_mode(bp);
++ if (rc) {
++ netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
++ rc);
++ return rc;
++ }
++#ifdef CONFIG_RFS_ACCEL
++ rmap = bp->dev->rx_cpu_rmap;
++#endif
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ flags = IRQF_SHARED;
+
+ for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
+- struct bnxt_irq *irq = &bp->irq_tbl[i];
++ int map_idx = bnxt_cp_num_to_irq_num(bp, i);
++ struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
++
+ #ifdef CONFIG_RFS_ACCEL
+ if (rmap && bp->bnapi[i]->rx_ring) {
+ rc = irq_cpu_rmap_add(rmap, irq->vector);
+@@ -6738,13 +6770,6 @@ static int __bnxt_open_nic(struct bnxt *
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
+-
+- rc = bnxt_setup_int_mode(bp);
+- if (rc) {
+- netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+- rc);
+- return rc;
+- }
+ }
+ if ((bp->flags & BNXT_FLAG_RFS) &&
+ !(bp->flags & BNXT_FLAG_USING_MSIX)) {
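
A worked example may make the remap rule concrete. The numbers here are hypothetical; the helper simply mirrors the map_idx assignment in the hunk above:

```c
/* With ulp_base_vec = 2 and ulp_msix = 4 (hypothetical values), the
 * completion rings map to MSI-X vectors as:
 *
 *   cp ring i : 0  1  2  3  4 ...
 *   map_idx   : 0  1  6  7  8 ...   (vectors 2..5 stay with bnxt_re)
 */
static int sketch_map_idx(int i, int ulp_msix, int ulp_base_vec)
{
	if (ulp_msix && i >= ulp_base_vec)
		return i + ulp_msix;	/* skip the block reserved for RDMA */
	return i;			/* identity mapping below the block */
}
```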
diff --git a/patches.drivers/bnxt_en-Add-TC-to-hardware-QoS-queue-mapping-logic.patch b/patches.drivers/bnxt_en-Add-TC-to-hardware-QoS-queue-mapping-logic.patch
new file mode 100644
index 0000000000..ba782f8620
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-TC-to-hardware-QoS-queue-mapping-logic.patch
@@ -0,0 +1,195 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:31 -0400
+Subject: bnxt_en: Add TC to hardware QoS queue mapping logic.
+Patch-mainline: v4.18-rc1
+Git-commit: 2e8ef77ee0ff1117251a48f79d2d57d65afd0495
+References: bsc#1086282 FATE#324873
+
+The current driver maps MQPRIO traffic classes directly 1:1 to the
+internal hardware queues (TC0 maps to hardware queue 0, etc). This
+direct mapping requires the internal hardware queues to be reconfigured
+from lossless to lossy and vice versa when necessary. This
+involves reconfiguring internal buffer thresholds, which is
+disruptive and not always reliable.
+
+Implement a new scheme to map TCs to internal hardware queues by
+matching up their PFC requirements. This will eliminate the need
+to reconfigure a hardware queue's internal buffers at run time. After
+remapping, the NIC is closed and opened for the new TC to hardware
+queue mapping to take effect.
+
+This patch only adds the basic mapping logic.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
+ drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 65 ++++++++++++++++----------
+ 3 files changed, 47 insertions(+), 24 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2363,6 +2363,7 @@ static int bnxt_alloc_tx_rings(struct bn
+ for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_ring_struct *ring;
++ u8 qidx;
+
+ ring = &txr->tx_ring_struct;
+
+@@ -2391,7 +2392,8 @@ static int bnxt_alloc_tx_rings(struct bn
+
+ memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
+ }
+- ring->queue_id = bp->q_info[j].queue_id;
++ qidx = bp->tc_to_qidx[j];
++ ring->queue_id = bp->q_info[qidx].queue_id;
+ if (i < bp->tx_nr_rings_xdp)
+ continue;
+ if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
+@@ -5254,6 +5256,7 @@ static int bnxt_hwrm_queue_qportcfg(stru
+ for (i = 0; i < bp->max_tc; i++) {
+ bp->q_info[i].queue_id = *qptr++;
+ bp->q_info[i].queue_profile = *qptr++;
++ bp->tc_to_qidx[i] = i;
+ }
+
+ qportcfg_exit:
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1232,6 +1232,7 @@ struct bnxt {
+ u8 max_tc;
+ u8 max_lltc; /* lossless TCs */
+ struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
++ u8 tc_to_qidx[BNXT_MAX_QUEUE];
+
+ unsigned int current_interval;
+ #define BNXT_TIMER_INTERVAL HZ
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+@@ -21,6 +21,21 @@
+ #include "bnxt_dcb.h"
+
+ #ifdef CONFIG_BNXT_DCB
++static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id)
++{
++ int i, j;
++
++ for (i = 0; i < bp->max_tc; i++) {
++ if (bp->q_info[i].queue_id == queue_id) {
++ for (j = 0; j < bp->max_tc; j++) {
++ if (bp->tc_to_qidx[j] == i)
++ return j;
++ }
++ }
++ }
++ return -EINVAL;
++}
++
+ static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets)
+ {
+ struct hwrm_queue_pri2cos_cfg_input req = {0};
+@@ -33,10 +48,13 @@ static int bnxt_hwrm_queue_pri2cos_cfg(s
+
+ pri2cos = &req.pri0_cos_queue_id;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
++ u8 qidx;
++
+ req.enables |= cpu_to_le32(
+ QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i);
+
+- pri2cos[i] = bp->q_info[ets->prio_tc[i]].queue_id;
++ qidx = bp->tc_to_qidx[ets->prio_tc[i]];
++ pri2cos[i] = bp->q_info[qidx].queue_id;
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ return rc;
+@@ -55,17 +73,15 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ u8 *pri2cos = &resp->pri0_cos_queue_id;
+- int i, j;
++ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 queue_id = pri2cos[i];
++ int tc;
+
+- for (j = 0; j < bp->max_tc; j++) {
+- if (bp->q_info[j].queue_id == queue_id) {
+- ets->prio_tc[i] = j;
+- break;
+- }
+- }
++ tc = bnxt_queue_to_tc(bp, queue_id);
++ if (tc >= 0)
++ ets->prio_tc[i] = tc;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+@@ -81,13 +97,15 @@ static int bnxt_hwrm_queue_cos2bw_cfg(st
+ void *data;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
+- data = &req.unused_0;
+- for (i = 0; i < max_tc; i++, data += sizeof(cos2bw) - 4) {
++ for (i = 0; i < max_tc; i++) {
++ u8 qidx;
++
+ req.enables |= cpu_to_le32(
+ QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+
+ memset(&cos2bw, 0, sizeof(cos2bw));
+- cos2bw.queue_id = bp->q_info[i].queue_id;
++ qidx = bp->tc_to_qidx[i];
++ cos2bw.queue_id = bp->q_info[qidx].queue_id;
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
+ cos2bw.tsa =
+ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP;
+@@ -103,8 +121,9 @@ static int bnxt_hwrm_queue_cos2bw_cfg(st
+ cpu_to_le32((ets->tc_tx_bw[i] * 100) |
+ BW_VALUE_UNIT_PERCENT1_100);
+ }
++ data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
+ memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
+- if (i == 0) {
++ if (qidx == 0) {
+ req.queue_id0 = cos2bw.queue_id;
+ req.unused_0 = 0;
+ }
+@@ -132,22 +151,22 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(s
+
+ data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
+ for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+- int j;
++ int tc;
+
+ memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+ if (i == 0)
+ cos2bw.queue_id = resp->queue_id0;
+
+- for (j = 0; j < bp->max_tc; j++) {
+- if (bp->q_info[j].queue_id != cos2bw.queue_id)
+- continue;
+- if (cos2bw.tsa ==
+- QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
+- ets->tc_tsa[j] = IEEE_8021QAZ_TSA_STRICT;
+- } else {
+- ets->tc_tsa[j] = IEEE_8021QAZ_TSA_ETS;
+- ets->tc_tx_bw[j] = cos2bw.bw_weight;
+- }
++ tc = bnxt_queue_to_tc(bp, cos2bw.queue_id);
++ if (tc < 0)
++ continue;
++
++ if (cos2bw.tsa ==
++ QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP) {
++ ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_STRICT;
++ } else {
++ ets->tc_tsa[tc] = IEEE_8021QAZ_TSA_ETS;
++ ets->tc_tx_bw[tc] = cos2bw.bw_weight;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
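
To make the new indirection concrete, here is a hedged sketch with hypothetical values showing how a TC now resolves to a hardware queue ID, and how bnxt_queue_to_tc() inverts the mapping:

```c
/* Hypothetical remap: TC1's PFC requirements match hardware queue
 * index 2, so the TC is re-pointed instead of reconfiguring buffers.
 *
 *   tc         : 0  1  2
 *   tc_to_qidx : 0  2  1
 *
 * Forward lookup (TX ring setup, pri2cos, cos2bw):
 *   queue_id = bp->q_info[bp->tc_to_qidx[tc]].queue_id;
 *
 * Reverse lookup (bnxt_queue_to_tc): find i such that
 * q_info[i].queue_id == queue_id, then the tc with tc_to_qidx[tc] == i.
 */
```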
diff --git a/patches.drivers/bnxt_en-Add-ULP-calls-to-stop-and-restart-IRQs.patch b/patches.drivers/bnxt_en-Add-ULP-calls-to-stop-and-restart-IRQs.patch
new file mode 100644
index 0000000000..452cc0715f
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-ULP-calls-to-stop-and-restart-IRQs.patch
@@ -0,0 +1,239 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:21 -0400
+Subject: bnxt_en: Add ULP calls to stop and restart IRQs.
+Patch-mainline: v4.17-rc1
+Git-commit: ec86f14ea5064e36ee111297bdb376dda4cba264
+References: bsc#1086282 FATE#324873
+
+When the driver needs to re-initialize the IRQ vectors, we make the
+new ulp_irq_stop() call to tell the RDMA driver to disable and free
+the IRQ vectors. After the IRQ vectors have been re-initialized, we
+make the ulp_irq_restart() call to tell the RDMA driver that
+IRQs can be restarted.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 81 +++++++++++++++++++++++---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 19 +++---
+ 3 files changed, 90 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6009,8 +6009,10 @@ int bnxt_reserve_rings(struct bnxt *bp)
+ }
+ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+ (bnxt_get_num_msix(bp) != bp->total_irqs)) {
++ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
++ bnxt_ulp_irq_restart(bp, rc);
+ if (rc)
+ return rc;
+ }
+@@ -8484,16 +8486,15 @@ int bnxt_restore_pf_fw_resources(struct
+ int rc;
+
+ ASSERT_RTNL();
+- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+- return 0;
+-
+ bnxt_hwrm_func_qcaps(bp);
+
+ if (netif_running(bp->dev))
+ __bnxt_close_nic(bp, true, false);
+
++ bnxt_ulp_irq_stop(bp);
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
++ bnxt_ulp_irq_restart(bp, rc);
+
+ if (netif_running(bp->dev)) {
+ if (rc)
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -1,6 +1,6 @@
+ /* Broadcom NetXtreme-C/E network driver.
+ *
+- * Copyright (c) 2016 Broadcom Limited
++ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -101,13 +101,27 @@ static int bnxt_unregister_dev(struct bn
+ return 0;
+ }
+
++static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
++{
++ struct bnxt_en_dev *edev = bp->edev;
++ int num_msix, idx, i;
++
++ num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
++ idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
++ for (i = 0; i < num_msix; i++) {
++ ent[i].vector = bp->irq_tbl[idx + i].vector;
++ ent[i].ring_idx = idx + i;
++ ent[i].db_offset = (idx + i) * 0x80;
++ }
++}
++
+ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_msix_entry *ent, int num_msix)
+ {
+ struct net_device *dev = edev->net;
+ struct bnxt *bp = netdev_priv(dev);
+ int max_idx, max_cp_rings;
+- int avail_msix, i, idx;
++ int avail_msix, idx;
+ int rc = 0;
+
+ ASSERT_RTNL();
+@@ -154,13 +168,10 @@ static int bnxt_req_msix_vecs(struct bnx
+ avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ }
+- for (i = 0; i < avail_msix; i++) {
+- ent[i].vector = bp->irq_tbl[idx + i].vector;
+- ent[i].ring_idx = idx + i;
+- ent[i].db_offset = (idx + i) * 0x80;
+- }
++ bnxt_fill_msix_vecs(bp, ent);
+ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
++ edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
+ return avail_msix;
+ }
+
+@@ -174,11 +185,15 @@ static int bnxt_free_msix_vecs(struct bn
+ if (ulp_id != BNXT_ROCE_ULP)
+ return -EINVAL;
+
++ if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
++ return 0;
++
+ max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+ msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
++ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ if (netif_running(dev)) {
+ bnxt_close_nic(bp, true, false);
+ bnxt_open_nic(bp, true, false);
+@@ -340,6 +355,58 @@ void bnxt_ulp_shutdown(struct bnxt *bp)
+ }
+ }
+
++void bnxt_ulp_irq_stop(struct bnxt *bp)
++{
++ struct bnxt_en_dev *edev = bp->edev;
++ struct bnxt_ulp_ops *ops;
++
++ if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
++ return;
++
++ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
++ struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
++
++ if (!ulp->msix_requested)
++ return;
++
++ ops = rtnl_dereference(ulp->ulp_ops);
++ if (!ops || !ops->ulp_irq_stop)
++ return;
++ ops->ulp_irq_stop(ulp->handle);
++ }
++}
++
++void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
++{
++ struct bnxt_en_dev *edev = bp->edev;
++ struct bnxt_ulp_ops *ops;
++
++ if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
++ return;
++
++ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
++ struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
++ struct bnxt_msix_entry *ent = NULL;
++
++ if (!ulp->msix_requested)
++ return;
++
++ ops = rtnl_dereference(ulp->ulp_ops);
++ if (!ops || !ops->ulp_irq_restart)
++ return;
++
++ if (!err) {
++ ent = kcalloc(ulp->msix_requested, sizeof(*ent),
++ GFP_KERNEL);
++ if (!ent)
++ return;
++ bnxt_fill_msix_vecs(bp, ent);
++ }
++ ops->ulp_irq_restart(ulp->handle, ent);
++ kfree(ent);
++ }
++}
++
+ void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
+ {
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+@@ -1,6 +1,6 @@
+ /* Broadcom NetXtreme-C/E network driver.
+ *
+- * Copyright (c) 2016 Broadcom Limited
++ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -20,6 +20,12 @@
+ struct hwrm_async_event_cmpl;
+ struct bnxt;
+
++struct bnxt_msix_entry {
++ u32 vector;
++ u32 ring_idx;
++ u32 db_offset;
++};
++
+ struct bnxt_ulp_ops {
+ /* async_notifier() cannot sleep (in BH context) */
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+@@ -27,12 +33,8 @@ struct bnxt_ulp_ops {
+ void (*ulp_start)(void *);
+ void (*ulp_sriov_config)(void *, int);
+ void (*ulp_shutdown)(void *);
+-};
+-
+-struct bnxt_msix_entry {
+- u32 vector;
+- u32 ring_idx;
+- u32 db_offset;
++ void (*ulp_irq_stop)(void *);
++ void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
+ };
+
+ struct bnxt_fw_msg {
+@@ -61,6 +63,7 @@ struct bnxt_en_dev {
+ #define BNXT_EN_FLAG_ROCEV2_CAP 0x2
+ #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
+ BNXT_EN_FLAG_ROCEV2_CAP)
++ #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
+ const struct bnxt_en_ops *en_ops;
+ struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ };
+@@ -92,6 +95,8 @@ void bnxt_ulp_stop(struct bnxt *bp);
+ void bnxt_ulp_start(struct bnxt *bp);
+ void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
+ void bnxt_ulp_shutdown(struct bnxt *bp);
++void bnxt_ulp_irq_stop(struct bnxt *bp);
++void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
+ void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
+ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
+
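
The resulting contract between the two drivers fits in four lines; this sketch only restates the call sequence from the bnxt_reserve_rings() hunk above:

```c
/* Every MSI-X reshuffle is now bracketed by the ULP calls, so bnxt_re
 * never holds a vector while bnxt_en frees or re-allocates it.
 */
bnxt_ulp_irq_stop(bp);		/* RDMA driver syncs and frees its IRQs */
bnxt_clear_int_mode(bp);	/* L2 driver releases all MSI-X vectors */
rc = bnxt_init_int_mode(bp);	/* L2 driver re-allocates the vectors */
bnxt_ulp_irq_restart(bp, rc);	/* RDMA driver re-requests on new vectors */
```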
diff --git a/patches.drivers/bnxt_en-Add-cache-line-size-setting-to-optimize-perf.patch b/patches.drivers/bnxt_en-Add-cache-line-size-setting-to-optimize-perf.patch
new file mode 100644
index 0000000000..4762bbcd59
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-cache-line-size-setting-to-optimize-perf.patch
@@ -0,0 +1,58 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:15 -0500
+Subject: bnxt_en: Add cache line size setting to optimize performance.
+Patch-mainline: v4.16-rc1
+Git-commit: c3480a603773cfc5d8aa44dbbee6c96e0f9d4d9d
+References: bsc#1086282 FATE#324873
+
+The chip supports 64-byte and 128-byte cache line size for more optimal
+DMA performance when matched to the CPU cache line size. The default is 64.
+If the system is using 128-byte cache line size, set it to 128.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5357,6 +5357,28 @@ static int bnxt_hwrm_set_br_mode(struct
+ return rc;
+ }
+
++static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
++{
++ struct hwrm_func_cfg_input req = {0};
++ int rc;
++
++ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
++ return 0;
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
++ req.fid = cpu_to_le16(0xffff);
++ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
++ req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
++ if (size == 128)
++ req.cache_linesize =
++ FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
++
++ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (rc)
++ rc = -EIO;
++ return rc;
++}
++
+ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+ {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
+@@ -8561,6 +8583,8 @@ static int bnxt_init_one(struct pci_dev
+ else
+ device_set_wakeup_capable(&pdev->dev, false);
+
++ bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
++
+ if (BNXT_PF(bp)) {
+ if (!bnxt_pf_wq) {
+ bnxt_pf_wq =
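
For reference, the probe-time usage is a single call, as in the bnxt_init_one() hunk above; cache_line_size() is the kernel's helper returning the CPU cache line size, and the new function quietly does nothing on VFs or on firmware older than spec 0x10803:

```c
/* Match the device's DMA stride to the host CPU's cache line size. */
bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
```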
diff --git a/patches.drivers/bnxt_en-Add-extended-port-statistics-support.patch b/patches.drivers/bnxt_en-Add-extended-port-statistics-support.patch
new file mode 100644
index 0000000000..243700af7e
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-extended-port-statistics-support.patch
@@ -0,0 +1,202 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:12 -0400
+Subject: bnxt_en: Add extended port statistics support
+Patch-mainline: v4.17-rc1
+Git-commit: 00db3cba35211cd7d458d378a5931fadfa86a17c
+References: bsc#1086282 FATE#324873
+
+Gather periodic extended port statistics, if the device is a PF and
+the link is up.
+
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 45 +++++++++++++++++++++-
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6 ++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 32 +++++++++++++++
+ 3 files changed, 81 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3034,12 +3034,21 @@ static void bnxt_free_stats(struct bnxt
+ u32 size, i;
+ struct pci_dev *pdev = bp->pdev;
+
++ bp->flags &= ~BNXT_FLAG_PORT_STATS;
++ bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
++
+ if (bp->hw_rx_port_stats) {
+ dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
+ bp->hw_rx_port_stats,
+ bp->hw_rx_port_stats_map);
+ bp->hw_rx_port_stats = NULL;
+- bp->flags &= ~BNXT_FLAG_PORT_STATS;
++ }
++
++ if (bp->hw_rx_port_stats_ext) {
++ dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
++ bp->hw_rx_port_stats_ext,
++ bp->hw_rx_port_stats_ext_map);
++ bp->hw_rx_port_stats_ext = NULL;
+ }
+
+ if (!bp->bnapi)
+@@ -3095,6 +3104,21 @@ static int bnxt_alloc_stats(struct bnxt
+ bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
+ sizeof(struct rx_port_stats) + 512;
+ bp->flags |= BNXT_FLAG_PORT_STATS;
++
++ /* Display extended statistics only if FW supports it */
++ if (bp->hwrm_spec_code < 0x10804 ||
++ bp->hwrm_spec_code == 0x10900)
++ return 0;
++
++ bp->hw_rx_port_stats_ext =
++ dma_zalloc_coherent(&pdev->dev,
++ sizeof(struct rx_port_stats_ext),
++ &bp->hw_rx_port_stats_ext_map,
++ GFP_KERNEL);
++ if (!bp->hw_rx_port_stats_ext)
++ return 0;
++
++ bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
+ }
+ return 0;
+ }
+@@ -5285,6 +5309,21 @@ static int bnxt_hwrm_port_qstats(struct
+ return rc;
+ }
+
++static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
++{
++ struct hwrm_port_qstats_ext_input req = {0};
++ struct bnxt_pf_info *pf = &bp->pf;
++
++ if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
++ return 0;
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
++ req.port_id = cpu_to_le16(pf->port_id);
++ req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
++ req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
++ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++}
++
+ static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
+ {
+ if (bp->vxlan_port_cnt) {
+@@ -7424,8 +7463,10 @@ static void bnxt_sp_task(struct work_str
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
++ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_port_qstats(bp);
++ bnxt_hwrm_port_qstats_ext(bp);
++ }
+
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ int rc;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1144,6 +1144,7 @@ struct bnxt {
+ #define BNXT_FLAG_DIM 0x2000000
+ #define BNXT_FLAG_ROCE_MIRROR_CAP 0x4000000
+ #define BNXT_FLAG_NEW_RM 0x8000000
++ #define BNXT_FLAG_PORT_STATS_EXT 0x10000000
+
+ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
+ BNXT_FLAG_RFS | \
+@@ -1263,8 +1264,10 @@ struct bnxt {
+
+ struct rx_port_stats *hw_rx_port_stats;
+ struct tx_port_stats *hw_tx_port_stats;
++ struct rx_port_stats_ext *hw_rx_port_stats_ext;
+ dma_addr_t hw_rx_port_stats_map;
+ dma_addr_t hw_tx_port_stats_map;
++ dma_addr_t hw_rx_port_stats_ext_map;
+ int hw_port_stats_size;
+
+ u16 hwrm_max_req_len;
+@@ -1375,6 +1378,9 @@ struct bnxt {
+ ((offsetof(struct tx_port_stats, counter) + \
+ sizeof(struct rx_port_stats) + 512) / 8)
+
++#define BNXT_RX_STATS_EXT_OFFSET(counter) \
++ (offsetof(struct rx_port_stats_ext, counter) / 8)
++
+ #define I2C_DEV_ADDR_A0 0xa0
+ #define I2C_DEV_ADDR_A2 0xa2
+ #define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -125,6 +125,9 @@ static int bnxt_set_coalesce(struct net_
+ #define BNXT_TX_STATS_ENTRY(counter) \
+ { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
+
++#define BNXT_RX_STATS_EXT_ENTRY(counter) \
++ { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
++
+ static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+@@ -211,7 +214,19 @@ static const struct {
+ BNXT_TX_STATS_ENTRY(tx_stat_error),
+ };
+
++static const struct {
++ long offset;
++ char string[ETH_GSTRING_LEN];
++} bnxt_port_stats_ext_arr[] = {
++ BNXT_RX_STATS_EXT_ENTRY(link_down_events),
++ BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
++ BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
++ BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
++ BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
++};
++
+ #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
++#define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
+
+ static int bnxt_get_num_stats(struct bnxt *bp)
+ {
+@@ -220,6 +235,9 @@ static int bnxt_get_num_stats(struct bnx
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
+ num_stats += BNXT_NUM_PORT_STATS;
+
++ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT)
++ num_stats += BNXT_NUM_PORT_STATS_EXT;
++
+ return num_stats;
+ }
+
+@@ -267,6 +285,14 @@ static void bnxt_get_ethtool_stats(struc
+ bnxt_port_stats_arr[i].offset));
+ }
+ }
++ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
++ __le64 *port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
++
++ for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++, j++) {
++ buf[j] = le64_to_cpu(*(port_stats_ext +
++ bnxt_port_stats_ext_arr[i].offset));
++ }
++ }
+ }
+
+ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+@@ -327,6 +353,12 @@ static void bnxt_get_strings(struct net_
+ buf += ETH_GSTRING_LEN;
+ }
+ }
++ if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
++ for (i = 0; i < BNXT_NUM_PORT_STATS_EXT; i++) {
++ strcpy(buf, bnxt_port_stats_ext_arr[i].string);
++ buf += ETH_GSTRING_LEN;
++ }
++ }
+ break;
+ case ETH_SS_TEST:
+ if (bp->num_tests)
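
One detail worth spelling out is the division by 8 in BNXT_RX_STATS_EXT_OFFSET(). A hedged sketch of why it works (the field name is taken from the new bnxt_port_stats_ext_arr[] above):

```c
/* The DMA'd stats block is walked as an array of __le64, so the byte
 * offset produced by offsetof() must become a 64-bit-word index.
 */
__le64 *stats = (__le64 *)bp->hw_rx_port_stats_ext;
long idx = offsetof(struct rx_port_stats_ext, link_down_events) / 8;
u64 link_down_events = le64_to_cpu(stats[idx]);
```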
diff --git a/patches.drivers/bnxt_en-Add-support-for-ndo_set_vf_trust.patch b/patches.drivers/bnxt_en-Add-support-for-ndo_set_vf_trust.patch
new file mode 100644
index 0000000000..5a5d29f87f
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-support-for-ndo_set_vf_trust.patch
@@ -0,0 +1,156 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:10 -0400
+Subject: bnxt_en: Add support for ndo_set_vf_trust
+Patch-mainline: v4.17-rc1
+Git-commit: 746df139646ea7fd11c26f88fd95a247d2a7c94b
+References: bsc#1086282 FATE#324873
+
+Trusted VFs are allowed to modify the MAC address, even when the PF
+has assigned one.
+
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 41 +++++++++++++++++++-----
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 3 +
+ 4 files changed, 37 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8113,6 +8113,7 @@ static const struct net_device_ops bnxt_
+ .ndo_set_vf_rate = bnxt_set_vf_bw,
+ .ndo_set_vf_link_state = bnxt_set_vf_link_state,
+ .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
++ .ndo_set_vf_trust = bnxt_set_vf_trust,
+ #endif
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = bnxt_poll_controller,
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -794,6 +794,7 @@ struct bnxt_vf_info {
+ #define BNXT_VF_SPOOFCHK 0x2
+ #define BNXT_VF_LINK_FORCED 0x4
+ #define BNXT_VF_LINK_UP 0x8
++#define BNXT_VF_TRUST 0x10
+ u32 func_flags; /* func cfg flags */
+ u32 min_tx_rate;
+ u32 max_tx_rate;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -1,7 +1,7 @@
+ /* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+- * Copyright (c) 2016-2017 Broadcom Limited
++ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -121,6 +121,23 @@ int bnxt_set_vf_spoofchk(struct net_devi
+ return rc;
+ }
+
++int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
++{
++ struct bnxt *bp = netdev_priv(dev);
++ struct bnxt_vf_info *vf;
++
++ if (bnxt_vf_ndo_prep(bp, vf_id))
++ return -EINVAL;
++
++ vf = &bp->pf.vf[vf_id];
++ if (trusted)
++ vf->flags |= BNXT_VF_TRUST;
++ else
++ vf->flags &= ~BNXT_VF_TRUST;
++
++ return 0;
++}
++
+ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
+ struct ifla_vf_info *ivi)
+ {
+@@ -147,6 +164,7 @@ int bnxt_get_vf_config(struct net_device
+ else
+ ivi->qos = 0;
+ ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
++ ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
+ if (!(vf->flags & BNXT_VF_LINK_FORCED))
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->flags & BNXT_VF_LINK_UP)
+@@ -886,18 +904,19 @@ exec_fwd_resp_exit:
+ return rc;
+ }
+
+-static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
++static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+ {
+ u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
+ struct hwrm_func_vf_cfg_input *req =
+ (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
+
+- /* Only allow VF to set a valid MAC address if the PF assigned MAC
+- * address is zero
++ /* Allow VF to set a valid MAC address, if trust is set to on or
++ * if the PF assigned MAC address is zero
+ */
+ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+ if (is_valid_ether_addr(req->dflt_mac_addr) &&
+- !is_valid_ether_addr(vf->mac_addr)) {
++ ((vf->flags & BNXT_VF_TRUST) ||
++ (!is_valid_ether_addr(vf->mac_addr)))) {
+ ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ }
+@@ -913,11 +932,17 @@ static int bnxt_vf_validate_set_mac(stru
+ (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
+ bool mac_ok = false;
+
+- /* VF MAC address must first match PF MAC address, if it is valid.
++ if (!is_valid_ether_addr((const u8 *)req->l2_addr))
++ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
++
++ /* Allow VF to set a valid MAC address, if trust is set to on.
++ * Or VF MAC address must first match MAC address in PF's context.
+ * Otherwise, it must match the VF MAC address if firmware spec >=
+ * 1.2.2
+ */
+- if (is_valid_ether_addr(vf->mac_addr)) {
++ if (vf->flags & BNXT_VF_TRUST) {
++ mac_ok = true;
++ } else if (is_valid_ether_addr(vf->mac_addr)) {
+ if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
+ mac_ok = true;
+ } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
+@@ -993,7 +1018,7 @@ static int bnxt_vf_req_validate_snd(stru
+
+ switch (req_type) {
+ case HWRM_FUNC_VF_CFG:
+- rc = bnxt_vf_store_mac(bp, vf);
++ rc = bnxt_vf_configure_mac(bp, vf);
+ break;
+ case HWRM_CFA_L2_FILTER_ALLOC:
+ rc = bnxt_vf_validate_set_mac(bp, vf);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+@@ -1,7 +1,7 @@
+ /* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+- * Copyright (c) 2016-2017 Broadcom Limited
++ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -17,6 +17,7 @@ int bnxt_set_vf_vlan(struct net_device *
+ int bnxt_set_vf_bw(struct net_device *, int, int, int);
+ int bnxt_set_vf_link_state(struct net_device *, int, int);
+ int bnxt_set_vf_spoofchk(struct net_device *, int, bool);
++int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trust);
+ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
+ void bnxt_sriov_disable(struct bnxt *);
+ void bnxt_hwrm_exec_fwd_req(struct bnxt *);
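
An administrator reaches the new hook through iproute2's standard VF trust knob, e.g. `ip link set dev <ifname> vf 0 trust on`. The effect on MAC validation can be sketched as follows (condensed from bnxt_vf_validate_set_mac() above; req_addr is a stand-in for the request's l2_addr):

```c
bool mac_ok = false;

if (vf->flags & BNXT_VF_TRUST)
	mac_ok = true;		/* trusted VF: any valid MAC is accepted */
else if (is_valid_ether_addr(vf->mac_addr))
	/* PF assigned a MAC: the VF must keep using exactly that one. */
	mac_ok = ether_addr_equal(req_addr, vf->mac_addr);
```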
diff --git a/patches.drivers/bnxt_en-Add-the-new-firmware-API-to-query-hardware-r.patch b/patches.drivers/bnxt_en-Add-the-new-firmware-API-to-query-hardware-r.patch
new file mode 100644
index 0000000000..266d3fc28c
--- /dev/null
+++ b/patches.drivers/bnxt_en-Add-the-new-firmware-API-to-query-hardware-r.patch
@@ -0,0 +1,101 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:07 -0500
+Subject: bnxt_en: Add the new firmware API to query hardware resources.
+Patch-mainline: v4.16-rc1
+Git-commit: be0dd9c4100c9549fe50258e3d928072e6c31590
+References: bsc#1086282 FATE#324873
+
+The new API HWRM_FUNC_RESOURCE_QCAPS provides min and max hardware
+resources. Use the new API when it is supported by firmware.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 56 +++++++++++++++++++++++++++++-
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +
+ 2 files changed, 57 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4736,7 +4736,46 @@ func_qcfg_exit:
+ return rc;
+ }
+
+-static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
++static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
++{
++ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
++ struct hwrm_func_resource_qcaps_input req = {0};
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ int rc;
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
++ req.fid = cpu_to_le16(0xffff);
++
++ mutex_lock(&bp->hwrm_cmd_lock);
++ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (rc) {
++ rc = -EIO;
++ goto hwrm_func_resc_qcaps_exit;
++ }
++
++ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
++ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
++ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
++ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
++ hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
++ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
++ hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
++ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
++ hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
++ hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
++ hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
++ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
++ hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
++ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
++ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
++ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
++
++hwrm_func_resc_qcaps_exit:
++ mutex_unlock(&bp->hwrm_cmd_lock);
++ return rc;
++}
++
++static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
+ {
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {0};
+@@ -4804,6 +4843,21 @@ hwrm_func_qcaps_exit:
+ return rc;
+ }
+
++static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
++{
++ int rc;
++
++ rc = __bnxt_hwrm_func_qcaps(bp);
++ if (rc)
++ return rc;
++ if (bp->hwrm_spec_code >= 0x10803) {
++ rc = bnxt_hwrm_func_resc_qcaps(bp);
++ if (!rc)
++ bp->flags |= BNXT_FLAG_NEW_RM;
++ }
++ return 0;
++}
++
+ static int bnxt_hwrm_func_reset(struct bnxt *bp)
+ {
+ struct hwrm_func_reset_input req = {0};
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1133,6 +1133,8 @@ struct bnxt {
+ #define BNXT_FLAG_DOUBLE_DB 0x400000
+ #define BNXT_FLAG_FW_DCBX_AGENT 0x800000
+ #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
++ #define BNXT_FLAG_DIM 0x2000000
++ #define BNXT_FLAG_NEW_RM 0x8000000
+
+ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
+ BNXT_FLAG_RFS | \
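The min/max pairs decoded above are the contract the rest of the new resource-manager code negotiates against: a function is guaranteed its min values and may be granted up to its max. A minimal sketch of how a requested count could be clamped into that window (hypothetical struct; the real values come from the HWRM_FUNC_RESOURCE_QCAPS response fields shown in the hunk):

#include <stdint.h>

/* Hypothetical mirror of one min/max pair from the qcaps response,
 * e.g. resp->min_tx_rings / resp->max_tx_rings.
 */
struct resc_range {
        uint16_t min;   /* guaranteed floor */
        uint16_t max;   /* hard ceiling */
};

/* Clamp a requested count into the firmware-advertised window. */
static uint16_t clamp_to_range(uint16_t want, struct resc_range r)
{
        if (want < r.min)
                return r.min;
        if (want > r.max)
                return r.max;
        return want;
}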
diff --git a/patches.drivers/bnxt_en-Adjust-default-rings-for-multi-port-NICs.patch b/patches.drivers/bnxt_en-Adjust-default-rings-for-multi-port-NICs.patch
new file mode 100644
index 0000000000..b726deaf10
--- /dev/null
+++ b/patches.drivers/bnxt_en-Adjust-default-rings-for-multi-port-NICs.patch
@@ -0,0 +1,38 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:07 -0400
+Subject: bnxt_en: Adjust default rings for multi-port NICs.
+Patch-mainline: v4.17-rc1
+Git-commit: 1d3ef13dd48da9177e417379644be9003bc459cc
+References: bsc#1086282 FATE#324873
+
+Change the default ring logic to select a default of up to 8 rings per
+port, provided that default rings x NIC ports <= total CPUs.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8302,9 +8302,15 @@ static int bnxt_set_dflt_rings(struct bn
+ if (sh)
+ bp->flags |= BNXT_FLAG_SHARED_RINGS;
+ dflt_rings = netif_get_num_default_rss_queues();
+- /* Reduce default rings to reduce memory usage on multi-port cards */
+- if (bp->port_count > 1)
+- dflt_rings = min_t(int, dflt_rings, 4);
++ /* Reduce default rings on multi-port cards so that total default
++ * rings do not exceed CPU count.
++ */
++ if (bp->port_count > 1) {
++ int max_rings =
++ max_t(int, num_online_cpus() / bp->port_count, 1);
++
++ dflt_rings = min_t(int, dflt_rings, max_rings);
++ }
+ rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
+ if (rc)
+ return rc;
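To make the new default concrete: on a host with 16 online CPUs and a 2-port NIC, max_rings = max(16 / 2, 1) = 8, so the 8-ring default stands; with 4 ports it drops to 4. A standalone sketch of the same arithmetic (plain parameters standing in for num_online_cpus() and netif_get_num_default_rss_queues()):

/* Sketch of the multi-port default ring cap added above. */
static int dflt_rings_for_port(int online_cpus, int port_count,
                               int default_rss_queues)
{
        int dflt = default_rss_queues;

        if (port_count > 1) {
                int max_rings = online_cpus / port_count;

                if (max_rings < 1)
                        max_rings = 1;
                if (dflt > max_rings)
                        dflt = max_rings;
        }
        return dflt;
}

/* dflt_rings_for_port(16, 2, 8) == 8; dflt_rings_for_port(16, 4, 8) == 4 */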
diff --git a/patches.drivers/bnxt_en-Always-forward-VF-MAC-address-to-the-PF.patch b/patches.drivers/bnxt_en-Always-forward-VF-MAC-address-to-the-PF.patch
new file mode 100644
index 0000000000..00a6283d3c
--- /dev/null
+++ b/patches.drivers/bnxt_en-Always-forward-VF-MAC-address-to-the-PF.patch
@@ -0,0 +1,52 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Tue, 8 May 2018 03:18:41 -0400
+Subject: bnxt_en: Always forward VF MAC address to the PF.
+Patch-mainline: v4.18-rc1
+Git-commit: 707e7e96602675beb5e09bb994195663da6eb56d
+References: bsc#1086282 FATE#324873
+
+The current code already forwards the VF MAC address to the PF, except
+in one case. If the VF driver gets a valid MAC address from the firmware
+during probe time, it will not forward the MAC address to the PF,
+incorrectly assuming that the PF already knows the MAC address. This
+causes "ip link show" to show zero VF MAC addresses for this case.
+
+This assumption is not correct. Newer firmware remembers the VF MAC
+address last used by the VF and provides it to the VF driver during
+probe. So we need to always forward the VF MAC address to the PF.
+
+The forwarded MAC address may now be the PF assigned MAC address and so we
+need to make sure we approve it for this case.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8583,8 +8583,8 @@ static int bnxt_init_mac_addr(struct bnx
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ eth_hw_addr_random(bp->dev);
+- rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ }
++ rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
+ #endif
+ }
+ return rc;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -923,7 +923,8 @@ static int bnxt_vf_configure_mac(struct
+ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+ if (is_valid_ether_addr(req->dflt_mac_addr) &&
+ ((vf->flags & BNXT_VF_TRUST) ||
+- (!is_valid_ether_addr(vf->mac_addr)))) {
++ !is_valid_ether_addr(vf->mac_addr) ||
++ ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
+ ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+ }
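After this change the forwarding decision is a three-way disjunction: the VF is trusted, or no PF-assigned MAC exists, or the VF is merely re-asserting the PF-assigned MAC (the case the fix adds). A compact sketch of the predicate, with boolean inputs standing in for the flag and address tests in the hunk above:

#include <stdbool.h>

/* Forward the VF's FUNC_VF_CFG MAC to firmware when any one holds:
 * the VF is trusted, the PF never assigned a MAC, or the VF is
 * re-asserting the PF-assigned MAC (the case this patch adds).
 */
static bool should_forward_vf_mac(bool vf_trusted, bool pf_mac_valid,
                                  bool req_equals_pf_mac)
{
        return vf_trusted || !pf_mac_valid || req_equals_pf_mac;
}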
diff --git a/patches.drivers/bnxt_en-Change-IRQ-assignment-for-RDMA-driver.patch b/patches.drivers/bnxt_en-Change-IRQ-assignment-for-RDMA-driver.patch
new file mode 100644
index 0000000000..af2f05008a
--- /dev/null
+++ b/patches.drivers/bnxt_en-Change-IRQ-assignment-for-RDMA-driver.patch
@@ -0,0 +1,150 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:17 -0400
+Subject: bnxt_en: Change IRQ assignment for RDMA driver.
+Patch-mainline: v4.17-rc1
+Git-commit: 08654eb213a8066b30c41e22067a9f066b40c80f
+References: bsc#1086282 FATE#324873
+
+In the current code, the range of MSIX vectors allocated for the RDMA
+driver is disjoint from the network driver. This creates a problem
+for the new firmware ring reservation scheme. The new scheme requires
+the reserved completion rings/MSIX vectors to be in a contiguous
+range.
+
+Change the logic so that RDMA MSIX vectors are allocated contiguously
+with the vectors used by bnxt_en on new firmware that uses the new scheme.
+The new function bnxt_get_num_msix() calculates the exact number of
+vectors needed by both drivers.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 ++++++++++++++++++++++++--
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 30 ++++++++++++++++++++++++-
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 3 ++
+ 3 files changed, 61 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4695,6 +4695,21 @@ static int bnxt_hwrm_reserve_rings(struc
+ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
+ }
+
++static int bnxt_cp_rings_in_use(struct bnxt *bp)
++{
++ int cp = bp->cp_nr_rings;
++ int ulp_msix, ulp_base;
++
++ ulp_msix = bnxt_get_ulp_msix_num(bp);
++ if (ulp_msix) {
++ ulp_base = bnxt_get_ulp_msix_base(bp);
++ cp += ulp_msix;
++ if ((ulp_base + ulp_msix) > cp)
++ cp = ulp_base + ulp_msix;
++ }
++ return cp;
++}
++
+ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
+@@ -5838,12 +5853,24 @@ void bnxt_set_max_func_irqs(struct bnxt
+ bp->hw_resc.max_irqs = max_irqs;
+ }
+
++static int bnxt_get_num_msix(struct bnxt *bp)
++{
++ if (!(bp->flags & BNXT_FLAG_NEW_RM))
++ return bnxt_get_max_func_irqs(bp);
++
++ return bnxt_cp_rings_in_use(bp);
++}
++
+ static int bnxt_init_msix(struct bnxt *bp)
+ {
+- int i, total_vecs, rc = 0, min = 1;
++ int i, total_vecs, max, rc = 0, min = 1;
+ struct msix_entry *msix_ent;
+
+- total_vecs = bnxt_get_max_func_irqs(bp);
++ total_vecs = bnxt_get_num_msix(bp);
++ max = bnxt_get_max_func_irqs(bp);
++ if (total_vecs > max)
++ total_vecs = max;
++
+ msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!msix_ent)
+ return -ENOMEM;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -116,6 +116,9 @@ static int bnxt_req_msix_vecs(struct bnx
+ if (!(bp->flags & BNXT_FLAG_USING_MSIX))
+ return -ENODEV;
+
++ if (edev->ulp_tbl[ulp_id].msix_requested)
++ return -EAGAIN;
++
+ max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+ max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+ avail_msix = max_idx - bp->cp_nr_rings;
+@@ -124,7 +127,11 @@ static int bnxt_req_msix_vecs(struct bnx
+ if (avail_msix > num_msix)
+ avail_msix = num_msix;
+
+- idx = max_idx - avail_msix;
++ if (bp->flags & BNXT_FLAG_NEW_RM)
++ idx = bp->cp_nr_rings;
++ else
++ idx = max_idx - avail_msix;
++ edev->ulp_tbl[ulp_id].msix_base = idx;
+ for (i = 0; i < avail_msix; i++) {
+ ent[i].vector = bp->irq_tbl[idx + i].vector;
+ ent[i].ring_idx = idx + i;
+@@ -154,6 +161,27 @@ static int bnxt_free_msix_vecs(struct bn
+ return 0;
+ }
+
++int bnxt_get_ulp_msix_num(struct bnxt *bp)
++{
++ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
++ struct bnxt_en_dev *edev = bp->edev;
++
++ return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
++ }
++ return 0;
++}
++
++int bnxt_get_ulp_msix_base(struct bnxt *bp)
++{
++ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
++ struct bnxt_en_dev *edev = bp->edev;
++
++ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
++ return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
++ }
++ return 0;
++}
++
+ void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
+ {
+ ASSERT_RTNL();
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+@@ -49,6 +49,7 @@ struct bnxt_ulp {
+ unsigned long *async_events_bmap;
+ u16 max_async_event_id;
+ u16 msix_requested;
++ u16 msix_base;
+ atomic_t ref_count;
+ };
+
+@@ -84,6 +85,8 @@ static inline bool bnxt_ulp_registered(s
+ return false;
+ }
+
++int bnxt_get_ulp_msix_num(struct bnxt *bp);
++int bnxt_get_ulp_msix_base(struct bnxt *bp);
+ void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
+ void bnxt_ulp_stop(struct bnxt *bp);
+ void bnxt_ulp_start(struct bnxt *bp);
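The invariant the patch above establishes is that ULP (RDMA) vectors sit immediately after the L2 completion-ring vectors, so the vectors in use form one contiguous range and the total is simply the highest index. A standalone sketch of the bnxt_cp_rings_in_use() arithmetic, with plain ints in place of the driver structures:

/* With the new scheme the ULP base is cp_nr_rings, so the vectors
 * occupy [0, cp_nr_rings + ulp_msix) as one contiguous range.
 */
static int cp_rings_in_use(int cp_nr_rings, int ulp_msix, int ulp_base)
{
        int cp = cp_nr_rings;

        if (ulp_msix) {
                cp += ulp_msix;
                if (ulp_base + ulp_msix > cp)
                        cp = ulp_base + ulp_msix;
        }
        return cp;
}

/* e.g. cp_rings_in_use(8, 4, 8) == 12: L2 gets vectors 0-7, RDMA 8-11 */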
diff --git a/patches.drivers/bnxt_en-Check-max_tx_scheduler_inputs-value-from-fir.patch b/patches.drivers/bnxt_en-Check-max_tx_scheduler_inputs-value-from-fir.patch
new file mode 100644
index 0000000000..9281d5c384
--- /dev/null
+++ b/patches.drivers/bnxt_en-Check-max_tx_scheduler_inputs-value-from-fir.patch
@@ -0,0 +1,101 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:13 -0400
+Subject: bnxt_en: Check max_tx_scheduler_inputs value from firmware.
+Patch-mainline: v4.17-rc1
+Git-commit: db4723b3cd2d836ae44382d16e6a4418ae8929dc
+References: bsc#1086282 FATE#324873
+
+When checking for the maximum pre-set TX channels for ethtool -l, we
+need to check the current max_tx_scheduler_inputs parameter from firmware.
+This parameter specifies the max input for the internal QoS nodes currently
+available to this function. The function's TX rings will be capped by this
+parameter. By adding this logic, we provide a more accurate pre-set max
+TX channels to the user.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++++--
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 11 +++++++++++
+ 3 files changed, 19 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5038,7 +5038,7 @@ func_qcfg_exit:
+ return rc;
+ }
+
+-static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
++int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
+ {
+ struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_func_resource_qcaps_input req = {0};
+@@ -5055,6 +5055,10 @@ static int bnxt_hwrm_func_resc_qcaps(str
+ goto hwrm_func_resc_qcaps_exit;
+ }
+
++ hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
++ if (!all)
++ goto hwrm_func_resc_qcaps_exit;
++
+ hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
+@@ -5161,7 +5165,7 @@ static int bnxt_hwrm_func_qcaps(struct b
+ if (rc)
+ return rc;
+ if (bp->hwrm_spec_code >= 0x10803) {
+- rc = bnxt_hwrm_func_resc_qcaps(bp);
++ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_NEW_RM;
+ }
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -765,6 +765,7 @@ struct bnxt_hw_resc {
+ u16 min_tx_rings;
+ u16 max_tx_rings;
+ u16 resv_tx_rings;
++ u16 max_tx_sch_inputs;
+ u16 min_rx_rings;
+ u16 max_rx_rings;
+ u16 resv_rx_rings;
+@@ -1446,6 +1447,7 @@ int bnxt_hwrm_set_pause(struct bnxt *);
+ int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+ int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
+ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
++int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
+ int bnxt_hwrm_fw_set_time(struct bnxt *);
+ int bnxt_open_nic(struct bnxt *, bool, bool);
+ int bnxt_half_open_nic(struct bnxt *bp);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -413,15 +413,26 @@ static void bnxt_get_channels(struct net
+ struct ethtool_channels *channel)
+ {
+ struct bnxt *bp = netdev_priv(dev);
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int max_rx_rings, max_tx_rings, tcs;
++ int max_tx_sch_inputs;
++
++ /* Get the most up-to-date max_tx_sch_inputs. */
++ if (bp->flags & BNXT_FLAG_NEW_RM)
++ bnxt_hwrm_func_resc_qcaps(bp, false);
++ max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;
+
+ bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
++ if (max_tx_sch_inputs)
++ max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
+ channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
+
+ if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
+ max_rx_rings = 0;
+ max_tx_rings = 0;
+ }
++ if (max_tx_sch_inputs)
++ max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
+
+ tcs = netdev_get_num_tc(dev);
+ if (tcs > 1)
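The cap above is applied twice, once to the combined maximum and once to the separate ring maximums, with 0 meaning firmware did not report the field. A one-function sketch of the cap:

/* Cap the TX channel maximum by the QoS scheduler input limit;
 * 0 means firmware did not report the field, so no cap applies.
 */
static int cap_tx_channels(int max_tx_rings, int max_tx_sch_inputs)
{
        if (max_tx_sch_inputs && max_tx_rings > max_tx_sch_inputs)
                return max_tx_sch_inputs;
        return max_tx_rings;
}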
diff --git a/patches.drivers/bnxt_en-Check-the-lengths-of-encapsulated-firmware-r.patch b/patches.drivers/bnxt_en-Check-the-lengths-of-encapsulated-firmware-r.patch
new file mode 100644
index 0000000000..19b752d2d2
--- /dev/null
+++ b/patches.drivers/bnxt_en-Check-the-lengths-of-encapsulated-firmware-r.patch
@@ -0,0 +1,74 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:33 -0400
+Subject: bnxt_en: Check the lengths of encapsulated firmware responses.
+Patch-mainline: v4.18-rc1
+Git-commit: 59895f596b13b4b09f739bf8470a5028a5ff2b9a
+References: bsc#1086282 FATE#324873
+
+Firmware messages that are forwarded from PF to VFs are encapsulated.
+The size of these encapsulated messages must not exceed the maximum
+defined message size. Add appropriate checks to avoid oversize
+messages. Firmware messages may be expanded in future specs and
+this will provide some guardrails to avoid data corruption.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 9 +++++++++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 12 ++++++++++++
+ 2 files changed, 21 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -809,6 +809,9 @@ static int bnxt_hwrm_fwd_resp(struct bnx
+ struct hwrm_fwd_resp_input req = {0};
+ struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
++ if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
++ return -EINVAL;
++
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
+
+ /* Set the new target id */
+@@ -845,6 +848,9 @@ static int bnxt_hwrm_fwd_err_resp(struct
+ struct hwrm_reject_fwd_resp_input req = {0};
+ struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
++ if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
++ return -EINVAL;
++
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+@@ -877,6 +883,9 @@ static int bnxt_hwrm_exec_fwd_resp(struc
+ struct hwrm_exec_fwd_resp_input req = {0};
+ struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+
++ if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
++ return -EINVAL;
++
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
+ /* Set the new target id */
+ req.target_id = cpu_to_le16(vf->fw_fid);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+@@ -11,6 +11,18 @@
+ #ifndef BNXT_SRIOV_H
+ #define BNXT_SRIOV_H
+
++#define BNXT_FWD_RESP_SIZE_ERR(n) \
++ ((offsetof(struct hwrm_fwd_resp_input, encap_resp) + n) > \
++ sizeof(struct hwrm_fwd_resp_input))
++
++#define BNXT_EXEC_FWD_RESP_SIZE_ERR(n) \
++ ((offsetof(struct hwrm_exec_fwd_resp_input, encap_request) + n) >\
++ offsetof(struct hwrm_exec_fwd_resp_input, encap_resp_target_id))
++
++#define BNXT_REJ_FWD_RESP_SIZE_ERR(n) \
++ ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\
++ offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id))
++
+ int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+ int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
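The three macros work because each forwarded message is copied into a fixed-size encapsulation area inside the outer request: a payload is oversized exactly when its end would run past the end of that area. A standalone sketch of the same offsetof() bound, using a hypothetical request layout in place of the hwrm structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical request layout: a header followed by a fixed-size
 * area that the encapsulated message is copied into.
 */
struct fwd_req {
        uint16_t req_type;
        uint16_t target_id;
        uint32_t encap_resp[24];        /* encapsulated message lands here */
};

/* Oversized exactly when the payload would overrun the encap area. */
static bool encap_size_err(size_t msg_size)
{
        return offsetof(struct fwd_req, encap_resp) + msg_size >
               sizeof(struct fwd_req);
}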
diff --git a/patches.drivers/bnxt_en-Check-unsupported-speeds-in-bnxt_update_link.patch b/patches.drivers/bnxt_en-Check-unsupported-speeds-in-bnxt_update_link.patch
new file mode 100644
index 0000000000..fd579df412
--- /dev/null
+++ b/patches.drivers/bnxt_en-Check-unsupported-speeds-in-bnxt_update_link.patch
@@ -0,0 +1,31 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Tue, 8 May 2018 03:18:39 -0400
+Subject: bnxt_en: Check unsupported speeds in bnxt_update_link() on PF only.
+Patch-mainline: v4.18-rc1
+Git-commit: dac0490718bd17df5e3995ffca14255e5f9ed22d
+References: bsc#1086282 FATE#324873
+
+Only non-NPAR PFs need to actively check and manage unsupported link
+speeds. NPAR functions and VFs do not control the link speed and
+should skip the unsupported speed detection logic, to avoid warning
+messages generated when firmware rejects the unsupported calls.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6394,6 +6394,9 @@ static int bnxt_update_link(struct bnxt
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
++ if (!BNXT_SINGLE_PF(bp))
++ return 0;
++
+ diff = link_info->support_auto_speeds ^ link_info->advertising;
+ if ((link_info->support_auto_speeds | diff) !=
+ link_info->support_auto_speeds) {
diff --git a/patches.drivers/bnxt_en-Display-function-level-rx-tx_discard_pkts-vi.patch b/patches.drivers/bnxt_en-Display-function-level-rx-tx_discard_pkts-vi.patch
new file mode 100644
index 0000000000..325d8a219b
--- /dev/null
+++ b/patches.drivers/bnxt_en-Display-function-level-rx-tx_discard_pkts-vi.patch
@@ -0,0 +1,96 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:36 -0400
+Subject: bnxt_en: Display function level rx/tx_discard_pkts via ethtool
+Patch-mainline: v4.18-rc1
+Git-commit: 20c1d28e106c0b526ae015fcac8e1e254bff091c
+References: bsc#1086282 FATE#324873
+
+Add counters to display the sum of rx/tx_discard_pkts across all rings
+as function-level statistics via ethtool.
+
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 33 ++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -128,6 +128,19 @@ static int bnxt_set_coalesce(struct net_
+ #define BNXT_RX_STATS_EXT_ENTRY(counter) \
+ { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
+
++enum {
++ RX_TOTAL_DISCARDS,
++ TX_TOTAL_DISCARDS,
++};
++
++static struct {
++ u64 counter;
++ char string[ETH_GSTRING_LEN];
++} bnxt_sw_func_stats[] = {
++ {0, "rx_total_discard_pkts"},
++ {0, "tx_total_discard_pkts"},
++};
++
+ static const struct {
+ long offset;
+ char string[ETH_GSTRING_LEN];
+@@ -225,6 +238,7 @@ static const struct {
+ BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
+ };
+
++#define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats)
+ #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
+ #define BNXT_NUM_PORT_STATS_EXT ARRAY_SIZE(bnxt_port_stats_ext_arr)
+
+@@ -232,6 +246,8 @@ static int bnxt_get_num_stats(struct bnx
+ {
+ int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
+
++ num_stats += BNXT_NUM_SW_FUNC_STATS;
++
+ if (bp->flags & BNXT_FLAG_PORT_STATS)
+ num_stats += BNXT_NUM_PORT_STATS;
+
+@@ -267,6 +283,9 @@ static void bnxt_get_ethtool_stats(struc
+ if (!bp->bnapi)
+ return;
+
++ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
++ bnxt_sw_func_stats[i].counter = 0;
++
+ for (i = 0; i < bp->cp_nr_rings; i++) {
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+@@ -276,7 +295,16 @@ static void bnxt_get_ethtool_stats(struc
+ for (k = 0; k < stat_fields; j++, k++)
+ buf[j] = le64_to_cpu(hw_stats[k]);
+ buf[j++] = cpr->rx_l4_csum_errors;
++
++ bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
++ le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
++ bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
++ le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
+ }
++
++ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
++ buf[j] = bnxt_sw_func_stats[i].counter;
++
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;
+
+@@ -347,6 +375,11 @@ static void bnxt_get_strings(struct net_
+ sprintf(buf, "[%d]: rx_l4_csum_errors", i);
+ buf += ETH_GSTRING_LEN;
+ }
++ for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
++ strcpy(buf, bnxt_sw_func_stats[i].string);
++ buf += ETH_GSTRING_LEN;
++ }
++
+ if (bp->flags & BNXT_FLAG_PORT_STATS) {
+ for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
+ strcpy(buf, bnxt_port_stats_arr[i].string);
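The two new entries are software counters: every ethtool dump zeroes them and re-accumulates the per-ring hardware counters, so they always reflect the current ring set. A standalone sketch of that aggregation, with host-endian u64 fields standing in for the driver's __le64 DMA statistics:

#include <stdint.h>

struct ring_stats {
        uint64_t rx_discard_pkts;
        uint64_t tx_discard_pkts;
};

/* Recompute the function-level totals from scratch on each dump,
 * as bnxt_get_ethtool_stats() does after this patch.
 */
static void sum_discards(const struct ring_stats *rings, int nr_rings,
                         uint64_t *rx_total, uint64_t *tx_total)
{
        int i;

        *rx_total = 0;
        *tx_total = 0;
        for (i = 0; i < nr_rings; i++) {
                *rx_total += rings[i].rx_discard_pkts;
                *tx_total += rings[i].tx_discard_pkts;
        }
}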
diff --git a/patches.drivers/bnxt_en-Do-not-allow-VF-to-read-EEPROM.patch b/patches.drivers/bnxt_en-Do-not-allow-VF-to-read-EEPROM.patch
new file mode 100644
index 0000000000..b902615e14
--- /dev/null
+++ b/patches.drivers/bnxt_en-Do-not-allow-VF-to-read-EEPROM.patch
@@ -0,0 +1,31 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:37 -0400
+Subject: bnxt_en: Do not allow VF to read EEPROM.
+Patch-mainline: v4.18-rc1
+Git-commit: 4cebbaca12514986039b2ac7d30e36ecd2222f64
+References: bsc#1086282 FATE#324873
+
+Firmware does not allow the operation and would return failure, causing
+a warning in dmesg. So check for VF and disallow it in the driver.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1806,6 +1806,11 @@ static int nvm_get_dir_info(struct net_d
+
+ static int bnxt_get_eeprom_len(struct net_device *dev)
+ {
++ struct bnxt *bp = netdev_priv(dev);
++
++ if (BNXT_VF(bp))
++ return 0;
++
+ /* The -1 return value allows the entire 32-bit range of offsets to be
+ * passed via the ethtool command-line utility.
+ */
diff --git a/patches.drivers/bnxt_en-Do-not-set-firmware-time-from-VF-driver-on-o.patch b/patches.drivers/bnxt_en-Do-not-set-firmware-time-from-VF-driver-on-o.patch
new file mode 100644
index 0000000000..13e1825d9c
--- /dev/null
+++ b/patches.drivers/bnxt_en-Do-not-set-firmware-time-from-VF-driver-on-o.patch
@@ -0,0 +1,29 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:34 -0400
+Subject: bnxt_en: Do not set firmware time from VF driver on older firmware.
+Patch-mainline: v4.18-rc1
+Git-commit: ca2c39e2ec04e78ca6eb5162621cb9a5b897ca16
+References: bsc#1086282 FATE#324873
+
+Older firmware will reject this call and cause an error message to
+be printed by the VF driver.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5324,7 +5324,8 @@ int bnxt_hwrm_fw_set_time(struct bnxt *b
+ struct tm tm;
+ time64_t now = ktime_get_real_seconds();
+
+- if (bp->hwrm_spec_code < 0x10400)
++ if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
++ bp->hwrm_spec_code < 0x10400)
+ return -EOPNOTSUPP;
+
+ time64_to_tm(now, 0, &tm);
diff --git a/patches.drivers/bnxt_en-Don-t-reserve-rings-on-VF-when-min-rings-wer.patch b/patches.drivers/bnxt_en-Don-t-reserve-rings-on-VF-when-min-rings-wer.patch
new file mode 100644
index 0000000000..8fd95cc2db
--- /dev/null
+++ b/patches.drivers/bnxt_en-Don-t-reserve-rings-on-VF-when-min-rings-wer.patch
@@ -0,0 +1,78 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:42 -0400
+Subject: bnxt_en: Don't reserve rings on VF when min rings were not
+ provisioned by PF.
+Patch-mainline: v4.18-rc1
+Git-commit: 2773dfb201e18722265c38dacdea6ecadf933064
+References: bsc#1086282 FATE#324873
+
+When rings are more limited and the PF has not provisioned minimum
+guaranteed rings to the VF, do not reserve rings during driver probe.
+Wait until device open, when the rings will actually be used, to reserve them.
+Device open will succeed if some minimum rings can be successfully
+reserved and allocated.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27 ++++++++++++++++++++++++++-
+ 1 file changed, 26 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5896,6 +5896,9 @@ static int bnxt_init_msix(struct bnxt *b
+ if (total_vecs > max)
+ total_vecs = max;
+
++ if (!total_vecs)
++ return 0;
++
+ msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!msix_ent)
+ return -ENOMEM;
+@@ -7205,6 +7208,25 @@ skip_uc:
+ return rc;
+ }
+
++static bool bnxt_can_reserve_rings(struct bnxt *bp)
++{
++#ifdef CONFIG_BNXT_SRIOV
++ if ((bp->flags & BNXT_FLAG_NEW_RM) && BNXT_VF(bp)) {
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++
++ /* No minimum rings were provisioned by the PF. Don't
++ * reserve rings by default when device is down.
++ */
++ if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
++ return true;
++
++ if (!netif_running(bp->dev))
++ return false;
++ }
++#endif
++ return true;
++}
++
+ /* If the chip and firmware supports RFS */
+ static bool bnxt_rfs_supported(struct bnxt *bp)
+ {
+@@ -7221,7 +7243,7 @@ static bool bnxt_rfs_capable(struct bnxt
+ #ifdef CONFIG_RFS_ACCEL
+ int vnics, max_vnics, max_rss_ctxs;
+
+- if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
++ if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
+ return false;
+
+ vnics = 1 + bp->rx_nr_rings;
+@@ -8431,6 +8453,9 @@ static int bnxt_set_dflt_rings(struct bn
+ {
+ int dflt_rings, max_rx_rings, max_tx_rings, rc;
+
++ if (!bnxt_can_reserve_rings(bp))
++ return 0;
++
+ if (sh)
+ bp->flags |= BNXT_FLAG_SHARED_RINGS;
+ dflt_rings = netif_get_num_default_rss_queues();
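The gate above only bites for VFs running against the new resource manager: if the PF guaranteed no minimum rings and nothing is reserved yet, reservation is deferred until the device is actually opened. A compact sketch of the predicate, with plain parameters in place of the driver state:

#include <stdbool.h>

/* Defer ring reservation only for a VF on the new resource manager
 * that has no guaranteed or already-reserved rings and is not up.
 */
static bool can_reserve_rings(bool new_rm, bool is_vf, int min_tx_rings,
                              int resv_tx_rings, bool netif_running)
{
        if (new_rm && is_vf) {
                if (min_tx_rings || resv_tx_rings)
                        return true;
                return netif_running;
        }
        return true;
}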
diff --git a/patches.drivers/bnxt_en-Eliminate-duplicate-barriers-on-weakly-order.patch b/patches.drivers/bnxt_en-Eliminate-duplicate-barriers-on-weakly-order.patch
new file mode 100644
index 0000000000..5dde4c7a2d
--- /dev/null
+++ b/patches.drivers/bnxt_en-Eliminate-duplicate-barriers-on-weakly-order.patch
@@ -0,0 +1,59 @@
+From: Sinan Kaya <okaya@codeaurora.org>
+Date: Sun, 25 Mar 2018 10:39:20 -0400
+Subject: bnxt_en: Eliminate duplicate barriers on weakly-ordered archs
+Patch-mainline: v4.17-rc1
+Git-commit: fd141fa47c03018aa1f77c335b0f444493e145d5
+References: bsc#1086282 FATE#324873
+
+Code includes wmb() followed by writel(). writel() already has a barrier on
+some architectures like arm64.
+
+This ends up CPU observing two barriers back to back before executing the
+register write.
+
+Create a new wrapper function with relaxed write operator. Use the new
+wrapper when a write is following a wmb().
+
+Since the code already has an explicit barrier call, change writel() to
+writel_relaxed().
+
+Also add mmiowb() so that the write does not move outside of its scope.
+
+Signed-off-by: Sinan Kaya <okaya@codeaurora.org>
+Acked-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9 +++++++++
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1918,7 +1918,7 @@ static int bnxt_poll_work(struct bnxt *b
+ /* Sync BD data before updating doorbell */
+ wmb();
+
+- bnxt_db_write(bp, db, DB_KEY_TX | prod);
++ bnxt_db_write_relaxed(bp, db, DB_KEY_TX | prod);
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1392,6 +1392,15 @@ static inline u32 bnxt_tx_avail(struct b
+ ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
+ }
+
++/* For TX and RX ring doorbells with no ordering guarantee*/
++static inline void bnxt_db_write_relaxed(struct bnxt *bp, void __iomem *db,
++ u32 val)
++{
++ writel_relaxed(val, db);
++ if (bp->flags & BNXT_FLAG_DOUBLE_DB)
++ writel_relaxed(val, db);
++}
++
+ /* For TX and RX ring doorbells */
+ static inline void bnxt_db_write(struct bnxt *bp, void __iomem *db, u32 val)
+ {
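The idiom generalizes to any producer/doorbell MMIO sequence: publish the descriptors, pay for exactly one explicit wmb(), then ring the doorbell with the relaxed accessor so architectures like arm64 do not add a second barrier inside writel(). A kernel-context sketch of the pattern (assumes <linux/io.h>; not a standalone program):

/* Kernel-context sketch: producer side of a TX ring doorbell. */
static inline void ring_tx_doorbell(void __iomem *db, u32 prod, bool dbl_db)
{
        /* Publish the TX BDs first... */
        wmb();
        /* ...then use the relaxed accessor so arm64 and friends do
         * not pay for writel()'s implicit barrier a second time.
         */
        writel_relaxed(prod, db);
        if (dbl_db)             /* some chips need the doorbell twice */
                writel_relaxed(prod, db);
        mmiowb();               /* keep the write from leaking out of scope */
}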
diff --git a/patches.drivers/bnxt_en-Expand-bnxt_check_rings-to-check-all-resourc.patch b/patches.drivers/bnxt_en-Expand-bnxt_check_rings-to-check-all-resourc.patch
new file mode 100644
index 0000000000..6027bada3b
--- /dev/null
+++ b/patches.drivers/bnxt_en-Expand-bnxt_check_rings-to-check-all-resourc.patch
@@ -0,0 +1,152 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:12 -0500
+Subject: bnxt_en: Expand bnxt_check_rings() to check all resources.
+Patch-mainline: v4.16-rc1
+Git-commit: 8f23d638b36b4ff0fe5785cf01f9bdc41afb9c06
+References: bsc#1086282 FATE#324873
+
+bnxt_check_rings() is called by ethtool, XDP setup, and ndo_setup_tc()
+to see if there are enough resources to support the new configuration.
+Expand the call to test all resources if the firmware supports the new
+API. With the more flexible resource allocation scheme, this call must
+be made to check that all resources are available before committing to
+allocate the resources.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 93 +++++++++++++++++++++++++++---
+ 1 file changed, 84 insertions(+), 9 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4712,28 +4712,99 @@ static bool bnxt_need_reserve_rings(stru
+ return false;
+ }
+
+-static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
++static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
++ int ring_grps, int cp_rings)
+ {
+- struct hwrm_func_cfg_input req = {0};
++ struct hwrm_func_vf_cfg_input req = {0};
++ u32 flags, enables;
+ int rc;
+
+- if (bp->hwrm_spec_code < 0x10801)
++ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ return 0;
+
+- if (BNXT_VF(bp))
+- return 0;
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
++ flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
++ FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
++ FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
++ FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
++ FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
++ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
++ enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
++
++ req.flags = cpu_to_le32(flags);
++ req.enables = cpu_to_le32(enables);
++ req.num_tx_rings = cpu_to_le16(tx_rings);
++ req.num_rx_rings = cpu_to_le16(rx_rings);
++ req.num_cmpl_rings = cpu_to_le16(cp_rings);
++ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
++ req.num_stat_ctxs = cpu_to_le16(cp_rings);
++ req.num_vnics = cpu_to_le16(1);
++ if (bp->flags & BNXT_FLAG_RFS)
++ req.num_vnics = cpu_to_le16(rx_rings + 1);
++ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (rc)
++ return -ENOMEM;
++ return 0;
++}
++
++static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
++ int ring_grps, int cp_rings)
++{
++ struct hwrm_func_cfg_input req = {0};
++ u32 flags, enables;
++ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+- req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST);
+- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
++ flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
++ enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
+ req.num_tx_rings = cpu_to_le16(tx_rings);
++ if (bp->flags & BNXT_FLAG_NEW_RM) {
++ flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
++ FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
++ FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
++ FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
++ FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
++ enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
++ FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
++ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
++ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
++ FUNC_CFG_REQ_ENABLES_NUM_VNICS;
++ req.num_rx_rings = cpu_to_le16(rx_rings);
++ req.num_cmpl_rings = cpu_to_le16(cp_rings);
++ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
++ req.num_stat_ctxs = cpu_to_le16(cp_rings);
++ req.num_vnics = cpu_to_le16(1);
++ if (bp->flags & BNXT_FLAG_RFS)
++ req.num_vnics = cpu_to_le16(rx_rings + 1);
++ }
++ req.flags = cpu_to_le32(flags);
++ req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+ return 0;
+ }
+
++static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
++ int ring_grps, int cp_rings)
++{
++ if (bp->hwrm_spec_code < 0x10801)
++ return 0;
++
++ if (BNXT_PF(bp))
++ return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
++ ring_grps, cp_rings);
++
++ return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
++ cp_rings);
++}
++
+ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
+ {
+@@ -7365,7 +7436,8 @@ int bnxt_check_rings(struct bnxt *bp, in
+ {
+ int max_rx, max_tx, tx_sets = 1;
+ int tx_rings_needed;
+- int rc;
++ int rx_rings = rx;
++ int cp, rc;
+
+ if (tcs)
+ tx_sets = tcs;
+@@ -7381,7 +7453,10 @@ int bnxt_check_rings(struct bnxt *bp, in
+ if (max_tx < tx_rings_needed)
+ return -ENOMEM;
+
+- return bnxt_hwrm_check_tx_rings(bp, tx_rings_needed);
++ if (bp->flags & BNXT_FLAG_AGG_RINGS)
++ rx_rings <<= 1;
++ cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
++ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
+ }
+
+ static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
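The ring math feeding the new check is worth spelling out: shared completion rings need max(tx, rx) completion rings while separate ones need tx + rx, and aggregation doubles the raw RX ring count but not the ring-group count. A small sketch with a worked example:

#include <stdbool.h>

/* cp/rx accounting used by bnxt_check_rings() after this patch. */
static void check_ring_counts(int tx, int rx, bool shared, bool agg_rings,
                              int *rx_rings, int *cp)
{
        *rx_rings = agg_rings ? rx * 2 : rx;            /* raw RX rings */
        *cp = shared ? (tx > rx ? tx : rx) : tx + rx;   /* completion rings */
}

/* tx=8, rx=8, shared, agg rings: rx_rings=16, cp=8, ring groups stay 8 */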
diff --git a/patches.drivers/bnxt_en-Fix-NULL-pointer-dereference-at-bnxt_free_ir.patch b/patches.drivers/bnxt_en-Fix-NULL-pointer-dereference-at-bnxt_free_ir.patch
new file mode 100644
index 0000000000..998ff75a57
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-NULL-pointer-dereference-at-bnxt_free_ir.patch
@@ -0,0 +1,35 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 11 Apr 2018 11:50:18 -0400
+Subject: bnxt_en: Fix NULL pointer dereference at bnxt_free_irq().
+Patch-mainline: v4.17-rc1
+Git-commit: cb98526bf9b985866d648dbb9c983ba9eb59daba
+References: bsc#1086282 FATE#324873
+
+When open fails during ethtool -L ring change, for example, the driver
+may crash at bnxt_free_irq() because bp->bnapi is NULL.
+
+If we fail to allocate all the new rings, bnxt_open_nic() will free
+all the memory including bp->bnapi. Subsequent call to bnxt_close_nic()
+will try to dereference bp->bnapi in bnxt_free_irq().
+
+Fix it by checking for !bp->bnapi in bnxt_free_irq().
+
+Fixes: e5811b8c09df ("bnxt_en: Add IRQ remapping logic.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6035,7 +6035,7 @@ static void bnxt_free_irq(struct bnxt *b
+ free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
+ bp->dev->rx_cpu_rmap = NULL;
+ #endif
+- if (!bp->irq_tbl)
++ if (!bp->irq_tbl || !bp->bnapi)
+ return;
+
+ for (i = 0; i < bp->cp_nr_rings; i++) {
diff --git a/patches.drivers/bnxt_en-Fix-ethtool-x-crash-when-device-is-down.patch b/patches.drivers/bnxt_en-Fix-ethtool-x-crash-when-device-is-down.patch
new file mode 100644
index 0000000000..99235fdc87
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-ethtool-x-crash-when-device-is-down.patch
@@ -0,0 +1,45 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 11 Apr 2018 11:50:13 -0400
+Subject: bnxt_en: Fix ethtool -x crash when device is down.
+Patch-mainline: v4.17-rc1
+Git-commit: 7991cb9cfbce1b60ac1cff819350b05de4d902e1
+References: bsc#1086282 FATE#324873
+
+Fix ethtool .get_rxfh() crash by checking for valid indirection table
+address before copying the data.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -858,17 +858,22 @@ static int bnxt_get_rxfh(struct net_devi
+ u8 *hfunc)
+ {
+ struct bnxt *bp = netdev_priv(dev);
+- struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
++ struct bnxt_vnic_info *vnic;
+ int i = 0;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+- if (indir)
++ if (!bp->vnic_info)
++ return 0;
++
++ vnic = &bp->vnic_info[0];
++ if (indir && vnic->rss_table) {
+ for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
+ indir[i] = le16_to_cpu(vnic->rss_table[i]);
++ }
+
+- if (key)
++ if (key && vnic->rss_hash_key)
+ memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ return 0;
diff --git a/patches.drivers/bnxt_en-Fix-firmware-message-delay-loop-regression.patch b/patches.drivers/bnxt_en-Fix-firmware-message-delay-loop-regression.patch
new file mode 100644
index 0000000000..4941b8afa3
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-firmware-message-delay-loop-regression.patch
@@ -0,0 +1,86 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Tue, 8 May 2018 03:18:38 -0400
+Subject: bnxt_en: Fix firmware message delay loop regression.
+Patch-mainline: v4.18-rc1
+Git-commit: cc559c1ac250a6025bd4a9528e424b8da250655b
+References: bsc#1086282 FATE#324873
+
+A recent change to reduce delay granularity waiting for firmware
+reponse has caused a regression. With a tighter delay loop,
+the driver may see the beginning part of the response faster.
+The original 5 usec delay to wait for the rest of the message
+is not long enough and some messages are detected as invalid.
+
+Increase the maximum wait time from 5 usec to 20 usec. Also, fix
+the debug message that shows the total delay time for the response
+when the message times out. With the new logic, the delay time
+is not fixed per iteration of the loop, so we define a macro to
+show the total delay time.
+
+Fixes: 9751e8e71487 ("bnxt_en: reduce timeout on initial HWRM calls")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 12 ++++++++----
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 7 +++++++
+ 2 files changed, 15 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3504,6 +3504,8 @@ static int bnxt_hwrm_do_send_msg(struct
+ HWRM_RESP_LEN_SFT;
+ valid = bp->hwrm_cmd_resp_addr + len - 1;
+ } else {
++ int j;
++
+ /* Check if response len is updated */
+ for (i = 0; i < tmo_count; i++) {
+ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+@@ -3521,14 +3523,15 @@ static int bnxt_hwrm_do_send_msg(struct
+
+ if (i >= tmo_count) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
+- timeout, le16_to_cpu(req->req_type),
++ HWRM_TOTAL_TIMEOUT(i),
++ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len);
+ return -1;
+ }
+
+ /* Last byte of resp contains valid bit */
+ valid = bp->hwrm_cmd_resp_addr + len - 1;
+- for (i = 0; i < 5; i++) {
++ for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
+ /* make sure we read from updated DMA memory */
+ dma_rmb();
+ if (*valid)
+@@ -3536,9 +3539,10 @@ static int bnxt_hwrm_do_send_msg(struct
+ udelay(1);
+ }
+
+- if (i >= 5) {
++ if (j >= HWRM_VALID_BIT_DELAY_USEC) {
+ netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
+- timeout, le16_to_cpu(req->req_type),
++ HWRM_TOTAL_TIMEOUT(i),
++ le16_to_cpu(req->req_type),
+ le16_to_cpu(req->seq_id), len, *valid);
+ return -1;
+ }
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -537,6 +537,13 @@ struct rx_tpa_end_cmp_ext {
+ #define HWRM_MIN_TIMEOUT 25
+ #define HWRM_MAX_TIMEOUT 40
+
++#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? \
++ ((n) * HWRM_SHORT_MIN_TIMEOUT) : \
++ (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \
++ ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT))
++
++#define HWRM_VALID_BIT_DELAY_USEC 20
++
+ #define BNXT_RX_EVENT 1
+ #define BNXT_AGG_EVENT 2
+ #define BNXT_TX_EVENT 4
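The new macro encodes the two-phase poll: the first HWRM_SHORT_TIMEOUT_COUNTER iterations each sleep the short per-iteration delay, and every later iteration sleeps HWRM_MIN_TIMEOUT (25) usecs. A standalone sketch of the arithmetic; the short-phase constants are parameters here because their values are not visible in this hunk:

/* Total usecs slept after n poll iterations: a short per-iteration
 * delay for the first short_cnt iterations, then the longer one.
 */
static unsigned int hwrm_total_timeout(unsigned int n, unsigned int short_cnt,
                                       unsigned int short_us,
                                       unsigned int min_us)
{
        if (n <= short_cnt)
                return n * short_us;
        return short_cnt * short_us + (n - short_cnt) * min_us;
}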
diff --git a/patches.drivers/bnxt_en-Fix-regressions-when-setting-up-MQPRIO-TX-ri.patch b/patches.drivers/bnxt_en-Fix-regressions-when-setting-up-MQPRIO-TX-ri.patch
new file mode 100644
index 0000000000..0ec56bf6d7
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-regressions-when-setting-up-MQPRIO-TX-ri.patch
@@ -0,0 +1,55 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Fri, 9 Mar 2018 23:46:07 -0500
+Subject: bnxt_en: Fix regressions when setting up MQPRIO TX rings.
+Patch-mainline: v4.16-rc7
+Git-commit: 832aed16ce7af2a43dafe9d4bc9080322e042cde
+References: bsc#1086282 FATE#324873
+
+Recent changes added the bnxt_init_int_mode() call in the driver's open
+path whenever ring reservations are changed. This call was previously
+only called in the probe path. In the open path, if MQPRIO TC has been
+set up, the bnxt_init_int_mode() call would reset and mess up the MQPRIO
+per TC rings.
+
+Fix it by not re-initializing bp->tx_nr_rings_per_tc in
+bnxt_init_int_mode(). Instead, initialize it in the probe path only
+after the bnxt_init_int_mode() call.
+
+Fixes: 674f50a5b026 ("bnxt_en: Implement new method to reserve rings.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5802,7 +5802,6 @@ static int bnxt_init_msix(struct bnxt *b
+ if (rc)
+ goto msix_setup_exit;
+
+- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->cp_nr_rings = (min == 1) ?
+ max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+ bp->tx_nr_rings + bp->rx_nr_rings;
+@@ -5834,7 +5833,6 @@ static int bnxt_init_inta(struct bnxt *b
+ bp->rx_nr_rings = 1;
+ bp->tx_nr_rings = 1;
+ bp->cp_nr_rings = 1;
+- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ bp->flags |= BNXT_FLAG_SHARED_RINGS;
+ bp->irq_tbl[0].vector = bp->pdev->irq;
+ return 0;
+@@ -8570,6 +8568,11 @@ static int bnxt_init_one(struct pci_dev
+ if (rc)
+ goto init_err_pci_clean;
+
++ /* No TC has been set yet and rings may have been trimmed due to
++ * limited MSIX, so we re-initialize the TX rings per TC.
++ */
++ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++
+ bnxt_get_wol_settings(bp);
+ if (bp->flags & BNXT_FLAG_WOL_CAP)
+ device_set_wakeup_enable(&pdev->dev, bp->wol);
diff --git a/patches.drivers/bnxt_en-Fix-vnic-accounting-in-the-bnxt_check_rings-.patch b/patches.drivers/bnxt_en-Fix-vnic-accounting-in-the-bnxt_check_rings-.patch
new file mode 100644
index 0000000000..41d14a0fd6
--- /dev/null
+++ b/patches.drivers/bnxt_en-Fix-vnic-accounting-in-the-bnxt_check_rings-.patch
@@ -0,0 +1,160 @@
+From: Eddie Wai <eddie.wai@broadcom.com>
+Date: Fri, 9 Mar 2018 23:46:04 -0500
+Subject: bnxt_en: Fix vnic accounting in the bnxt_check_rings() path.
+Patch-mainline: v4.16-rc7
+Git-commit: 6fc2ffdf1001ae4fb485b3ba95ff757ae54565c9
+References: bsc#1086282 FATE#324873
+
+The number of vnics to check must be determined ahead of time because
+only standard RX rings require vnics to support RFS. The logic is
+similar to the ring reservation logic and we can now use the
+refactored common functions to do most of the work in setting up
+the firmware message.
+
+Fixes: 8f23d638b36b ("bnxt_en: Expand bnxt_check_rings() to check all resources.")
+Signed-off-by: Eddie Wai <eddie.wai@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 64 +++++++++---------------------
+ 1 file changed, 20 insertions(+), 44 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4739,39 +4739,25 @@ static bool bnxt_need_reserve_rings(stru
+ }
+
+ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+- int ring_grps, int cp_rings)
++ int ring_grps, int cp_rings, int vnics)
+ {
+ struct hwrm_func_vf_cfg_input req = {0};
+- u32 flags, enables;
++ u32 flags;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+ return 0;
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
++ __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
++ cp_rings, vnics);
+ flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+- enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
+
+ req.flags = cpu_to_le32(flags);
+- req.enables = cpu_to_le32(enables);
+- req.num_tx_rings = cpu_to_le16(tx_rings);
+- req.num_rx_rings = cpu_to_le16(rx_rings);
+- req.num_cmpl_rings = cpu_to_le16(cp_rings);
+- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+- req.num_stat_ctxs = cpu_to_le16(cp_rings);
+- req.num_vnics = cpu_to_le16(1);
+- if (bp->flags & BNXT_FLAG_RFS)
+- req.num_vnics = cpu_to_le16(rx_rings + 1);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+@@ -4779,38 +4765,23 @@ static int bnxt_hwrm_check_vf_rings(stru
+ }
+
+ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+- int ring_grps, int cp_rings)
++ int ring_grps, int cp_rings, int vnics)
+ {
+ struct hwrm_func_cfg_input req = {0};
+- u32 flags, enables;
++ u32 flags;
+ int rc;
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+- req.fid = cpu_to_le16(0xffff);
++ __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
++ cp_rings, vnics);
+ flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
+- enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS;
+- req.num_tx_rings = cpu_to_le16(tx_rings);
+- if (bp->flags & BNXT_FLAG_NEW_RM) {
++ if (bp->flags & BNXT_FLAG_NEW_RM)
+ flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
+ FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
+- enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+- FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
+- FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+- FUNC_CFG_REQ_ENABLES_NUM_VNICS;
+- req.num_rx_rings = cpu_to_le16(rx_rings);
+- req.num_cmpl_rings = cpu_to_le16(cp_rings);
+- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+- req.num_stat_ctxs = cpu_to_le16(cp_rings);
+- req.num_vnics = cpu_to_le16(1);
+- if (bp->flags & BNXT_FLAG_RFS)
+- req.num_vnics = cpu_to_le16(rx_rings + 1);
+- }
++
+ req.flags = cpu_to_le32(flags);
+- req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+@@ -4818,17 +4789,17 @@ static int bnxt_hwrm_check_pf_rings(stru
+ }
+
+ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+- int ring_grps, int cp_rings)
++ int ring_grps, int cp_rings, int vnics)
+ {
+ if (bp->hwrm_spec_code < 0x10801)
+ return 0;
+
+ if (BNXT_PF(bp))
+ return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
+- ring_grps, cp_rings);
++ ring_grps, cp_rings, vnics);
+
+ return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
+- cp_rings);
++ cp_rings, vnics);
+ }
+
+ static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
+@@ -7485,7 +7456,7 @@ int bnxt_check_rings(struct bnxt *bp, in
+ int max_rx, max_tx, tx_sets = 1;
+ int tx_rings_needed;
+ int rx_rings = rx;
+- int cp, rc;
++ int cp, vnics, rc;
+
+ if (tcs)
+ tx_sets = tcs;
+@@ -7501,10 +7472,15 @@ int bnxt_check_rings(struct bnxt *bp, in
+ if (max_tx < tx_rings_needed)
+ return -ENOMEM;
+
++ vnics = 1;
++ if (bp->flags & BNXT_FLAG_RFS)
++ vnics += rx_rings;
++
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+ cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
+- return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp);
++ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
++ vnics);
+ }
+
+ static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
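The essence of the fix is ordering: the vnic count must be derived from the standard RX ring count before the aggregation doubling, because only standard RX rings need a vnic for RFS. A small sketch with a worked example:

#include <stdbool.h>

/* Derive the vnic count from the pre-aggregation RX ring count, then
 * double the raw RX rings, matching the fixed ordering above.
 */
static void vnic_and_rx_counts(int rx, bool rfs, bool agg_rings,
                               int *vnics, int *rx_rings)
{
        *vnics = 1;
        if (rfs)
                *vnics += rx;           /* one vnic per standard RX ring */
        *rx_rings = agg_rings ? rx * 2 : rx;
}

/* rx=8 with RFS and agg rings: vnics=9 (not 17), rx_rings=16 */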
diff --git a/patches.drivers/bnxt_en-Forward-VF-MAC-address-to-the-PF.patch b/patches.drivers/bnxt_en-Forward-VF-MAC-address-to-the-PF.patch
new file mode 100644
index 0000000000..b216a41596
--- /dev/null
+++ b/patches.drivers/bnxt_en-Forward-VF-MAC-address-to-the-PF.patch
@@ -0,0 +1,140 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:14 -0500
+Subject: bnxt_en: Forward VF MAC address to the PF.
+Patch-mainline: v4.16-rc1
+Git-commit: 91cdda40714178497cbd182261b2ea6ec5cb9276
+References: bsc#1086282 FATE#324873
+
+Forward the hwrm_func_vf_cfg command from the VF to the PF driver, to
+store the VF MAC address in the PF's context. This will allow "ip link show"
+to display all VF MAC addresses.
+
+Maintain two MAC address fields in the VF info structure: one for the
+PF-assigned MAC and one for the VF-assigned MAC.
+
+Display the VF-assigned MAC in "ip link show" only if the PF-assigned
+MAC is not valid.
+
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 -
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 +-
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 50 +++++++++++++++++++++---
+ 3 files changed, 51 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -213,6 +213,7 @@ MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
+
+ static const u16 bnxt_vf_req_snif[] = {
+ HWRM_FUNC_CFG,
++ HWRM_FUNC_VF_CFG,
+ HWRM_PORT_PHY_QCFG,
+ HWRM_CFA_L2_FILTER_ALLOC,
+ };
+@@ -8348,7 +8349,7 @@ static int bnxt_init_mac_addr(struct bnx
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ if (is_valid_ether_addr(vf->mac_addr)) {
+- /* overwrite netdev dev_adr with admin VF MAC */
++ /* overwrite netdev dev_addr with admin VF MAC */
+ memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ } else {
+ eth_hw_addr_random(bp->dev);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -783,7 +783,10 @@ struct bnxt_hw_resc {
+ #if defined(CONFIG_BNXT_SRIOV)
+ struct bnxt_vf_info {
+ u16 fw_fid;
+- u8 mac_addr[ETH_ALEN];
++ u8 mac_addr[ETH_ALEN]; /* PF assigned MAC Address */
++ u8 vf_mac_addr[ETH_ALEN]; /* VF assigned MAC address, only
++ * stored by PF.
++ */
+ u16 vlan;
+ u32 flags;
+ #define BNXT_VF_QOS 0x1
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -135,7 +135,10 @@ int bnxt_get_vf_config(struct net_device
+ ivi->vf = vf_id;
+ vf = &bp->pf.vf[vf_id];
+
+- memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
++ if (is_valid_ether_addr(vf->mac_addr))
++ memcpy(&ivi->mac, vf->mac_addr, ETH_ALEN);
++ else
++ memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
+ ivi->vlan = vf->vlan;
+@@ -883,17 +886,51 @@ exec_fwd_resp_exit:
+ return rc;
+ }
+
++static int bnxt_vf_store_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
++{
++ u32 msg_size = sizeof(struct hwrm_func_vf_cfg_input);
++ struct hwrm_func_vf_cfg_input *req =
++ (struct hwrm_func_vf_cfg_input *)vf->hwrm_cmd_req_addr;
++
++ /* Only allow VF to set a valid MAC address if the PF assigned MAC
++ * address is zero
++ */
++ if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
++ if (is_valid_ether_addr(req->dflt_mac_addr) &&
++ !is_valid_ether_addr(vf->mac_addr)) {
++ ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
++ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
++ }
++ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
++ }
++ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
++}
++
+ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
+ {
+ u32 msg_size = sizeof(struct hwrm_cfa_l2_filter_alloc_input);
+ struct hwrm_cfa_l2_filter_alloc_input *req =
+ (struct hwrm_cfa_l2_filter_alloc_input *)vf->hwrm_cmd_req_addr;
++ bool mac_ok = false;
+
+- if (!is_valid_ether_addr(vf->mac_addr) ||
+- ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
++ /* VF MAC address must first match PF MAC address, if it is valid.
++ * Otherwise, it must match the VF MAC address if firmware spec >=
++ * 1.2.2
++ */
++ if (is_valid_ether_addr(vf->mac_addr)) {
++ if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
++ mac_ok = true;
++ } else if (is_valid_ether_addr(vf->vf_mac_addr)) {
++ if (ether_addr_equal((const u8 *)req->l2_addr, vf->vf_mac_addr))
++ mac_ok = true;
++ } else if (bp->hwrm_spec_code < 0x10202) {
++ mac_ok = true;
++ } else {
++ mac_ok = true;
++ }
++ if (mac_ok)
+ return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
+- else
+- return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
++ return bnxt_hwrm_fwd_err_resp(bp, vf, msg_size);
+ }
+
+ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
+@@ -955,6 +992,9 @@ static int bnxt_vf_req_validate_snd(stru
+ u32 req_type = le16_to_cpu(encap_req->req_type);
+
+ switch (req_type) {
++ case HWRM_FUNC_VF_CFG:
++ rc = bnxt_vf_store_mac(bp, vf);
++ break;
+ case HWRM_CFA_L2_FILTER_ALLOC:
+ rc = bnxt_vf_validate_set_mac(bp, vf);
+ break;
diff --git a/patches.drivers/bnxt_en-Implement-new-method-for-the-PF-to-assign-SR.patch b/patches.drivers/bnxt_en-Implement-new-method-for-the-PF-to-assign-SR.patch
new file mode 100644
index 0000000000..40dec652d2
--- /dev/null
+++ b/patches.drivers/bnxt_en-Implement-new-method-for-the-PF-to-assign-SR.patch
@@ -0,0 +1,213 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:11 -0500
+Subject: bnxt_en: Implement new method for the PF to assign SRIOV resources.
+Patch-mainline: v4.16-rc1
+Git-commit: 4673d66468b80dc37abd1159a4bd038128173d48
+References: bsc#1086282 FATE#324873
+
+Instead of the old method of evenly dividing the resources among the
+VFs, use the new firmware API to specify min and max resources for
+each VF. This way, there is more flexibility for each VF to allocate
+more or fewer resources.
+
+The min is the absolute minimum each VF needs to function. The max is
+the global resources minus the resources used by the PF. Each VF is
+guaranteed the min. Up to max resources may be available to some VFs.
+
+The PF driver can use one of two strategies specified in NVRAM to
+assign the resources: the old legacy strategy of evenly dividing the
+resources, or the new flexible strategy.
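+
+A hedged numeric sketch of the two strategies (all numbers are made
+up; only the shape of the calculation follows the code below):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* imaginary pool: 64 tx rings total, PF keeps 8, 4 VFs */
+        int avail = 64 - 8, num_vfs = 4;
+
+        /* flexible (MINIMAL) strategy: guarantee 1, allow up to the pool */
+        printf("minimal: min=%d max=%d\n", 1, avail);
+        /* legacy (MAXIMAL) strategy: even split, min == max */
+        printf("maximal: min=%d max=%d\n", avail / num_vfs, avail / num_vfs);
+        return 0;
+    }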
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 +
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 121 +++++++++++++++++++++++-
+ 3 files changed, 127 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4963,6 +4963,14 @@ static int bnxt_hwrm_func_resc_qcaps(str
+ hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+
++ if (BNXT_PF(bp)) {
++ struct bnxt_pf_info *pf = &bp->pf;
++
++ pf->vf_resv_strategy =
++ le16_to_cpu(resp->vf_reservation_strategy);
++ if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
++ pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
++ }
+ hwrm_func_resc_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -815,6 +815,9 @@ struct bnxt_pf_info {
+ u32 max_rx_wm_flows;
+ unsigned long *vf_event_bmap;
+ u16 hwrm_cmd_req_pages;
++ u8 vf_resv_strategy;
++#define BNXT_VF_RESV_STRATEGY_MAXIMAL 0
++#define BNXT_VF_RESV_STRATEGY_MINIMAL 1
+ void *hwrm_cmd_req_addr[4];
+ dma_addr_t hwrm_cmd_req_dma_addr[4];
+ struct bnxt_vf_info *vf;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -416,7 +416,100 @@ static int bnxt_hwrm_func_buf_rgtr(struc
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ }
+
+-/* only call by PF to reserve resources for VF */
++/* Only called by PF to reserve resources for VFs, returns actual number of
++ * VFs configured, or < 0 on error.
++ */
++static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
++{
++ struct hwrm_func_vf_resource_cfg_input req = {0};
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
++ u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
++ struct bnxt_pf_info *pf = &bp->pf;
++ int i, rc = 0;
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
++
++ vf_cp_rings = hw_resc->max_cp_rings - bp->cp_nr_rings;
++ vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
++ if (bp->flags & BNXT_FLAG_AGG_RINGS)
++ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2;
++ else
++ vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
++ vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings;
++ vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
++ vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
++ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
++
++ req.min_rsscos_ctx = cpu_to_le16(1);
++ req.max_rsscos_ctx = cpu_to_le16(1);
++ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
++ req.min_cmpl_rings = cpu_to_le16(1);
++ req.min_tx_rings = cpu_to_le16(1);
++ req.min_rx_rings = cpu_to_le16(1);
++ req.min_l2_ctxs = cpu_to_le16(1);
++ req.min_vnics = cpu_to_le16(1);
++ req.min_stat_ctx = cpu_to_le16(1);
++ req.min_hw_ring_grps = cpu_to_le16(1);
++ } else {
++ vf_cp_rings /= num_vfs;
++ vf_tx_rings /= num_vfs;
++ vf_rx_rings /= num_vfs;
++ vf_vnics /= num_vfs;
++ vf_stat_ctx /= num_vfs;
++ vf_ring_grps /= num_vfs;
++
++ req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
++ req.min_tx_rings = cpu_to_le16(vf_tx_rings);
++ req.min_rx_rings = cpu_to_le16(vf_rx_rings);
++ req.min_l2_ctxs = cpu_to_le16(4);
++ req.min_vnics = cpu_to_le16(vf_vnics);
++ req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
++ req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
++ }
++ req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
++ req.max_tx_rings = cpu_to_le16(vf_tx_rings);
++ req.max_rx_rings = cpu_to_le16(vf_rx_rings);
++ req.max_l2_ctxs = cpu_to_le16(4);
++ req.max_vnics = cpu_to_le16(vf_vnics);
++ req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
++ req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
++
++ mutex_lock(&bp->hwrm_cmd_lock);
++ for (i = 0; i < num_vfs; i++) {
++ req.vf_id = cpu_to_le16(pf->first_vf_id + i);
++ rc = _hwrm_send_message(bp, &req, sizeof(req),
++ HWRM_CMD_TIMEOUT);
++ if (rc) {
++ rc = -ENOMEM;
++ break;
++ }
++ pf->active_vfs = i + 1;
++ pf->vf[i].fw_fid = pf->first_vf_id + i;
++ }
++ mutex_unlock(&bp->hwrm_cmd_lock);
++ if (pf->active_vfs) {
++ u16 n = 1;
++
++ if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
++ n = pf->active_vfs;
++
++ hw_resc->max_tx_rings -= vf_tx_rings * n;
++ hw_resc->max_rx_rings -= vf_rx_rings * n;
++ hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
++ hw_resc->max_cp_rings -= vf_cp_rings * n;
++ hw_resc->max_rsscos_ctxs -= pf->active_vfs;
++ hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
++ hw_resc->max_vnics -= vf_vnics * n;
++
++ rc = pf->active_vfs;
++ }
++ return rc;
++}
++
++/* Only called by PF to reserve resources for VFs, returns actual number of
++ * VFs configured, or < 0 on error.
++ */
+ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
+ {
+ u32 rc = 0, mtu, i;
+@@ -489,7 +582,9 @@ static int bnxt_hwrm_func_cfg(struct bnx
+ total_vf_tx_rings += vf_tx_rsvd;
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+- if (!rc) {
++ if (rc)
++ rc = -ENOMEM;
++ if (pf->active_vfs) {
+ hw_resc->max_tx_rings -= total_vf_tx_rings;
+ hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
+ hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+@@ -497,10 +592,19 @@ static int bnxt_hwrm_func_cfg(struct bnx
+ hw_resc->max_rsscos_ctxs -= num_vfs;
+ hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+ hw_resc->max_vnics -= vf_vnics * num_vfs;
++ rc = pf->active_vfs;
+ }
+ return rc;
+ }
+
++static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
++{
++ if (bp->flags & BNXT_FLAG_NEW_RM)
++ return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
++ else
++ return bnxt_hwrm_func_cfg(bp, num_vfs);
++}
++
+ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
+ {
+ int rc = 0, vfs_supported;
+@@ -567,9 +671,16 @@ static int bnxt_sriov_enable(struct bnxt
+ goto err_out1;
+
+ /* Reserve resources for VFs */
+- rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
+- if (rc)
+- goto err_out2;
++ rc = bnxt_func_cfg(bp, *num_vfs);
++ if (rc != *num_vfs) {
++ if (rc <= 0) {
++ netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
++ *num_vfs = 0;
++ goto err_out2;
++ }
++ netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
++ *num_vfs = rc;
++ }
+
+ /* Register buffers for VFs */
+ rc = bnxt_hwrm_func_buf_rgtr(bp);
diff --git a/patches.drivers/bnxt_en-Implement-new-method-to-reserve-rings.patch b/patches.drivers/bnxt_en-Implement-new-method-to-reserve-rings.patch
new file mode 100644
index 0000000000..31188365c1
--- /dev/null
+++ b/patches.drivers/bnxt_en-Implement-new-method-to-reserve-rings.patch
@@ -0,0 +1,363 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:09 -0500
+Subject: bnxt_en: Implement new method to reserve rings.
+Patch-mainline: v4.16-rc1
+Git-commit: 674f50a5b026151f4109992cb594d89f5334adde
+References: bsc#1086282 FATE#324873
+
+The new method will call firmware to reserve the desired tx, rx, cmpl
+rings, ring groups, stats context, and vnic resources. A second query
+call will check the actual resources that firmware is able to reserve.
+The driver will then trim and adjust based on the actual resources
+provided by firmware. The driver will then reserve the final resources
+in use.
+
+This method is a more flexible way of using hardware resources. The
+resources are not fixed and can be adjusted by firmware. The driver
+adapts to the available resources that the firmware can reserve for
+the driver.
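+
+A minimal sketch of that reserve/query/trim loop, with stub functions
+standing in for the HWRM messages (every name and limit here is
+illustrative):
+
+    #include <stdio.h>
+
+    static int fw_tx, fw_rx;
+
+    static void fw_reserve(int tx, int rx)
+    {
+        fw_tx = tx > 16 ? 16 : tx;   /* firmware may grant less than asked */
+        fw_rx = rx > 8 ? 8 : rx;
+    }
+
+    static void fw_query(int *tx, int *rx) { *tx = fw_tx; *rx = fw_rx; }
+
+    int main(void)
+    {
+        int tx = 32, rx = 32;
+
+        fw_reserve(tx, rx);  /* 1. reserve the desired rings            */
+        fw_query(&tx, &rx);  /* 2. query what firmware actually granted */
+                             /* 3. trim/adjust the driver config here   */
+        fw_reserve(tx, rx);  /* 4. reserve the final, trimmed amounts   */
+        printf("using tx=%d rx=%d\n", tx, rx);
+        return 0;
+    }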
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 272 +++++++++++++++++++++++++++---
+ 1 file changed, 247 insertions(+), 25 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4473,6 +4473,42 @@ static void bnxt_hwrm_ring_free(struct b
+ }
+ }
+
++static int bnxt_hwrm_get_rings(struct bnxt *bp)
++{
++ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ struct hwrm_func_qcfg_input req = {0};
++ int rc;
++
++ if (bp->hwrm_spec_code < 0x10601)
++ return 0;
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
++ req.fid = cpu_to_le16(0xffff);
++ mutex_lock(&bp->hwrm_cmd_lock);
++ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (rc) {
++ mutex_unlock(&bp->hwrm_cmd_lock);
++ return -EIO;
++ }
++
++ hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
++ if (bp->flags & BNXT_FLAG_NEW_RM) {
++ u16 cp, stats;
++
++ hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
++ hw_resc->resv_hw_ring_grps =
++ le32_to_cpu(resp->alloc_hw_ring_grps);
++ hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
++ cp = le16_to_cpu(resp->alloc_cmpl_rings);
++ stats = le16_to_cpu(resp->alloc_stat_ctx);
++ cp = min_t(u16, cp, stats);
++ hw_resc->resv_cp_rings = cp;
++ }
++ mutex_unlock(&bp->hwrm_cmd_lock);
++ return 0;
++}
++
+ /* Caller must hold bp->hwrm_cmd_lock */
+ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
+ {
+@@ -4492,33 +4528,190 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt
+ return rc;
+ }
+
+-static int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
++static int
++bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
++ int ring_grps, int cp_rings, int vnics)
+ {
+ struct hwrm_func_cfg_input req = {0};
++ u32 enables = 0;
+ int rc;
+
+- if (bp->hwrm_spec_code < 0x10601)
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
++ req.fid = cpu_to_le16(0xffff);
++ enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
++ req.num_tx_rings = cpu_to_le16(tx_rings);
++ if (bp->flags & BNXT_FLAG_NEW_RM) {
++ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
++ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
++ FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ enables |= ring_grps ?
++ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
++ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
++
++ req.num_rx_rings = cpu_to_le16(rx_rings);
++ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
++ req.num_cmpl_rings = cpu_to_le16(cp_rings);
++ req.num_stat_ctxs = req.num_cmpl_rings;
++ req.num_vnics = cpu_to_le16(vnics);
++ }
++ if (!enables)
+ return 0;
+
+- if (BNXT_VF(bp))
++ req.enables = cpu_to_le32(enables);
++ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (rc)
++ return -ENOMEM;
++
++ if (bp->hwrm_spec_code < 0x10601)
++ bp->hw_resc.resv_tx_rings = tx_rings;
++
++ rc = bnxt_hwrm_get_rings(bp);
++ return rc;
++}
++
++static int
++bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
++ int ring_grps, int cp_rings, int vnics)
++{
++ struct hwrm_func_vf_cfg_input req = {0};
++ u32 enables = 0;
++ int rc;
++
++ if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
++ bp->hw_resc.resv_tx_rings = tx_rings;
+ return 0;
++ }
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+- req.fid = cpu_to_le16(0xffff);
+- req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
+- req.num_tx_rings = cpu_to_le16(*tx_rings);
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
++ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
++ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
++ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
++ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
++
++ req.num_tx_rings = cpu_to_le16(tx_rings);
++ req.num_rx_rings = cpu_to_le16(rx_rings);
++ req.num_hw_ring_grps = cpu_to_le16(ring_grps);
++ req.num_cmpl_rings = cpu_to_le16(cp_rings);
++ req.num_stat_ctxs = req.num_cmpl_rings;
++ req.num_vnics = cpu_to_le16(vnics);
++
++ req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
++ return -ENOMEM;
++
++ rc = bnxt_hwrm_get_rings(bp);
++ return rc;
++}
++
++static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
++ int cp, int vnic)
++{
++ if (BNXT_PF(bp))
++ return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
++ else
++ return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
++}
++
++static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
++ bool shared);
++
++static int __bnxt_reserve_rings(struct bnxt *bp)
++{
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ int tx = bp->tx_nr_rings;
++ int rx = bp->rx_nr_rings;
++ int cp = bp->cp_nr_rings;
++ int grp, rx_rings, rc;
++ bool sh = false;
++ int vnic = 1;
++
++ if (bp->hwrm_spec_code < 0x10601)
++ return 0;
++
++ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
++ sh = true;
++ if (bp->flags & BNXT_FLAG_RFS)
++ vnic = rx + 1;
++ if (bp->flags & BNXT_FLAG_AGG_RINGS)
++ rx <<= 1;
++
++ grp = bp->rx_nr_rings;
++ if (tx == hw_resc->resv_tx_rings &&
++ (!(bp->flags & BNXT_FLAG_NEW_RM) ||
++ (rx == hw_resc->resv_rx_rings &&
++ grp == hw_resc->resv_hw_ring_grps &&
++ cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
++ return 0;
++
++ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
++ if (rc)
+ return rc;
+
+- mutex_lock(&bp->hwrm_cmd_lock);
+- rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+- mutex_unlock(&bp->hwrm_cmd_lock);
+- if (!rc)
+- bp->hw_resc.resv_tx_rings = *tx_rings;
++ tx = hw_resc->resv_tx_rings;
++ if (bp->flags & BNXT_FLAG_NEW_RM) {
++ rx = hw_resc->resv_rx_rings;
++ cp = hw_resc->resv_cp_rings;
++ grp = hw_resc->resv_hw_ring_grps;
++ vnic = hw_resc->resv_vnics;
++ }
++
++ rx_rings = rx;
++ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
++ if (rx >= 2) {
++ rx_rings = rx >> 1;
++ } else {
++ if (netif_running(bp->dev))
++ return -ENOMEM;
++
++ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
++ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
++ bp->dev->hw_features &= ~NETIF_F_LRO;
++ bp->dev->features &= ~NETIF_F_LRO;
++ bnxt_set_ring_params(bp);
++ }
++ }
++ rx_rings = min_t(int, rx_rings, grp);
++ rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
++ if (bp->flags & BNXT_FLAG_AGG_RINGS)
++ rx = rx_rings << 1;
++ cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
++ bp->tx_nr_rings = tx;
++ bp->rx_nr_rings = rx_rings;
++ bp->cp_nr_rings = cp;
++
++ if (!tx || !rx || !cp || !grp || !vnic)
++ return -ENOMEM;
++
+ return rc;
+ }
+
++static bool bnxt_need_reserve_rings(struct bnxt *bp)
++{
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ int rx = bp->rx_nr_rings;
++ int vnic = 1;
++
++ if (bp->hwrm_spec_code < 0x10601)
++ return false;
++
++ if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
++ return true;
++
++ if (bp->flags & BNXT_FLAG_RFS)
++ vnic = rx + 1;
++ if (bp->flags & BNXT_FLAG_AGG_RINGS)
++ rx <<= 1;
++ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
++ (hw_resc->resv_rx_rings != rx ||
++ hw_resc->resv_cp_rings != bp->cp_nr_rings ||
++ hw_resc->resv_vnics != vnic))
++ return true;
++ return false;
++}
++
+ static int bnxt_hwrm_check_tx_rings(struct bnxt *bp, int tx_rings)
+ {
+ struct hwrm_func_cfg_input req = {0};
+@@ -5215,15 +5408,6 @@ static int bnxt_init_chip(struct bnxt *b
+ rc);
+ goto err_out;
+ }
+- if (bp->hw_resc.resv_tx_rings != bp->tx_nr_rings) {
+- int tx = bp->tx_nr_rings;
+-
+- if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
+- tx < bp->tx_nr_rings) {
+- rc = -ENOMEM;
+- goto err_out;
+- }
+- }
+ }
+
+ rc = bnxt_hwrm_ring_alloc(bp);
+@@ -5582,6 +5766,36 @@ static void bnxt_clear_int_mode(struct b
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ }
+
++static int bnxt_reserve_rings(struct bnxt *bp)
++{
++ int orig_cp = bp->hw_resc.resv_cp_rings;
++ int tcs = netdev_get_num_tc(bp->dev);
++ int rc;
++
++ if (!bnxt_need_reserve_rings(bp))
++ return 0;
++
++ rc = __bnxt_reserve_rings(bp);
++ if (rc) {
++ netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
++ return rc;
++ }
++ if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
++ bnxt_clear_int_mode(bp);
++ rc = bnxt_init_int_mode(bp);
++ if (rc)
++ return rc;
++ }
++ if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
++ netdev_err(bp->dev, "tx ring reservation failure\n");
++ netdev_reset_tc(bp->dev);
++ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++ return -ENOMEM;
++ }
++ bp->num_stat_ctxs = bp->cp_nr_rings;
++ return 0;
++}
++
+ static void bnxt_free_irq(struct bnxt *bp)
+ {
+ struct bnxt_irq *irq;
+@@ -6326,6 +6540,10 @@ static int __bnxt_open_nic(struct bnxt *
+ bnxt_preset_reg_win(bp);
+ netif_carrier_off(bp->dev);
+ if (irq_re_init) {
++ rc = bnxt_reserve_rings(bp);
++ if (rc)
++ return rc;
++
+ rc = bnxt_setup_int_mode(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+@@ -7978,16 +8196,20 @@ static int bnxt_set_dflt_rings(struct bn
+ bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+
+- rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
++ rc = __bnxt_reserve_rings(bp);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reserve tx rings\n");
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (sh)
+ bnxt_trim_dflt_sh_rings(bp);
+
+- bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+- bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+- bp->tx_nr_rings + bp->rx_nr_rings;
++ /* Rings may have been trimmed, re-reserve the trimmed rings. */
++ if (bnxt_need_reserve_rings(bp)) {
++ rc = __bnxt_reserve_rings(bp);
++ if (rc)
++ netdev_warn(bp->dev, "2nd rings reservation failed.\n");
++ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++ }
+ bp->num_stat_ctxs = bp->cp_nr_rings;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bp->rx_nr_rings++;
diff --git a/patches.drivers/bnxt_en-Improve-resource-accounting-for-SRIOV.patch b/patches.drivers/bnxt_en-Improve-resource-accounting-for-SRIOV.patch
new file mode 100644
index 0000000000..286e7bb5f0
--- /dev/null
+++ b/patches.drivers/bnxt_en-Improve-resource-accounting-for-SRIOV.patch
@@ -0,0 +1,49 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:14 -0400
+Subject: bnxt_en: Improve resource accounting for SRIOV.
+Patch-mainline: v4.17-rc1
+Git-commit: 596f9d55feebdf31c03172fcc82cdec62bb969ea
+References: bsc#1086282 FATE#324873
+
+When VFs are created, the current code subtracts the maximum VF
+resources from the PF's pool. This under-estimates the resources
+remaining in the PF pool. Instead, we should subtract the minimum
+VF resources. The VF minimum resources are guaranteed to the VFs
+and only these should be subtracted from the PF's pool.
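+
+For example (numbers made up): with 4 active VFs, min_tx_rings = 2 and
+max_tx_rings = 8 per VF, the PF pool should shrink by 4 * 2 = 8 tx
+rings, the guaranteed amount, rather than by 4 * 8 = 32 as before.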
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -510,18 +510,16 @@ static int bnxt_hwrm_func_vf_resc_cfg(st
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (pf->active_vfs) {
+- u16 n = 1;
++ u16 n = pf->active_vfs;
+
+- if (pf->vf_resv_strategy != BNXT_VF_RESV_STRATEGY_MINIMAL)
+- n = pf->active_vfs;
+-
+- hw_resc->max_tx_rings -= vf_tx_rings * n;
+- hw_resc->max_rx_rings -= vf_rx_rings * n;
+- hw_resc->max_hw_ring_grps -= vf_ring_grps * n;
+- hw_resc->max_cp_rings -= vf_cp_rings * n;
++ hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
++ hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
++ hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
++ n;
++ hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
+ hw_resc->max_rsscos_ctxs -= pf->active_vfs;
+- hw_resc->max_stat_ctxs -= vf_stat_ctx * n;
+- hw_resc->max_vnics -= vf_vnics * n;
++ hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
++ hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
+
+ rc = pf->active_vfs;
+ }
diff --git a/patches.drivers/bnxt_en-Improve-ring-allocation-logic.patch b/patches.drivers/bnxt_en-Improve-ring-allocation-logic.patch
new file mode 100644
index 0000000000..bd00856008
--- /dev/null
+++ b/patches.drivers/bnxt_en-Improve-ring-allocation-logic.patch
@@ -0,0 +1,150 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:16 -0400
+Subject: bnxt_en: Improve ring allocation logic.
+Patch-mainline: v4.17-rc1
+Git-commit: 9899bb59ff08a50aef033b4d388d223adca58a7f
+References: bsc#1086282 FATE#324873
+
+Currently, the driver code makes some assumptions about the group index
+and the map index of rings. This makes the code more difficult to
+understand and less flexible.
+
+Improve it by adding the grp_idx and map_idx fields explicitly to the
+bnxt_ring_struct as a union. The grp_idx is initialized for each tx ring
+and rx agg ring at init time. We do the same for the map_idx for
+each cmpl ring.
+
+The grp_idx ties the tx ring to the ring group. The map_idx is the
+doorbell index of the ring. With this new infrastructure, we can change
+the ring index mapping scheme easily in the future.
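+
+A minimal sketch of the new union (types simplified from the driver's
+bnxt_ring_struct):
+
+    #include <stdio.h>
+
+    struct ring {
+        unsigned short fw_ring_id;
+        union {
+            unsigned short grp_idx;   /* ties a tx ring to its ring group */
+            unsigned short map_idx;   /* doorbell index of a cmpl ring    */
+        };
+    };
+
+    int main(void)
+    {
+        struct ring r = { .fw_ring_id = 0 };
+
+        r.map_idx = 3;                 /* set at init time for a cmpl ring */
+        printf("doorbell index %u\n", r.map_idx);
+        return 0;
+    }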
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 32 +++++++++++++++---------------
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 +++
+ 2 files changed, 21 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2297,6 +2297,7 @@ static int bnxt_alloc_rx_rings(struct bn
+ if (rc)
+ return rc;
+
++ ring->grp_idx = i;
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+@@ -2369,6 +2370,7 @@ static int bnxt_alloc_tx_rings(struct bn
+ if (rc)
+ return rc;
+
++ ring->grp_idx = txr->bnapi->index;
+ if (bp->tx_push_size) {
+ dma_addr_t mapping;
+
+@@ -2438,6 +2440,7 @@ static int bnxt_alloc_cp_rings(struct bn
+ rc = bnxt_alloc_ring(bp, ring);
+ if (rc)
+ return rc;
++ ring->map_idx = i;
+ }
+ return 0;
+ }
+@@ -4228,12 +4231,12 @@ static int bnxt_hwrm_ring_grp_free(struc
+
+ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
+ struct bnxt_ring_struct *ring,
+- u32 ring_type, u32 map_index,
+- u32 stats_ctx_id)
++ u32 ring_type, u32 map_index)
+ {
+ int rc = 0, err = 0;
+ struct hwrm_ring_alloc_input req = {0};
+ struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
++ struct bnxt_ring_grp_info *grp_info;
+ u16 ring_id;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
+@@ -4255,10 +4258,10 @@ static int hwrm_ring_alloc_send_msg(stru
+ case HWRM_RING_ALLOC_TX:
+ req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
+ /* Association of transmit ring with completion ring */
+- req.cmpl_ring_id =
+- cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
++ grp_info = &bp->grp_info[ring->grp_idx];
++ req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
+ req.length = cpu_to_le32(bp->tx_ring_mask + 1);
+- req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
++ req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
+ req.queue_id = cpu_to_le16(ring->queue_id);
+ break;
+ case HWRM_RING_ALLOC_RX:
+@@ -4345,10 +4348,11 @@ static int bnxt_hwrm_ring_alloc(struct b
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
++ u32 map_idx = ring->map_idx;
+
+- cpr->cp_doorbell = bp->bar1 + i * 0x80;
+- rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
+- INVALID_STATS_CTX_ID);
++ cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
++ rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
++ map_idx);
+ if (rc)
+ goto err_out;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+@@ -4364,11 +4368,10 @@ static int bnxt_hwrm_ring_alloc(struct b
+ for (i = 0; i < bp->tx_nr_rings; i++) {
+ struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
+ struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
+- u32 map_idx = txr->bnapi->index;
+- u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
++ u32 map_idx = i;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
+- map_idx, fw_stats_ctx);
++ map_idx);
+ if (rc)
+ goto err_out;
+ txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
+@@ -4380,7 +4383,7 @@ static int bnxt_hwrm_ring_alloc(struct b
+ u32 map_idx = rxr->bnapi->index;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
+- map_idx, INVALID_STATS_CTX_ID);
++ map_idx);
+ if (rc)
+ goto err_out;
+ rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
+@@ -4393,13 +4396,12 @@ static int bnxt_hwrm_ring_alloc(struct b
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_ring_struct *ring =
+ &rxr->rx_agg_ring_struct;
+- u32 grp_idx = rxr->bnapi->index;
++ u32 grp_idx = ring->grp_idx;
+ u32 map_idx = grp_idx + bp->rx_nr_rings;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring,
+ HWRM_RING_ALLOC_AGG,
+- map_idx,
+- INVALID_STATS_CTX_ID);
++ map_idx);
+ if (rc)
+ goto err_out;
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -571,6 +571,10 @@ struct bnxt_ring_struct {
+ void **vmem;
+
+ u16 fw_ring_id; /* Ring id filled by Chimp FW */
++ union {
++ u16 grp_idx;
++ u16 map_idx; /* Used by cmpl rings */
++ };
+ u8 queue_id;
+ };
+
diff --git a/patches.drivers/bnxt_en-Improve-valid-bit-checking-in-firmware-respo.patch b/patches.drivers/bnxt_en-Improve-valid-bit-checking-in-firmware-respo.patch
new file mode 100644
index 0000000000..f695c1d85d
--- /dev/null
+++ b/patches.drivers/bnxt_en-Improve-valid-bit-checking-in-firmware-respo.patch
@@ -0,0 +1,102 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:15 -0400
+Subject: bnxt_en: Improve valid bit checking in firmware response message.
+Patch-mainline: v4.17-rc1
+Git-commit: 845adfe40c2a75e67ddae6639fc2b987338b7983
+References: bsc#1086282 FATE#324873
+
+When firmware sends a DMA response to the driver, the last byte of the
+message will be set to 1 to indicate that the whole response is valid.
+The driver waits for the message to be valid before reading the message.
+
+The firmware spec allows these response messages to increase in
+length by adding new fields to the end of these messages. The
+older spec's valid location may become a new field in a newer
+spec. To guarantee compatibility, the driver should zero the valid
+byte before interpreting the entire message so that any new fields not
+implemented by the older spec will be read as zero.
+
+For messages that are forwarded to VFs, we need to set the length
+and re-instate the valid bit so the VF will see the valid response.
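+
+A hedged userspace sketch of the valid-byte handshake (buffer size and
+retry count are illustrative; the driver also issues dma_rmb() before
+each read):
+
+    #include <stdio.h>
+
+    static unsigned char resp[128];
+
+    static int wait_valid(unsigned char *buf, int len)
+    {
+        unsigned char *valid = buf + len - 1;  /* last byte of response */
+        int i;
+
+        for (i = 0; i < 5; i++) {
+            if (*valid) {
+                *valid = 0;   /* zero it: an older spec's valid byte
+                               * may be a real field in a newer spec */
+                return 0;
+            }
+            /* udelay(1) in the driver */
+        }
+        return -1;
+    }
+
+    int main(void)
+    {
+        resp[63] = 1;   /* firmware sets the last byte of the message */
+        printf("rc=%d\n", wait_valid(resp, 64));
+        return 0;
+    }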
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 21 ++++++++++++++++-----
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2 ++
+ 2 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -3397,7 +3397,8 @@ static int bnxt_hwrm_do_send_msg(struct
+ int i, intr_process, rc, tmo_count;
+ struct input *req = msg;
+ u32 *data = msg;
+- __le32 *resp_len, *valid;
++ __le32 *resp_len;
++ u8 *valid;
+ u16 cp_ring_id, len = 0;
+ struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
+ u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
+@@ -3449,6 +3450,7 @@ static int bnxt_hwrm_do_send_msg(struct
+
+ i = 0;
+ tmo_count = timeout * 40;
++ resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ if (intr_process) {
+ /* Wait until hwrm response cmpl interrupt is processed */
+ while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
+@@ -3461,9 +3463,11 @@ static int bnxt_hwrm_do_send_msg(struct
+ le16_to_cpu(req->req_type));
+ return -1;
+ }
++ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
++ HWRM_RESP_LEN_SFT;
++ valid = bp->hwrm_cmd_resp_addr + len - 1;
+ } else {
+ /* Check if response len is updated */
+- resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
+ for (i = 0; i < tmo_count; i++) {
+ len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+ HWRM_RESP_LEN_SFT;
+@@ -3479,10 +3483,12 @@ static int bnxt_hwrm_do_send_msg(struct
+ return -1;
+ }
+
+- /* Last word of resp contains valid bit */
+- valid = bp->hwrm_cmd_resp_addr + len - 4;
++ /* Last byte of resp contains valid bit */
++ valid = bp->hwrm_cmd_resp_addr + len - 1;
+ for (i = 0; i < 5; i++) {
+- if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
++ /* make sure we read from updated DMA memory */
++ dma_rmb();
++ if (*valid)
+ break;
+ udelay(1);
+ }
+@@ -3495,6 +3501,11 @@ static int bnxt_hwrm_do_send_msg(struct
+ }
+ }
+
++ /* Zero valid bit for compatibility. Valid bit in an older spec
++ * may become a new field in a newer spec. We must make sure that
++ * a new field not implemented by old spec will read zero.
++ */
++ *valid = 0;
+ rc = le16_to_cpu(resp->error_code);
+ if (rc && !silent)
+ netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -974,7 +974,9 @@ static int bnxt_vf_set_link(struct bnxt
+ memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
+ sizeof(phy_qcfg_resp));
+ mutex_unlock(&bp->hwrm_cmd_lock);
++ phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
+ phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
++ phy_qcfg_resp.valid = 1;
+
+ if (vf->flags & BNXT_VF_LINK_UP) {
+ /* if physical link is down, force link up on VF */
diff --git a/patches.drivers/bnxt_en-Include-additional-hardware-port-statistics-.patch b/patches.drivers/bnxt_en-Include-additional-hardware-port-statistics-.patch
new file mode 100644
index 0000000000..ec70ea0075
--- /dev/null
+++ b/patches.drivers/bnxt_en-Include-additional-hardware-port-statistics-.patch
@@ -0,0 +1,39 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:11 -0400
+Subject: bnxt_en: Include additional hardware port statistics in ethtool -S.
+Patch-mainline: v4.17-rc1
+Git-commit: 699efed00df0631e39a639b49e3b8e27e62e6c89
+References: bsc#1086282 FATE#324873
+
+Include additional hardware port statistics in ethtool -S, which
+are useful for debugging.
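+As the diff below shows, the new entries are rx_stat_discard,
+rx_stat_err, tx_xthol_frames, tx_stat_discard and tx_stat_error; they
+appear as additional counters in "ethtool -S <dev>" output.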
+
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -169,6 +169,8 @@ static const struct {
+ BNXT_RX_STATS_ENTRY(rx_bytes),
+ BNXT_RX_STATS_ENTRY(rx_runt_bytes),
+ BNXT_RX_STATS_ENTRY(rx_runt_frames),
++ BNXT_RX_STATS_ENTRY(rx_stat_discard),
++ BNXT_RX_STATS_ENTRY(rx_stat_err),
+
+ BNXT_TX_STATS_ENTRY(tx_64b_frames),
+ BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
+@@ -204,6 +206,9 @@ static const struct {
+ BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
+ BNXT_TX_STATS_ENTRY(tx_total_collisions),
+ BNXT_TX_STATS_ENTRY(tx_bytes),
++ BNXT_TX_STATS_ENTRY(tx_xthol_frames),
++ BNXT_TX_STATS_ENTRY(tx_stat_discard),
++ BNXT_TX_STATS_ENTRY(tx_stat_error),
+ };
+
+ #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
diff --git a/patches.drivers/bnxt_en-Increase-RING_IDLE-minimum-threshold-to-50.patch b/patches.drivers/bnxt_en-Increase-RING_IDLE-minimum-threshold-to-50.patch
new file mode 100644
index 0000000000..0d26c9a416
--- /dev/null
+++ b/patches.drivers/bnxt_en-Increase-RING_IDLE-minimum-threshold-to-50.patch
@@ -0,0 +1,29 @@
+From: Andy Gospodarek <gospo@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:38 -0400
+Subject: bnxt_en: Increase RING_IDLE minimum threshold to 50
+Patch-mainline: v4.18-rc1
+Git-commit: 05abe4ddf0010e15419f5a6758b5bf44b7790982
+References: bsc#1086282 FATE#324873
+
+This keeps the RING_IDLE flag set in hardware for higher coalesce
+settings by default and improves latency.
+
+Signed-off-by: Andy Gospodarek <gospo@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7635,7 +7635,7 @@ static void bnxt_init_dflt_coal(struct b
+ coal->coal_bufs = 30;
+ coal->coal_ticks_irq = 1;
+ coal->coal_bufs_irq = 2;
+- coal->idle_thresh = 25;
++ coal->idle_thresh = 50;
+ coal->bufs_per_record = 2;
+ coal->budget = 64; /* NAPI budget */
+
diff --git a/patches.drivers/bnxt_en-Need-to-include-RDMA-rings-in-bnxt_check_rin.patch b/patches.drivers/bnxt_en-Need-to-include-RDMA-rings-in-bnxt_check_rin.patch
new file mode 100644
index 0000000000..a9ce87a60e
--- /dev/null
+++ b/patches.drivers/bnxt_en-Need-to-include-RDMA-rings-in-bnxt_check_rin.patch
@@ -0,0 +1,30 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 11 Apr 2018 11:50:17 -0400
+Subject: bnxt_en: Need to include RDMA rings in bnxt_check_rings().
+Patch-mainline: v4.17-rc1
+Git-commit: 11c3ec7bb940b6fa3f87f05f01b7f45eef08dfbb
+References: bsc#1086282 FATE#324873
+
+With recent changes to reserve both L2 and RDMA rings, we need to include
+the RDMA rings in bnxt_check_rings(). Otherwise we will under-estimate
+the rings we need during ethtool -L, which may lead to failure.
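+(A channel change such as "ethtool -L eth0 combined 8" is what drives
+this path; the device name and ring count are only illustrative.)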
+
+Fixes: fbcfc8e46741 ("bnxt_en: Reserve completion rings and MSIX for bnxt_re RDMA driver.")
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7619,6 +7619,8 @@ int bnxt_check_rings(struct bnxt *bp, in
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx_rings <<= 1;
+ cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
++ if (bp->flags & BNXT_FLAG_NEW_RM)
++ cp += bnxt_get_ulp_msix_num(bp);
+ return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
+ vnics);
+ }
diff --git a/patches.drivers/bnxt_en-Pass-complete-VLAN-TCI-to-the-stack.patch b/patches.drivers/bnxt_en-Pass-complete-VLAN-TCI-to-the-stack.patch
new file mode 100644
index 0000000000..040aafa206
--- /dev/null
+++ b/patches.drivers/bnxt_en-Pass-complete-VLAN-TCI-to-the-stack.patch
@@ -0,0 +1,49 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Fri, 9 Mar 2018 23:46:06 -0500
+Subject: bnxt_en: Pass complete VLAN TCI to the stack.
+Patch-mainline: v4.16-rc7
+Git-commit: ed7bc602f60a653e5dea488e6917d9a75d6ac0dd
+References: bsc#1086282 FATE#324873
+
+When receiving a packet with a VLAN tag, pass the entire 16-bit TCI to
+the stack when calling __vlan_hwaccel_put_tag(). The current code only
+passes the 12-bit VLAN ID and is missing the priority bits.
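+
+A small standalone example of what the wider mask preserves (values
+are illustrative):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* 802.1Q TCI layout: PCP(3 bits) | DEI(1 bit) | VID(12 bits) */
+        unsigned short tci = (5u << 13) | 100;  /* priority 5, VLAN id 100 */
+
+        printf("VID mask 0x0fff -> 0x%04x (priority bits lost)\n",
+               tci & 0x0fff);
+        printf("TCI mask 0xffff -> 0x%04x (full tag)\n", tci & 0xffff);
+        return 0;
+    }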
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++--
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 +
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_e
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ u16 vlan_proto = tpa_info->metadata >>
+ RX_CMP_FLAGS2_METADATA_TPID_SFT;
+- u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
++ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+ }
+@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp,
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+- u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
++ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
+ u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
+
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -187,6 +187,7 @@ struct rx_cmp_ext {
+ #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
+ __le32 rx_cmp_meta_data;
++ #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff
+ #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
+ #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
+ #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
diff --git a/patches.drivers/bnxt_en-Read-phy-eeprom-A2h-address-only-when-optica.patch b/patches.drivers/bnxt_en-Read-phy-eeprom-A2h-address-only-when-optica.patch
new file mode 100644
index 0000000000..5cb3667772
--- /dev/null
+++ b/patches.drivers/bnxt_en-Read-phy-eeprom-A2h-address-only-when-optica.patch
@@ -0,0 +1,91 @@
+From: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Date: Tue, 8 May 2018 03:18:40 -0400
+Subject: bnxt_en: Read phy eeprom A2h address only when optical diagnostics is
+ supported.
+Patch-mainline: v4.18-rc1
+Git-commit: 7328a23c063a9ecf56314fb9631889c1820bd0ce
+References: bsc#1086282 FATE#324873
+
+For SFP+ modules, the 0xA2 page is available only when Diagnostic
+Monitoring Type [Address A0h, Byte 92] is implemented. Extend
+bnxt_get_module_info() to read optical diagnostics support at offset
+92 (0x5c) and set the eeprom_len length to ETH_MODULE_SFF_8436_LEN
+(to exclude the A2 page) if diagnostics is not supported.
+
+Also in bnxt_get_module_info(), the module id is read from offset 0x5e,
+which is not correct. It was working by accident, as the offset was not
+effective without setting the enables flag in the firmware request. The
+SFP module id is present at location 0. Fix this by removing the offset
+and reading it from location 0.
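+
+A minimal sketch of the new length decision (macro names and values
+here are approximations of the ethtool constants, not the driver's):
+
+    #include <stdio.h>
+
+    #define SFF_DIAG_SUPPORT_OFFSET 0x5c   /* A0h byte 92 */
+    #define SFF_8436_LEN 256               /* A0h page only */
+    #define SFF_8472_LEN 512               /* A0h plus A2h page */
+
+    int main(void)
+    {
+        unsigned char a0[SFF_DIAG_SUPPORT_OFFSET + 1] = { 0x3 }; /* SFP id */
+        int len;
+
+        /* byte 92 == 0: no diagnostics, so do not advertise the A2 page */
+        len = a0[SFF_DIAG_SUPPORT_OFFSET] ? SFF_8472_LEN : SFF_8436_LEN;
+        printf("module id 0x%x, eeprom length %d\n", a0[0], len);
+        return 0;
+    }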
+
+Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 +--
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 20 ++++++++------------
+ 2 files changed, 9 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1402,8 +1402,7 @@ struct bnxt {
+
+ #define I2C_DEV_ADDR_A0 0xa0
+ #define I2C_DEV_ADDR_A2 0xa2
+-#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+-#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
++#define SFF_DIAG_SUPPORT_OFFSET 0x5c
+ #define SFF_MODULE_ID_SFP 0x3
+ #define SFF_MODULE_ID_QSFP 0xc
+ #define SFF_MODULE_ID_QSFP_PLUS 0xd
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -2172,9 +2172,8 @@ static int bnxt_read_sfp_module_eeprom_i
+ static int bnxt_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+ {
++ u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
+ struct bnxt *bp = netdev_priv(dev);
+- struct hwrm_port_phy_i2c_read_input req = {0};
+- struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* No point in going further if phy status indicates
+@@ -2189,21 +2188,19 @@ static int bnxt_get_module_info(struct n
+ if (bp->hwrm_spec_code < 0x10202)
+ return -EOPNOTSUPP;
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+- req.i2c_slave_addr = I2C_DEV_ADDR_A0;
+- req.page_number = 0;
+- req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
+- req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
+- req.port_id = cpu_to_le16(bp->pf.port_id);
+- mutex_lock(&bp->hwrm_cmd_lock);
+- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
++ SFF_DIAG_SUPPORT_OFFSET + 1,
++ data);
+ if (!rc) {
+- u32 module_id = le32_to_cpu(output->data[0]);
++ u8 module_id = data[0];
++ u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];
+
+ switch (module_id) {
+ case SFF_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
++ if (!diag_supported)
++ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case SFF_MODULE_ID_QSFP:
+ case SFF_MODULE_ID_QSFP_PLUS:
+@@ -2219,7 +2216,6 @@ static int bnxt_get_module_info(struct n
+ break;
+ }
+ }
+- mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+
diff --git a/patches.drivers/bnxt_en-Refactor-bnxt_close_nic.patch b/patches.drivers/bnxt_en-Refactor-bnxt_close_nic.patch
new file mode 100644
index 0000000000..7617d9bc50
--- /dev/null
+++ b/patches.drivers/bnxt_en-Refactor-bnxt_close_nic.patch
@@ -0,0 +1,70 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:04 -0500
+Subject: bnxt_en: Refactor bnxt_close_nic().
+Patch-mainline: v4.16-rc1
+Git-commit: 86e953db0114f396f916344395160aa267bf2627
+References: bsc#1086282 FATE#324873
+
+Add a new __bnxt_close_nic() function to do all the work previously done
+in bnxt_close_nic() except waiting for SRIOV configuration. The new
+function will be used in the next patch as part of SRIOV cleanup.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 32 +++++++++++++++++-------------
+ 1 file changed, 19 insertions(+), 13 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6451,23 +6451,13 @@ static bool bnxt_drv_busy(struct bnxt *b
+ test_bit(BNXT_STATE_READ_STATS, &bp->state));
+ }
+
+-int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
++static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
++ bool link_re_init)
+ {
+- int rc = 0;
+-
+-#ifdef CONFIG_BNXT_SRIOV
+- if (bp->sriov_cfg) {
+- rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
+- !bp->sriov_cfg,
+- BNXT_SRIOV_CFG_WAIT_TMO);
+- if (rc)
+- netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
+- }
+-
+ /* Close the VF-reps before closing PF */
+ if (BNXT_PF(bp))
+ bnxt_vf_reps_close(bp);
+-#endif
++
+ /* Change device state to avoid TX queue wake up's */
+ bnxt_tx_disable(bp);
+
+@@ -6490,6 +6480,22 @@ int bnxt_close_nic(struct bnxt *bp, bool
+ bnxt_del_napi(bp);
+ }
+ bnxt_free_mem(bp, irq_re_init);
++}
++
++int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
++{
++ int rc = 0;
++
++#ifdef CONFIG_BNXT_SRIOV
++ if (bp->sriov_cfg) {
++ rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
++ !bp->sriov_cfg,
++ BNXT_SRIOV_CFG_WAIT_TMO);
++ if (rc)
++ netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
++ }
++#endif
++ __bnxt_close_nic(bp, irq_re_init, link_re_init);
+ return rc;
+ }
+
diff --git a/patches.drivers/bnxt_en-Refactor-bnxt_need_reserve_rings.patch b/patches.drivers/bnxt_en-Refactor-bnxt_need_reserve_rings.patch
new file mode 100644
index 0000000000..bacae78431
--- /dev/null
+++ b/patches.drivers/bnxt_en-Refactor-bnxt_need_reserve_rings.patch
@@ -0,0 +1,105 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:19 -0400
+Subject: bnxt_en: Refactor bnxt_need_reserve_rings().
+Patch-mainline: v4.17-rc1
+Git-commit: 4e41dc5deb6e5c36ac5f2e49575485920037b2aa
+References: bsc#1086282 FATE#324873
+
+Refactor bnxt_need_reserve_rings() slightly so that __bnxt_reserve_rings()
+can call it and remove some duplicated code.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 57 +++++++++++++-----------------
+ 1 file changed, 25 insertions(+), 32 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4728,6 +4728,30 @@ static int bnxt_cp_rings_in_use(struct b
+ return cp;
+ }
+
++static bool bnxt_need_reserve_rings(struct bnxt *bp)
++{
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ int cp = bp->cp_nr_rings;
++ int rx = bp->rx_nr_rings;
++ int vnic = 1, grp = rx;
++
++ if (bp->hwrm_spec_code < 0x10601)
++ return false;
++
++ if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
++ return true;
++
++ if (bp->flags & BNXT_FLAG_RFS)
++ vnic = rx + 1;
++ if (bp->flags & BNXT_FLAG_AGG_RINGS)
++ rx <<= 1;
++ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
++ (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
++ hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
++ return true;
++ return false;
++}
++
+ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
+ bool shared);
+
+@@ -4741,7 +4765,7 @@ static int __bnxt_reserve_rings(struct b
+ bool sh = false;
+ int vnic = 1;
+
+- if (bp->hwrm_spec_code < 0x10601)
++ if (!bnxt_need_reserve_rings(bp))
+ return 0;
+
+ if (bp->flags & BNXT_FLAG_SHARED_RINGS)
+@@ -4750,14 +4774,7 @@ static int __bnxt_reserve_rings(struct b
+ vnic = rx + 1;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ rx <<= 1;
+-
+ grp = bp->rx_nr_rings;
+- if (tx == hw_resc->resv_tx_rings &&
+- (!(bp->flags & BNXT_FLAG_NEW_RM) ||
+- (rx == hw_resc->resv_rx_rings &&
+- grp == hw_resc->resv_hw_ring_grps &&
+- cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
+- return 0;
+
+ rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
+ if (rc)
+@@ -4801,30 +4818,6 @@ static int __bnxt_reserve_rings(struct b
+ return rc;
+ }
+
+-static bool bnxt_need_reserve_rings(struct bnxt *bp)
+-{
+- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+- int rx = bp->rx_nr_rings;
+- int vnic = 1;
+-
+- if (bp->hwrm_spec_code < 0x10601)
+- return false;
+-
+- if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+- return true;
+-
+- if (bp->flags & BNXT_FLAG_RFS)
+- vnic = rx + 1;
+- if (bp->flags & BNXT_FLAG_AGG_RINGS)
+- rx <<= 1;
+- if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+- (hw_resc->resv_rx_rings != rx ||
+- hw_resc->resv_cp_rings != bp->cp_nr_rings ||
+- hw_resc->resv_vnics != vnic))
+- return true;
+- return false;
+-}
+-
+ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+ int ring_grps, int cp_rings, int vnics)
+ {
diff --git a/patches.drivers/bnxt_en-Refactor-hardware-resource-data-structures.patch b/patches.drivers/bnxt_en-Refactor-hardware-resource-data-structures.patch
new file mode 100644
index 0000000000..18ebf92e73
--- /dev/null
+++ b/patches.drivers/bnxt_en-Refactor-hardware-resource-data-structures.patch
@@ -0,0 +1,431 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:06 -0500
+Subject: bnxt_en: Refactor hardware resource data structures.
+Patch-mainline: v4.16-rc1
+Git-commit: 6a4f29470569c5a158c1871a2f752ca22e433420
+References: bsc#1086282 FATE#324873
+
+In preparation for new firmware APIs to allocate hardware resources,
+add a new struct bnxt_hw_resc to hold various min, max and reserved
+resources. This new structure is common for PFs and VFs.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 126 ++++++------------------
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 37 ++++---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 50 +++++----
+ 3 files changed, 86 insertions(+), 127 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4515,7 +4515,7 @@ static int bnxt_hwrm_reserve_tx_rings(st
+ rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc)
+- bp->tx_reserved_rings = *tx_rings;
++ bp->hw_resc.resv_tx_rings = *tx_rings;
+ return rc;
+ }
+
+@@ -4741,6 +4741,8 @@ static int bnxt_hwrm_func_qcaps(struct b
+ int rc = 0;
+ struct hwrm_func_qcaps_input req = {0};
+ struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ u32 flags;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+@@ -4750,16 +4752,27 @@ static int bnxt_hwrm_func_qcaps(struct b
+ if (rc)
+ goto hwrm_func_qcaps_exit;
+
+- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
++ flags = le32_to_cpu(resp->flags);
++ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
+ bp->flags |= BNXT_FLAG_ROCEV1_CAP;
+- if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
++ if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
+ bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+
+ bp->tx_push_thresh = 0;
+- if (resp->flags &
+- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
++ if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
+ bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
+
++ hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
++ hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
++ hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
++ hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
++ hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
++ if (!hw_resc->max_hw_ring_grps)
++ hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
++ hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
++ hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
++ hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
++
+ if (BNXT_PF(bp)) {
+ struct bnxt_pf_info *pf = &bp->pf;
+
+@@ -4767,16 +4780,6 @@ static int bnxt_hwrm_func_qcaps(struct b
+ pf->port_id = le16_to_cpu(resp->port_id);
+ bp->dev->dev_port = pf->port_id;
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
+- pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+- pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+- pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+- pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+- pf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+- if (!pf->max_hw_ring_grps)
+- pf->max_hw_ring_grps = pf->max_tx_rings;
+- pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+- pf->max_vnics = le16_to_cpu(resp->max_vnics);
+- pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+ pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
+ pf->max_vfs = le16_to_cpu(resp->max_vfs);
+ pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
+@@ -4785,26 +4788,13 @@ static int bnxt_hwrm_func_qcaps(struct b
+ pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
+ pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
+ pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
+- if (resp->flags &
+- cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED))
++ if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
+ bp->flags |= BNXT_FLAG_WOL_CAP;
+ } else {
+ #ifdef CONFIG_BNXT_SRIOV
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->fw_fid = le16_to_cpu(resp->fid);
+-
+- vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
+- vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
+- vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
+- vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
+- vf->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
+- if (!vf->max_hw_ring_grps)
+- vf->max_hw_ring_grps = vf->max_tx_rings;
+- vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
+- vf->max_vnics = le16_to_cpu(resp->max_vnics);
+- vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
+-
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
+ #endif
+ }
+@@ -5171,7 +5161,7 @@ static int bnxt_init_chip(struct bnxt *b
+ rc);
+ goto err_out;
+ }
+- if (bp->tx_reserved_rings != bp->tx_nr_rings) {
++ if (bp->hw_resc.resv_tx_rings != bp->tx_nr_rings) {
+ int tx = bp->tx_nr_rings;
+
+ if (bnxt_hwrm_reserve_tx_rings(bp, &tx) ||
+@@ -5399,79 +5389,45 @@ static int bnxt_setup_int_mode(struct bn
+ #ifdef CONFIG_RFS_ACCEL
+ static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- return bp->vf.max_rsscos_ctxs;
+-#endif
+- return bp->pf.max_rsscos_ctxs;
++ return bp->hw_resc.max_rsscos_ctxs;
+ }
+
+ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- return bp->vf.max_vnics;
+-#endif
+- return bp->pf.max_vnics;
++ return bp->hw_resc.max_vnics;
+ }
+ #endif
+
+ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- return bp->vf.max_stat_ctxs;
+-#endif
+- return bp->pf.max_stat_ctxs;
++ return bp->hw_resc.max_stat_ctxs;
+ }
+
+ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- bp->vf.max_stat_ctxs = max;
+- else
+-#endif
+- bp->pf.max_stat_ctxs = max;
++ bp->hw_resc.max_stat_ctxs = max;
+ }
+
+ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- return bp->vf.max_cp_rings;
+-#endif
+- return bp->pf.max_cp_rings;
++ return bp->hw_resc.max_cp_rings;
+ }
+
+ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- bp->vf.max_cp_rings = max;
+- else
+-#endif
+- bp->pf.max_cp_rings = max;
++ bp->hw_resc.max_cp_rings = max;
+ }
+
+ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- return min_t(unsigned int, bp->vf.max_irqs,
+- bp->vf.max_cp_rings);
+-#endif
+- return min_t(unsigned int, bp->pf.max_irqs, bp->pf.max_cp_rings);
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++
++ return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
+ }
+
+ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+ {
+-#if defined(CONFIG_BNXT_SRIOV)
+- if (BNXT_VF(bp))
+- bp->vf.max_irqs = max_irqs;
+- else
+-#endif
+- bp->pf.max_irqs = max_irqs;
++ bp->hw_resc.max_irqs = max_irqs;
+ }
+
+ static int bnxt_init_msix(struct bnxt *bp)
+@@ -7864,24 +7820,14 @@ static int bnxt_get_max_irq(struct pci_d
+ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ int *max_cp)
+ {
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int max_ring_grps = 0;
+
+-#ifdef CONFIG_BNXT_SRIOV
+- if (!BNXT_PF(bp)) {
+- *max_tx = bp->vf.max_tx_rings;
+- *max_rx = bp->vf.max_rx_rings;
+- *max_cp = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
+- *max_cp = min_t(int, *max_cp, bp->vf.max_stat_ctxs);
+- max_ring_grps = bp->vf.max_hw_ring_grps;
+- } else
+-#endif
+- {
+- *max_tx = bp->pf.max_tx_rings;
+- *max_rx = bp->pf.max_rx_rings;
+- *max_cp = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
+- *max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
+- max_ring_grps = bp->pf.max_hw_ring_grps;
+- }
++ *max_tx = hw_resc->max_tx_rings;
++ *max_rx = hw_resc->max_rx_rings;
++ *max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
++ *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
++ max_ring_grps = hw_resc->max_hw_ring_grps;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
+ *max_cp -= 1;
+ *max_rx -= 2;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -755,19 +755,35 @@ struct bnxt_vnic_info {
+ #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
+ };
+
+-#if defined(CONFIG_BNXT_SRIOV)
+-struct bnxt_vf_info {
+- u16 fw_fid;
+- u8 mac_addr[ETH_ALEN];
++struct bnxt_hw_resc {
++ u16 min_rsscos_ctxs;
+ u16 max_rsscos_ctxs;
++ u16 min_cp_rings;
+ u16 max_cp_rings;
++ u16 resv_cp_rings;
++ u16 min_tx_rings;
+ u16 max_tx_rings;
++ u16 resv_tx_rings;
++ u16 min_rx_rings;
+ u16 max_rx_rings;
++ u16 resv_rx_rings;
++ u16 min_hw_ring_grps;
+ u16 max_hw_ring_grps;
++ u16 resv_hw_ring_grps;
++ u16 min_l2_ctxs;
+ u16 max_l2_ctxs;
+- u16 max_irqs;
++ u16 min_vnics;
+ u16 max_vnics;
++ u16 resv_vnics;
++ u16 min_stat_ctxs;
+ u16 max_stat_ctxs;
++ u16 max_irqs;
++};
++
++#if defined(CONFIG_BNXT_SRIOV)
++struct bnxt_vf_info {
++ u16 fw_fid;
++ u8 mac_addr[ETH_ALEN];
+ u16 vlan;
+ u32 flags;
+ #define BNXT_VF_QOS 0x1
+@@ -788,15 +804,6 @@ struct bnxt_pf_info {
+ u16 fw_fid;
+ u16 port_id;
+ u8 mac_addr[ETH_ALEN];
+- u16 max_rsscos_ctxs;
+- u16 max_cp_rings;
+- u16 max_tx_rings; /* HW assigned max tx rings for this PF */
+- u16 max_rx_rings; /* HW assigned max rx rings for this PF */
+- u16 max_hw_ring_grps;
+- u16 max_irqs;
+- u16 max_l2_ctxs;
+- u16 max_vnics;
+- u16 max_stat_ctxs;
+ u32 first_vf_id;
+ u16 active_vfs;
+ u16 max_vfs;
+@@ -1185,7 +1192,6 @@ struct bnxt {
+ int tx_nr_rings;
+ int tx_nr_rings_per_tc;
+ int tx_nr_rings_xdp;
+- int tx_reserved_rings;
+
+ int tx_wake_thresh;
+ int tx_push_thresh;
+@@ -1297,6 +1303,7 @@ struct bnxt {
+ #define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
+ #define BNXT_FLOW_STATS_SP_EVENT 15
+
++ struct bnxt_hw_resc hw_resc;
+ struct bnxt_pf_info pf;
+ #ifdef CONFIG_BNXT_SRIOV
+ int nr_vfs;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -421,24 +421,28 @@ static int bnxt_hwrm_func_cfg(struct bnx
+ {
+ u32 rc = 0, mtu, i;
+ u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+- u16 vf_ring_grps;
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ u16 vf_ring_grps, max_stat_ctxs;
+ struct hwrm_func_cfg_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+ int total_vf_tx_rings = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+
++ max_stat_ctxs = hw_resc->max_stat_ctxs;
++
+ /* Remaining rings are distributed equally amongs VF's for now */
+- vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
+- vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
++ vf_cp_rings = (hw_resc->max_cp_rings - bp->cp_nr_rings) / num_vfs;
++ vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
++ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) /
+ num_vfs;
+ else
+- vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
+- vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+- vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
+- vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
++ vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings) /
++ num_vfs;
++ vf_ring_grps = (hw_resc->max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
++ vf_tx_rings = (hw_resc->max_tx_rings - bp->tx_nr_rings) / num_vfs;
++ vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+ req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
+@@ -486,13 +490,13 @@ static int bnxt_hwrm_func_cfg(struct bnx
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ if (!rc) {
+- pf->max_tx_rings -= total_vf_tx_rings;
+- pf->max_rx_rings -= vf_rx_rings * num_vfs;
+- pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+- pf->max_cp_rings -= vf_cp_rings * num_vfs;
+- pf->max_rsscos_ctxs -= num_vfs;
+- pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+- pf->max_vnics -= vf_vnics * num_vfs;
++ hw_resc->max_tx_rings -= total_vf_tx_rings;
++ hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
++ hw_resc->max_hw_ring_grps -= vf_ring_grps * num_vfs;
++ hw_resc->max_cp_rings -= vf_cp_rings * num_vfs;
++ hw_resc->max_rsscos_ctxs -= num_vfs;
++ hw_resc->max_stat_ctxs -= vf_stat_ctx * num_vfs;
++ hw_resc->max_vnics -= vf_vnics * num_vfs;
+ }
+ return rc;
+ }
+@@ -501,6 +505,7 @@ static int bnxt_sriov_enable(struct bnxt
+ {
+ int rc = 0, vfs_supported;
+ int min_rx_rings, min_tx_rings, min_rss_ctxs;
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+ int avail_cp, avail_stat;
+
+@@ -510,8 +515,8 @@ static int bnxt_sriov_enable(struct bnxt
+ */
+ vfs_supported = *num_vfs;
+
+- avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+- avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
++ avail_cp = hw_resc->max_cp_rings - bp->cp_nr_rings;
++ avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs;
+ avail_cp = min_t(int, avail_cp, avail_stat);
+
+ while (vfs_supported) {
+@@ -520,23 +525,24 @@ static int bnxt_sriov_enable(struct bnxt
+ min_rss_ctxs = vfs_supported;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+- if (bp->pf.max_rx_rings - bp->rx_nr_rings * 2 >=
++ if (hw_resc->max_rx_rings - bp->rx_nr_rings * 2 >=
+ min_rx_rings)
+ rx_ok = 1;
+ } else {
+- if (bp->pf.max_rx_rings - bp->rx_nr_rings >=
++ if (hw_resc->max_rx_rings - bp->rx_nr_rings >=
+ min_rx_rings)
+ rx_ok = 1;
+ }
+- if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
++ if (hw_resc->max_vnics - bp->nr_vnics < min_rx_rings ||
+ avail_cp < min_rx_rings)
+ rx_ok = 0;
+
+- if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
++ if (hw_resc->max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+ avail_cp >= min_tx_rings)
+ tx_ok = 1;
+
+- if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
++ if (hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs >=
++ min_rss_ctxs)
+ rss_ok = 1;
+
+ if (tx_ok && rx_ok && rss_ok)
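
The patch above folds the per-PF and per-VF resource maxima into a single struct bnxt_hw_resc, so the accessors lose their CONFIG_BNXT_SRIOV branches. As a rough standalone illustration of that accessor shape (the struct and names below are simplified stand-ins, not the driver's real definitions):

#include <stdio.h>

/* Simplified stand-in for the consolidated resource struct. */
struct hw_resc {
	unsigned short max_cp_rings;
	unsigned short max_irqs;
};

struct dev {
	struct hw_resc hw_resc;	/* one copy, valid for PF and VF alike */
};

/* After the consolidation, accessors read one place... */
static unsigned int get_max_cp_rings(struct dev *d)
{
	return d->hw_resc.max_cp_rings;
}

/* ...instead of branching on PF vs. VF under #ifdef as before. */
static unsigned int get_max_irqs(struct dev *d)
{
	unsigned int irqs = d->hw_resc.max_irqs;
	unsigned int cp = d->hw_resc.max_cp_rings;

	return irqs < cp ? irqs : cp;	/* min(), as in the patched code */
}

int main(void)
{
	struct dev d = { .hw_resc = { .max_cp_rings = 8, .max_irqs = 16 } };

	printf("cp=%u irqs=%u\n", get_max_cp_rings(&d), get_max_irqs(&d));
	return 0;
}
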
diff --git a/patches.drivers/bnxt_en-Refactor-the-functions-to-reserve-hardware-r.patch b/patches.drivers/bnxt_en-Refactor-the-functions-to-reserve-hardware-r.patch
new file mode 100644
index 0000000000..7ec926b064
--- /dev/null
+++ b/patches.drivers/bnxt_en-Refactor-the-functions-to-reserve-hardware-r.patch
@@ -0,0 +1,140 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Fri, 9 Mar 2018 23:46:03 -0500
+Subject: bnxt_en: Refactor the functions to reserve hardware rings.
+Patch-mainline: v4.16-rc7
+Git-commit: 4ed50ef4da4d113fe65d9f9d049c1ce7468e3ac1
+References: bsc#1086282 FATE#324873
+
+The bnxt_hwrm_reserve_{pf|vf}_rings() functions are very similar to
+the bnxt_hwrm_check_{pf|vf}_rings() functions. Refactor the former
+so that the latter can make use of common code in the next patch.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 85 ++++++++++++++++++------------
+ 1 file changed, 53 insertions(+), 32 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4533,18 +4533,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt
+ return rc;
+ }
+
+-static int
+-bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
+- int ring_grps, int cp_rings, int vnics)
++static void
++__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
++ int tx_rings, int rx_rings, int ring_grps,
++ int cp_rings, int vnics)
+ {
+- struct hwrm_func_cfg_input req = {0};
+ u32 enables = 0;
+- int rc;
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+- req.fid = cpu_to_le16(0xffff);
++ bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
++ req->fid = cpu_to_le16(0xffff);
+ enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+- req.num_tx_rings = cpu_to_le16(tx_rings);
++ req->num_tx_rings = cpu_to_le16(tx_rings);
+ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+ enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+@@ -4553,16 +4552,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *
+ FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+
+- req.num_rx_rings = cpu_to_le16(rx_rings);
+- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+- req.num_cmpl_rings = cpu_to_le16(cp_rings);
+- req.num_stat_ctxs = req.num_cmpl_rings;
+- req.num_vnics = cpu_to_le16(vnics);
++ req->num_rx_rings = cpu_to_le16(rx_rings);
++ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
++ req->num_cmpl_rings = cpu_to_le16(cp_rings);
++ req->num_stat_ctxs = req->num_cmpl_rings;
++ req->num_vnics = cpu_to_le16(vnics);
+ }
+- if (!enables)
++ req->enables = cpu_to_le32(enables);
++}
++
++static void
++__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
++ struct hwrm_func_vf_cfg_input *req, int tx_rings,
++ int rx_rings, int ring_grps, int cp_rings,
++ int vnics)
++{
++ u32 enables = 0;
++
++ bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
++ enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
++ enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
++ enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
++ enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
++ enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
++
++ req->num_tx_rings = cpu_to_le16(tx_rings);
++ req->num_rx_rings = cpu_to_le16(rx_rings);
++ req->num_hw_ring_grps = cpu_to_le16(ring_grps);
++ req->num_cmpl_rings = cpu_to_le16(cp_rings);
++ req->num_stat_ctxs = req->num_cmpl_rings;
++ req->num_vnics = cpu_to_le16(vnics);
++
++ req->enables = cpu_to_le32(enables);
++}
++
++static int
++bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
++ int ring_grps, int cp_rings, int vnics)
++{
++ struct hwrm_func_cfg_input req = {0};
++ int rc;
++
++ __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
++ cp_rings, vnics);
++ if (!req.enables)
+ return 0;
+
+- req.enables = cpu_to_le32(enables);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+@@ -4579,7 +4615,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *
+ int ring_grps, int cp_rings, int vnics)
+ {
+ struct hwrm_func_vf_cfg_input req = {0};
+- u32 enables = 0;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
+@@ -4587,22 +4622,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *
+ return 0;
+ }
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+- enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
+- enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
+- enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+- FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
+- enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
+- enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+-
+- req.num_tx_rings = cpu_to_le16(tx_rings);
+- req.num_rx_rings = cpu_to_le16(rx_rings);
+- req.num_hw_ring_grps = cpu_to_le16(ring_grps);
+- req.num_cmpl_rings = cpu_to_le16(cp_rings);
+- req.num_stat_ctxs = req.num_cmpl_rings;
+- req.num_vnics = cpu_to_le16(vnics);
+-
+- req.enables = cpu_to_le32(enables);
++ __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
++ cp_rings, vnics);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
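
The refactor above follows a common build-request/send-request split: a helper that only fills in the request, reusable by the later check path, plus a thin wrapper that sends it. A minimal sketch of that split, with hypothetical request fields standing in for hwrm_func_cfg_input:

#include <stdio.h>
#include <string.h>

/* Hypothetical request type standing in for hwrm_func_cfg_input. */
struct cfg_req {
	unsigned int enables;
	unsigned short num_tx_rings;
	unsigned short num_rx_rings;
};

/* Step 1: fill the request only -- reusable by reserve and check paths. */
static void build_reserve_req(struct cfg_req *req, int tx, int rx)
{
	memset(req, 0, sizeof(*req));
	if (tx) {
		req->enables |= 0x1;	/* placeholder enable bit */
		req->num_tx_rings = tx;
	}
	if (rx) {
		req->enables |= 0x2;
		req->num_rx_rings = rx;
	}
}

/* Step 2: thin wrapper that sends it (stubbed here). */
static int send_req(const struct cfg_req *req)
{
	printf("send: enables=0x%x tx=%u rx=%u\n",
	       req->enables, req->num_tx_rings, req->num_rx_rings);
	return 0;
}

static int reserve_rings(int tx, int rx)
{
	struct cfg_req req;

	build_reserve_req(&req, tx, rx);
	if (!req.enables)	/* nothing requested: skip the firmware call */
		return 0;
	return send_req(&req);
}

int main(void)
{
	return reserve_rings(4, 4);
}
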
diff --git a/patches.drivers/bnxt_en-Remap-TC-to-hardware-queues-when-configuring.patch b/patches.drivers/bnxt_en-Remap-TC-to-hardware-queues-when-configuring.patch
new file mode 100644
index 0000000000..aad62a0556
--- /dev/null
+++ b/patches.drivers/bnxt_en-Remap-TC-to-hardware-queues-when-configuring.patch
@@ -0,0 +1,158 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:32 -0400
+Subject: bnxt_en: Remap TC to hardware queues when configuring PFC.
+Patch-mainline: v4.18-rc1
+Git-commit: d31cd579a45c44ede9e56c2f6d33537ba395a49b
+References: bsc#1086282 FATE#324873
+
+Initially, the MQPRIO TCs are mapped 1:1 directly to the hardware
+queues. Some of these hardware queues are configured to be lossless.
+When PFC is enabled on one or more TCs, we now need to remap the
+TCs that have PFC enabled to the lossless hardware queues.
+
+After remapping, we need to close and open the NIC for the new
+mapping to take effect. We also need to reprogram all ETS parameters.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | 99 +++++++++++++++-----------
+ 1 file changed, 59 insertions(+), 40 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+@@ -173,44 +173,59 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(s
+ return 0;
+ }
+
+-static int bnxt_hwrm_queue_cfg(struct bnxt *bp, unsigned int lltc_mask)
++static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
+ {
+- struct hwrm_queue_cfg_input req = {0};
+- int i;
++ unsigned long qmap = 0;
++ int max = bp->max_tc;
++ int i, j, rc;
+
+- if (netif_running(bp->dev))
+- bnxt_tx_disable(bp);
++ /* Assign lossless TCs first */
++ for (i = 0, j = 0; i < max; ) {
++ if (lltc_mask & (1 << i)) {
++ if (BNXT_LLQ(bp->q_info[j].queue_profile)) {
++ bp->tc_to_qidx[i] = j;
++ __set_bit(j, &qmap);
++ i++;
++ }
++ j++;
++ continue;
++ }
++ i++;
++ }
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_CFG, -1, -1);
+- req.flags = cpu_to_le32(QUEUE_CFG_REQ_FLAGS_PATH_BIDIR);
+- req.enables = cpu_to_le32(QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE);
++ for (i = 0, j = 0; i < max; i++) {
++ if (lltc_mask & (1 << i))
++ continue;
++ j = find_next_zero_bit(&qmap, max, j);
++ bp->tc_to_qidx[i] = j;
++ __set_bit(j, &qmap);
++ j++;
++ }
+
+- /* Configure lossless queues to lossy first */
+- req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
+- for (i = 0; i < bp->max_tc; i++) {
+- if (BNXT_LLQ(bp->q_info[i].queue_profile)) {
+- req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+- hwrm_send_message(bp, &req, sizeof(req),
+- HWRM_CMD_TIMEOUT);
+- bp->q_info[i].queue_profile =
+- QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY;
++ if (netif_running(bp->dev)) {
++ bnxt_close_nic(bp, false, false);
++ rc = bnxt_open_nic(bp, false, false);
++ if (rc) {
++ netdev_warn(bp->dev, "failed to open NIC, rc = %d\n", rc);
++ return rc;
+ }
+ }
++ if (bp->ieee_ets) {
++ int tc = netdev_get_num_tc(bp->dev);
+
+- /* Now configure desired queues to lossless */
+- req.service_profile = QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
+- for (i = 0; i < bp->max_tc; i++) {
+- if (lltc_mask & (1 << i)) {
+- req.queue_id = cpu_to_le32(bp->q_info[i].queue_id);
+- hwrm_send_message(bp, &req, sizeof(req),
+- HWRM_CMD_TIMEOUT);
+- bp->q_info[i].queue_profile =
+- QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS;
++ if (!tc)
++ tc = 1;
++ rc = bnxt_hwrm_queue_cos2bw_cfg(bp, bp->ieee_ets, tc);
++ if (rc) {
++ netdev_warn(bp->dev, "failed to config BW, rc = %d\n", rc);
++ return rc;
++ }
++ rc = bnxt_hwrm_queue_pri2cos_cfg(bp, bp->ieee_ets);
++ if (rc) {
++ netdev_warn(bp->dev, "failed to config prio, rc = %d\n", rc);
++ return rc;
+ }
+ }
+- if (netif_running(bp->dev))
+- bnxt_tx_enable(bp);
+-
+ return 0;
+ }
+
+@@ -220,7 +235,7 @@ static int bnxt_hwrm_queue_pfc_cfg(struc
+ struct ieee_ets *my_ets = bp->ieee_ets;
+ unsigned int tc_mask = 0, pri_mask = 0;
+ u8 i, pri, lltc_count = 0;
+- bool need_q_recfg = false;
++ bool need_q_remap = false;
+ int rc;
+
+ if (!my_ets)
+@@ -240,21 +255,25 @@ static int bnxt_hwrm_queue_pfc_cfg(struc
+ if (lltc_count > bp->max_lltc)
+ return -EINVAL;
+
+- bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
+- req.flags = cpu_to_le32(pri_mask);
+- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+- if (rc)
+- return rc;
+-
+ for (i = 0; i < bp->max_tc; i++) {
+ if (tc_mask & (1 << i)) {
+- if (!BNXT_LLQ(bp->q_info[i].queue_profile))
+- need_q_recfg = true;
++ u8 qidx = bp->tc_to_qidx[i];
++
++ if (!BNXT_LLQ(bp->q_info[qidx].queue_profile)) {
++ need_q_remap = true;
++ break;
++ }
+ }
+ }
+
+- if (need_q_recfg)
+- rc = bnxt_hwrm_queue_cfg(bp, tc_mask);
++ if (need_q_remap)
++ rc = bnxt_queue_remap(bp, tc_mask);
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1);
++ req.flags = cpu_to_le32(pri_mask);
++ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (rc)
++ return rc;
+
+ return rc;
+ }
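
The remap described above is a two-pass assignment: lossless TCs claim lossless-capable queues first, then the remaining TCs take the lowest unclaimed queue indices from a bitmap. A self-contained sketch of that idea (the queue profile table is invented, and find_next_zero_bit is replaced by a plain loop):

#include <stdio.h>

#define MAX_TC 8

/* 1 = lossless-capable hardware queue (made-up profile data). */
static const int q_lossless[MAX_TC] = { 0, 0, 1, 1, 0, 0, 0, 0 };

static int first_zero_bit(unsigned long map, int max)
{
	for (int j = 0; j < max; j++)
		if (!(map & (1UL << j)))
			return j;
	return max;
}

int main(void)
{
	unsigned long qmap = 0;		/* queues already claimed */
	int tc_to_qidx[MAX_TC];
	unsigned int lltc_mask = 0x1;	/* TC0 wants PFC/lossless */
	int i, j;

	/* Pass 1: map lossless TCs onto lossless-capable queues. */
	for (i = 0, j = 0; i < MAX_TC; i++) {
		if (!(lltc_mask & (1u << i)))
			continue;
		while (j < MAX_TC && !q_lossless[j])
			j++;
		tc_to_qidx[i] = j;
		qmap |= 1UL << j;
		j++;
	}

	/* Pass 2: remaining TCs take the lowest unclaimed queues. */
	for (i = 0; i < MAX_TC; i++) {
		if (lltc_mask & (1u << i))
			continue;
		j = first_zero_bit(qmap, MAX_TC);
		tc_to_qidx[i] = j;
		qmap |= 1UL << j;
	}

	for (i = 0; i < MAX_TC; i++)
		printf("TC%d -> queue %d\n", i, tc_to_qidx[i]);
	return 0;
}

With lltc_mask = 0x1, TC0 lands on queue 2 (the first lossless queue) and the remaining TCs fill in around it, which is the behavior the patch's reopen-and-reprogram step then makes effective.
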
diff --git a/patches.drivers/bnxt_en-Reserve-RSS-and-L2-contexts-for-VF.patch b/patches.drivers/bnxt_en-Reserve-RSS-and-L2-contexts-for-VF.patch
new file mode 100644
index 0000000000..e12ca852a4
--- /dev/null
+++ b/patches.drivers/bnxt_en-Reserve-RSS-and-L2-contexts-for-VF.patch
@@ -0,0 +1,83 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:43 -0400
+Subject: bnxt_en: Reserve RSS and L2 contexts for VF.
+Patch-mainline: v4.18-rc1
+Git-commit: 86c3380d9b1e2a3fcc87d34cea12991b81032b9f
+References: bsc#1086282 FATE#324873
+
+For completeness and correctness, the VF driver needs to reserve these
+RSS and L2 contexts.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4 ++++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 10 +++++-----
+ drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 5 +++++
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4687,6 +4687,10 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *
+
+ __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
+ cp_rings, vnics);
++ req.enables |= cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
++ FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS);
++ req.num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
++ req.num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ return -ENOMEM;
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+@@ -462,13 +462,13 @@ static int bnxt_hwrm_func_vf_resc_cfg(st
+ vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
+ vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+
+- req.min_rsscos_ctx = cpu_to_le16(1);
+- req.max_rsscos_ctx = cpu_to_le16(1);
++ req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
++ req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+ if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL) {
+ req.min_cmpl_rings = cpu_to_le16(1);
+ req.min_tx_rings = cpu_to_le16(1);
+ req.min_rx_rings = cpu_to_le16(1);
+- req.min_l2_ctxs = cpu_to_le16(1);
++ req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MIN_L2_CTX);
+ req.min_vnics = cpu_to_le16(1);
+ req.min_stat_ctx = cpu_to_le16(1);
+ req.min_hw_ring_grps = cpu_to_le16(1);
+@@ -483,7 +483,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(st
+ req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.min_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.min_rx_rings = cpu_to_le16(vf_rx_rings);
+- req.min_l2_ctxs = cpu_to_le16(4);
++ req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+ req.min_vnics = cpu_to_le16(vf_vnics);
+ req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+@@ -491,7 +491,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(st
+ req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+ req.max_tx_rings = cpu_to_le16(vf_tx_rings);
+ req.max_rx_rings = cpu_to_le16(vf_rx_rings);
+- req.max_l2_ctxs = cpu_to_le16(4);
++ req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+ req.max_vnics = cpu_to_le16(vf_vnics);
+ req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+ req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+@@ -23,6 +23,11 @@
+ ((offsetof(struct hwrm_reject_fwd_resp_input, encap_request) + n) >\
+ offsetof(struct hwrm_reject_fwd_resp_input, encap_resp_target_id))
+
++#define BNXT_VF_MIN_RSS_CTX 1
++#define BNXT_VF_MAX_RSS_CTX 1
++#define BNXT_VF_MIN_L2_CTX 1
++#define BNXT_VF_MAX_L2_CTX 4
++
+ int bnxt_get_vf_config(struct net_device *, int, struct ifla_vf_info *);
+ int bnxt_set_vf_mac(struct net_device *, int, u8 *);
+ int bnxt_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
diff --git a/patches.drivers/bnxt_en-Reserve-completion-rings-and-MSIX-for-bnxt_r.patch b/patches.drivers/bnxt_en-Reserve-completion-rings-and-MSIX-for-bnxt_r.patch
new file mode 100644
index 0000000000..d311400c8e
--- /dev/null
+++ b/patches.drivers/bnxt_en-Reserve-completion-rings-and-MSIX-for-bnxt_r.patch
@@ -0,0 +1,219 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Sat, 31 Mar 2018 13:54:20 -0400
+Subject: bnxt_en: Reserve completion rings and MSIX for bnxt_re RDMA driver.
+Patch-mainline: v4.17-rc1
+Git-commit: fbcfc8e4674156cb7eb3d8054bd4332142d2cc58
+References: bsc#1086282 FATE#324873
+
+Add additional logic to reserve completion rings for the bnxt_re driver
+when it requests MSIX vectors. The function bnxt_cp_rings_in_use()
+will return the total number of completion rings used by both drivers
+that need to be reserved. If the network interface in up, we will
+close and open the NIC to reserve the new set of completion rings and
+re-initialize the vectors.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 39 ++++++++++++++++++++------
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 3 ++
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 39 +++++++++++++++++++++-----
+ 3 files changed, 65 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4731,7 +4731,7 @@ static int bnxt_cp_rings_in_use(struct b
+ static bool bnxt_need_reserve_rings(struct bnxt *bp)
+ {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+- int cp = bp->cp_nr_rings;
++ int cp = bnxt_cp_rings_in_use(bp);
+ int rx = bp->rx_nr_rings;
+ int vnic = 1, grp = rx;
+
+@@ -4758,9 +4758,9 @@ static int bnxt_trim_rings(struct bnxt *
+ static int __bnxt_reserve_rings(struct bnxt *bp)
+ {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++ int cp = bnxt_cp_rings_in_use(bp);
+ int tx = bp->tx_nr_rings;
+ int rx = bp->rx_nr_rings;
+- int cp = bp->cp_nr_rings;
+ int grp, rx_rings, rc;
+ bool sh = false;
+ int vnic = 1;
+@@ -5853,7 +5853,7 @@ void bnxt_set_max_func_cp_rings(struct b
+ bp->hw_resc.max_cp_rings = max;
+ }
+
+-static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
++unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+ {
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+
+@@ -5865,6 +5865,26 @@ void bnxt_set_max_func_irqs(struct bnxt
+ bp->hw_resc.max_irqs = max_irqs;
+ }
+
++int bnxt_get_avail_msix(struct bnxt *bp, int num)
++{
++ int max_cp = bnxt_get_max_func_cp_rings(bp);
++ int max_irq = bnxt_get_max_func_irqs(bp);
++ int total_req = bp->cp_nr_rings + num;
++ int max_idx, avail_msix;
++
++ max_idx = min_t(int, bp->total_irqs, max_cp);
++ avail_msix = max_idx - bp->cp_nr_rings;
++ if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
++ return avail_msix;
++
++ if (max_irq < total_req) {
++ num = max_irq - bp->cp_nr_rings;
++ if (num <= 0)
++ return 0;
++ }
++ return num;
++}
++
+ static int bnxt_get_num_msix(struct bnxt *bp)
+ {
+ if (!(bp->flags & BNXT_FLAG_NEW_RM))
+@@ -5875,7 +5895,7 @@ static int bnxt_get_num_msix(struct bnxt
+
+ static int bnxt_init_msix(struct bnxt *bp)
+ {
+- int i, total_vecs, max, rc = 0, min = 1;
++ int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
+ struct msix_entry *msix_ent;
+
+ total_vecs = bnxt_get_num_msix(bp);
+@@ -5896,7 +5916,8 @@ static int bnxt_init_msix(struct bnxt *b
+ min = 2;
+
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+- if (total_vecs < 0) {
++ ulp_msix = bnxt_get_ulp_msix_num(bp);
++ if (total_vecs < 0 || total_vecs < ulp_msix) {
+ rc = -ENODEV;
+ goto msix_setup_exit;
+ }
+@@ -5909,7 +5930,7 @@ static int bnxt_init_msix(struct bnxt *b
+ bp->total_irqs = total_vecs;
+ /* Trim rings based upon num of vectors allocated */
+ rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
+- total_vecs, min == 1);
++ total_vecs - ulp_msix, min == 1);
+ if (rc)
+ goto msix_setup_exit;
+
+@@ -5973,9 +5994,8 @@ static void bnxt_clear_int_mode(struct b
+ bp->flags &= ~BNXT_FLAG_USING_MSIX;
+ }
+
+-static int bnxt_reserve_rings(struct bnxt *bp)
++int bnxt_reserve_rings(struct bnxt *bp)
+ {
+- int orig_cp = bp->hw_resc.resv_cp_rings;
+ int tcs = netdev_get_num_tc(bp->dev);
+ int rc;
+
+@@ -5987,7 +6007,8 @@ static int bnxt_reserve_rings(struct bnx
+ netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
+ return rc;
+ }
+- if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
++ if ((bp->flags & BNXT_FLAG_NEW_RM) &&
++ (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+ bnxt_clear_int_mode(bp);
+ rc = bnxt_init_int_mode(bp);
+ if (rc)
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1444,7 +1444,10 @@ unsigned int bnxt_get_max_func_stat_ctxs
+ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+ unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
++unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
+ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
++int bnxt_get_avail_msix(struct bnxt *bp, int num);
++int bnxt_reserve_rings(struct bnxt *bp);
+ void bnxt_tx_disable(struct bnxt *bp);
+ void bnxt_tx_enable(struct bnxt *bp);
+ int bnxt_hwrm_set_pause(struct bnxt *);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+@@ -108,6 +108,7 @@ static int bnxt_req_msix_vecs(struct bnx
+ struct bnxt *bp = netdev_priv(dev);
+ int max_idx, max_cp_rings;
+ int avail_msix, i, idx;
++ int rc = 0;
+
+ ASSERT_RTNL();
+ if (ulp_id != BNXT_ROCE_ULP)
+@@ -120,26 +121,46 @@ static int bnxt_req_msix_vecs(struct bnx
+ return -EAGAIN;
+
+ max_cp_rings = bnxt_get_max_func_cp_rings(bp);
+- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+- avail_msix = max_idx - bp->cp_nr_rings;
++ avail_msix = bnxt_get_avail_msix(bp, num_msix);
+ if (!avail_msix)
+ return -ENOMEM;
+ if (avail_msix > num_msix)
+ avail_msix = num_msix;
+
+- if (bp->flags & BNXT_FLAG_NEW_RM)
++ if (bp->flags & BNXT_FLAG_NEW_RM) {
+ idx = bp->cp_nr_rings;
+- else
++ } else {
++ max_idx = min_t(int, bp->total_irqs, max_cp_rings);
+ idx = max_idx - avail_msix;
++ }
+ edev->ulp_tbl[ulp_id].msix_base = idx;
++ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
++ if (bp->total_irqs < (idx + avail_msix)) {
++ if (netif_running(dev)) {
++ bnxt_close_nic(bp, true, false);
++ rc = bnxt_open_nic(bp, true, false);
++ } else {
++ rc = bnxt_reserve_rings(bp);
++ }
++ }
++ if (rc) {
++ edev->ulp_tbl[ulp_id].msix_requested = 0;
++ return -EAGAIN;
++ }
++
++ if (bp->flags & BNXT_FLAG_NEW_RM) {
++ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
++
++ avail_msix = hw_resc->resv_cp_rings - bp->cp_nr_rings;
++ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
++ }
+ for (i = 0; i < avail_msix; i++) {
+ ent[i].vector = bp->irq_tbl[idx + i].vector;
+ ent[i].ring_idx = idx + i;
+ ent[i].db_offset = (idx + i) * 0x80;
+ }
+- bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
++ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
+- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+ return avail_msix;
+ }
+
+@@ -157,7 +178,11 @@ static int bnxt_free_msix_vecs(struct bn
+ msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
+ bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+- bnxt_set_max_func_irqs(bp, bp->total_irqs);
++ bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
++ if (netif_running(dev)) {
++ bnxt_close_nic(bp, true, false);
++ bnxt_open_nic(bp, true, false);
++ }
+ return 0;
+ }
+
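
The headroom calculation in bnxt_get_avail_msix() above can be read as: spare vectors are whatever lies between the completion rings the L2 driver already uses and the smaller of the enabled vectors and the completion-ring ceiling, optionally grown under the new resource manager up to the IRQ ceiling. A sketch of that arithmetic under simplified, made-up numbers:

#include <stdio.h>

/* Hypothetical resource snapshot; fields loosely mirror the patch. */
struct caps {
	int total_irqs;		/* MSIX vectors currently enabled */
	int max_cp_rings;	/* completion rings the function may use */
	int max_irqs;		/* IRQs the function may use */
	int cp_nr_rings;	/* completion rings the L2 driver uses */
	int new_rm;		/* new resource-manager firmware? */
};

static int min_int(int a, int b) { return a < b ? a : b; }

/* How many MSIX vectors can a ULP (e.g. RDMA) still get? */
static int get_avail_msix(const struct caps *c, int num)
{
	int max_idx = min_int(c->total_irqs, c->max_cp_rings);
	int avail = max_idx - c->cp_nr_rings;

	if (!c->new_rm || avail >= num)
		return avail;

	/* New RM: we may grow, but never past the IRQ ceiling. */
	if (c->max_irqs < c->cp_nr_rings + num)
		num = c->max_irqs - c->cp_nr_rings;
	return num > 0 ? num : 0;
}

int main(void)
{
	struct caps c = { .total_irqs = 9, .max_cp_rings = 16,
			  .max_irqs = 12, .cp_nr_rings = 8, .new_rm = 1 };

	/* Asks for 9, but only 12 - 8 = 4 fit under the IRQ ceiling. */
	printf("ULP can request %d vectors\n", get_avail_msix(&c, 9));
	return 0;
}
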
diff --git a/patches.drivers/bnxt_en-Reserve-resources-for-RFS.patch b/patches.drivers/bnxt_en-Reserve-resources-for-RFS.patch
new file mode 100644
index 0000000000..ea11705a6a
--- /dev/null
+++ b/patches.drivers/bnxt_en-Reserve-resources-for-RFS.patch
@@ -0,0 +1,56 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:10 -0500
+Subject: bnxt_en: Reserve resources for RFS.
+Patch-mainline: v4.16-rc1
+Git-commit: 6a1eef5b9079742ecfad647892669bd5fe6b0e3f
+References: bsc#1086282 FATE#324873
+
+In bnxt_rfs_capable(), add a call to reserve vnic resources to support
+NTUPLE. Return true if we can successfully reserve enough vnics.
+Otherwise, reserve the minimum 1 VNIC for normal operations not
+supporting NTUPLE and return false.
+
+Also, suppress warning message about not enough resources for NTUPLE when
+only 1 RX ring is in use. NTUPLE filters by definition require multiple
+RX rings.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7003,13 +7003,26 @@ static bool bnxt_rfs_capable(struct bnxt
+ if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
+ max_rss_ctxs = max_vnics;
+ if (vnics > max_vnics || vnics > max_rss_ctxs) {
+- netdev_warn(bp->dev,
+- "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+- min(max_rss_ctxs - 1, max_vnics - 1));
++ if (bp->rx_nr_rings > 1)
++ netdev_warn(bp->dev,
++ "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
++ min(max_rss_ctxs - 1, max_vnics - 1));
+ return false;
+ }
+
+- return true;
++ if (!(bp->flags & BNXT_FLAG_NEW_RM))
++ return true;
++
++ if (vnics == bp->hw_resc.resv_vnics)
++ return true;
++
++ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
++ if (vnics <= bp->hw_resc.resv_vnics)
++ return true;
++
++ netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
++ bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
++ return false;
+ #else
+ return false;
+ #endif
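
The pattern above is reserve-then-verify: request the VNICs NTUPLE needs, trust only what firmware actually granted, and on a shortfall fall back to the single VNIC normal operation requires. A standalone sketch with a stubbed firmware that grants at most four VNICs:

#include <stdio.h>

static int resv_vnics;	/* what firmware actually granted (stub state) */

/* Stub for the firmware reservation call; grants at most 4 VNICs. */
static void reserve_vnics(int want)
{
	resv_vnics = want > 4 ? 4 : want;
}

/* Reserve-then-verify: only claim RFS capability if the grant covers us. */
static int rfs_capable(int rx_rings)
{
	int vnics = rx_rings + 1;	/* one VNIC per ring, plus default */

	if (vnics == resv_vnics)
		return 1;		/* already reserved earlier */

	reserve_vnics(vnics);
	if (vnics <= resv_vnics)
		return 1;

	/* Shortfall: fall back to the 1 VNIC normal operation needs. */
	fprintf(stderr, "cannot reserve %d VNICs for NTUPLE\n", vnics);
	reserve_vnics(1);
	return 0;
}

int main(void)
{
	printf("rx=2: rfs=%d\n", rfs_capable(2));	/* 3 <= 4 -> ok */
	printf("rx=8: rfs=%d\n", rfs_capable(8));	/* 9 > 4 -> fallback */
	return 0;
}
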
diff --git a/patches.drivers/bnxt_en-Reserve-rings-at-driver-open-if-none-was-res.patch b/patches.drivers/bnxt_en-Reserve-rings-at-driver-open-if-none-was-res.patch
new file mode 100644
index 0000000000..2a483a42f1
--- /dev/null
+++ b/patches.drivers/bnxt_en-Reserve-rings-at-driver-open-if-none-was-res.patch
@@ -0,0 +1,75 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:44 -0400
+Subject: bnxt_en: Reserve rings at driver open if none was reserved at probe
+ time.
+Patch-mainline: v4.18-rc1
+Git-commit: 47558acd56a74c1ac598093930a5559270bf8c09
+References: bsc#1086282 FATE#324873
+
+Add logic to reserve default rings at driver open time if none was
+reserved during probe time. This will happen when the PF driver did
+not provision minimum rings to the VF, due to more limited resources.
+
+Driver open will only succeed if some minimum rings can be reserved.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 31 ++++++++++++++++++++++++++++++
+ 1 file changed, 31 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6776,6 +6776,8 @@ static void bnxt_preset_reg_win(struct b
+ }
+ }
+
++static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
++
+ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ {
+ int rc = 0;
+@@ -6783,6 +6785,12 @@ static int __bnxt_open_nic(struct bnxt *
+ bnxt_preset_reg_win(bp);
+ netif_carrier_off(bp->dev);
+ if (irq_re_init) {
++ /* Reserve rings now if none were reserved at driver probe. */
++ rc = bnxt_init_dflt_ring_mode(bp);
++ if (rc) {
++ netdev_err(bp->dev, "Failed to reserve default rings at open\n");
++ return rc;
++ }
+ rc = bnxt_reserve_rings(bp);
+ if (rc)
+ return rc;
+@@ -8505,6 +8513,29 @@ static int bnxt_set_dflt_rings(struct bn
+ return rc;
+ }
+
++static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
++{
++ int rc;
++
++ if (bp->tx_nr_rings)
++ return 0;
++
++ rc = bnxt_set_dflt_rings(bp, true);
++ if (rc) {
++ netdev_err(bp->dev, "Not enough rings available.\n");
++ return rc;
++ }
++ rc = bnxt_init_int_mode(bp);
++ if (rc)
++ return rc;
++ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++ if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
++ bp->flags |= BNXT_FLAG_RFS;
++ bp->dev->features |= NETIF_F_NTUPLE;
++ }
++ return 0;
++}
++
+ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
+ {
+ int rc;
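
The guard added above is small: if probe left the TX ring count at zero, compute defaults at open time and fail the open if even a minimal set cannot be had. Only that reserve-or-fail core is sketched below; the four-ring default and the avail_rings pool are invented for the example, and the real function also re-initializes interrupts and RFS flags:

#include <stdio.h>

struct nic {
	int tx_nr_rings;
	int avail_rings;	/* pretend ring pool granted by the PF */
};

static int set_dflt_rings(struct nic *n)
{
	if (n->avail_rings < 1)
		return -1;	/* not even a minimal ring set */
	n->tx_nr_rings = n->avail_rings < 4 ? n->avail_rings : 4;
	return 0;
}

/* Called early in open: no-op if probe already reserved rings. */
static int init_dflt_ring_mode(struct nic *n)
{
	if (n->tx_nr_rings)
		return 0;	/* probe-time reservation succeeded */
	if (set_dflt_rings(n)) {
		fprintf(stderr, "Not enough rings available.\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	struct nic starved = { .tx_nr_rings = 0, .avail_rings = 0 };
	struct nic ok = { .tx_nr_rings = 0, .avail_rings = 8 };

	printf("starved open: %d\n", init_dflt_ring_mode(&starved));
	printf("ok open: %d (tx=%d)\n", init_dflt_ring_mode(&ok),
	       ok.tx_nr_rings);
	return 0;
}
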
diff --git a/patches.drivers/bnxt_en-Reserve-rings-in-bnxt_set_channels-if-device.patch b/patches.drivers/bnxt_en-Reserve-rings-in-bnxt_set_channels-if-device.patch
new file mode 100644
index 0000000000..c8a352dfdd
--- /dev/null
+++ b/patches.drivers/bnxt_en-Reserve-rings-in-bnxt_set_channels-if-device.patch
@@ -0,0 +1,32 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:41 -0400
+Subject: bnxt_en: Reserve rings in bnxt_set_channels() if device is down.
+Patch-mainline: v4.18-rc1
+Git-commit: d8c09f19accb89fc08b246339abb005455e4c846
+References: bsc#1086282 FATE#324873
+
+The current code does not reserve rings during ethtool -L when the device
+is down. The rings will be reserved when the device is later opened.
+
+Change it to reserve rings during ethtool -L when the device is down.
+This provides a better guarantee that the device open will be successful
+when the rings are reserved ahead of time.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -572,6 +572,8 @@ static int bnxt_set_channels(struct net_
+ * to renable
+ */
+ }
++ } else {
++ rc = bnxt_reserve_rings(bp);
+ }
+
+ return rc;
diff --git a/patches.drivers/bnxt_en-Restore-MSIX-after-disabling-SRIOV.patch b/patches.drivers/bnxt_en-Restore-MSIX-after-disabling-SRIOV.patch
new file mode 100644
index 0000000000..b3d13ea9f7
--- /dev/null
+++ b/patches.drivers/bnxt_en-Restore-MSIX-after-disabling-SRIOV.patch
@@ -0,0 +1,61 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:05 -0500
+Subject: bnxt_en: Restore MSIX after disabling SRIOV.
+Patch-mainline: v4.16-rc1
+Git-commit: 80fcaf46c09262a71f32bb577c976814c922f864
+References: bsc#1086282 FATE#324873
+
+After SRIOV has been enabled and disabled, the MSIX vectors assigned to
+the VFs have to be re-initialized. Otherwise they cannot be re-used by
+the PF. For example, increasing the number of PF rings after disabling
+SRIOV may fail if the PF uses MSIX vectors previously assigned to the VFs.
+
+To fix this, we add logic in bnxt_restore_pf_fw_resources() to close the
+NIC, clear and re-init MSIX, and re-open the NIC.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16 ++++++++++++++--
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +-
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7977,11 +7977,23 @@ static int bnxt_set_dflt_rings(struct bn
+ return rc;
+ }
+
+-void bnxt_restore_pf_fw_resources(struct bnxt *bp)
++int bnxt_restore_pf_fw_resources(struct bnxt *bp)
+ {
++ int rc;
++
+ ASSERT_RTNL();
++ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
++ return 0;
++
+ bnxt_hwrm_func_qcaps(bp);
+- bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
++ __bnxt_close_nic(bp, true, false);
++ bnxt_clear_int_mode(bp);
++ rc = bnxt_init_int_mode(bp);
++ if (rc)
++ dev_close(bp->dev);
++ else
++ rc = bnxt_open_nic(bp, true, false);
++ return rc;
+ }
+
+ static int bnxt_init_mac_addr(struct bnxt *bp)
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1421,6 +1421,6 @@ int bnxt_check_rings(struct bnxt *bp, in
+ int tx_xdp);
+ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
+ int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
+-void bnxt_restore_pf_fw_resources(struct bnxt *bp);
++int bnxt_restore_pf_fw_resources(struct bnxt *bp);
+ int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
+ #endif
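
The fix above is mostly about ordering: close the NIC, clear interrupt mode, re-initialize MSIX, then reopen; if re-initialization fails, close the device rather than leave it half-configured. A stubbed sketch of that sequence:

#include <stdio.h>

/* Stubs standing in for the driver's close/init/open steps. */
static void close_nic(void)      { puts("close nic"); }
static void clear_int_mode(void) { puts("clear MSIX"); }
static int  init_int_mode(void)  { puts("re-init MSIX"); return 0; }
static int  open_nic(void)       { puts("open nic"); return 0; }
static void dev_close_stub(void) { puts("dev_close (give up)"); }

/* Sequence from the patch: redo MSIX so the PF can reuse VF vectors. */
static int restore_pf_fw_resources(void)
{
	int rc;

	close_nic();
	clear_int_mode();
	rc = init_int_mode();
	if (rc)
		dev_close_stub();	/* cannot run without interrupts */
	else
		rc = open_nic();
	return rc;
}

int main(void)
{
	return restore_pf_fw_resources();
}
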
diff --git a/patches.drivers/bnxt_en-Set-initial-default-RX-and-TX-ring-numbers-t.patch b/patches.drivers/bnxt_en-Set-initial-default-RX-and-TX-ring-numbers-t.patch
new file mode 100644
index 0000000000..6a7e7defba
--- /dev/null
+++ b/patches.drivers/bnxt_en-Set-initial-default-RX-and-TX-ring-numbers-t.patch
@@ -0,0 +1,64 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:08 -0500
+Subject: bnxt_en: Set initial default RX and TX ring numbers the same in
+ combined mode.
+Patch-mainline: v4.16-rc1
+Git-commit: 58ea801ac4c166cdcaa399ce7f9b3e9095ff2842
+References: bsc#1086282 FATE#324873
+
+In combined mode, the driver is currently not setting RX and TX ring
+numbers the same when firmware can allocate more RX than TX or vice versa.
+This will confuse the user as the ethtool convention assumes they are the
+same in combined mode. Fix it by adding bnxt_trim_dflt_sh_rings() to trim
+RX and TX ring numbers to be the same as the completion ring number in
+combined mode.
+
+Note that if TCs are enabled and/or XDP is enabled, the number of TX rings
+will not be the same as RX rings in combined mode.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7946,6 +7946,17 @@ static int bnxt_get_dflt_rings(struct bn
+ return rc;
+ }
+
++/* In initial default shared ring setting, each shared ring must have a
++ * RX/TX ring pair.
++ */
++static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
++{
++ bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
++ bp->rx_nr_rings = bp->cp_nr_rings;
++ bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
++ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
++}
++
+ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
+ {
+ int dflt_rings, max_rx_rings, max_tx_rings, rc;
+@@ -7961,10 +7972,18 @@ static int bnxt_set_dflt_rings(struct bn
+ return rc;
+ bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
+ bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
++ if (sh)
++ bnxt_trim_dflt_sh_rings(bp);
++ else
++ bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
++ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+
+ rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
+ if (rc)
+ netdev_warn(bp->dev, "Unable to reserve tx rings\n");
++ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
++ if (sh)
++ bnxt_trim_dflt_sh_rings(bp);
+
+ bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
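
The trim helper above enforces a single invariant: in combined mode, the RX, TX-per-TC, and completion ring counts all collapse to the common minimum, so ethtool sees RX == TX. A minimal sketch of that clamp:

#include <stdio.h>

struct rings { int rx, tx_per_tc, cp; };

static int min_int(int a, int b) { return a < b ? a : b; }

/* Combined mode: every shared ring needs an RX/TX pair, so clamp all
 * three counts to the smaller of RX and TX (mirrors the patch's helper).
 */
static void trim_dflt_sh_rings(struct rings *r)
{
	r->cp = min_int(r->tx_per_tc, r->rx);
	r->rx = r->cp;
	r->tx_per_tc = r->cp;
}

int main(void)
{
	/* Firmware granted 8 RX but only 6 TX rings. */
	struct rings r = { .rx = 8, .tx_per_tc = 6, .cp = 0 };

	trim_dflt_sh_rings(&r);
	printf("rx=%d tx=%d cp=%d\n", r.rx, r.tx_per_tc, r.cp); /* all 6 */
	return 0;
}
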
diff --git a/patches.drivers/bnxt_en-Simplify-ring-alloc-free-error-messages.patch b/patches.drivers/bnxt_en-Simplify-ring-alloc-free-error-messages.patch
new file mode 100644
index 0000000000..fbe3a6d869
--- /dev/null
+++ b/patches.drivers/bnxt_en-Simplify-ring-alloc-free-error-messages.patch
@@ -0,0 +1,76 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Thu, 26 Apr 2018 17:44:35 -0400
+Subject: bnxt_en: Simplify ring alloc/free error messages.
+Patch-mainline: v4.18-rc1
+Git-commit: 2727c888f2f8bef071e9a07d6e2f018840d0a834
+References: bsc#1086282 FATE#324873
+
+Replace switch statements printing different messages for every ring type
+with a common message.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 43 ++++--------------------------
+ 1 file changed, 6 insertions(+), 37 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -4311,26 +4311,9 @@ static int hwrm_ring_alloc_send_msg(stru
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || err) {
+- switch (ring_type) {
+- case RING_FREE_REQ_RING_TYPE_L2_CMPL:
+- netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
+- rc, err);
+- return -1;
+-
+- case RING_FREE_REQ_RING_TYPE_RX:
+- netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
+- rc, err);
+- return -1;
+-
+- case RING_FREE_REQ_RING_TYPE_TX:
+- netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
+- rc, err);
+- return -1;
+-
+- default:
+- netdev_err(bp->dev, "Invalid ring\n");
+- return -1;
+- }
++ netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
++ ring_type, rc, err);
++ return -EIO;
+ }
+ ring->fw_ring_id = ring_id;
+ return rc;
+@@ -4454,23 +4437,9 @@ static int hwrm_ring_free_send_msg(struc
+ mutex_unlock(&bp->hwrm_cmd_lock);
+
+ if (rc || error_code) {
+- switch (ring_type) {
+- case RING_FREE_REQ_RING_TYPE_L2_CMPL:
+- netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
+- rc);
+- return rc;
+- case RING_FREE_REQ_RING_TYPE_RX:
+- netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
+- rc);
+- return rc;
+- case RING_FREE_REQ_RING_TYPE_TX:
+- netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
+- rc);
+- return rc;
+- default:
+- netdev_err(bp->dev, "Invalid ring\n");
+- return -1;
+- }
++ netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
++ ring_type, rc, error_code);
++ return -EIO;
+ }
+ return 0;
+ }
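
The cleanup above replaces a per-ring-type switch with one message parameterized by ring type, and returns a proper errno instead of -1. A tiny sketch of the consolidated error path (the format string only approximates the patch's):

#include <stdio.h>
#include <errno.h>

/* One message for every ring type instead of a switch per type. */
static int report_ring_error(const char *op, int ring_type, int rc, int err)
{
	if (!rc && !err)
		return 0;
	fprintf(stderr, "%s type %d failed. rc:%x err:%x\n",
		op, ring_type, rc, err);
	return -EIO;	/* proper errno instead of the old -1 */
}

int main(void)
{
	report_ring_error("hwrm_ring_alloc", 2, 0x1, 0x0);
	return 0;
}
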
diff --git a/patches.drivers/bnxt_en-Support-max-mtu-with-VF-reps.patch b/patches.drivers/bnxt_en-Support-max-mtu-with-VF-reps.patch
new file mode 100644
index 0000000000..5a7d6761f4
--- /dev/null
+++ b/patches.drivers/bnxt_en-Support-max-mtu-with-VF-reps.patch
@@ -0,0 +1,76 @@
+From: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+Date: Wed, 11 Apr 2018 11:50:16 -0400
+Subject: bnxt_en: Support max-mtu with VF-reps
+Patch-mainline: v4.17-rc1
+Git-commit: 9d96465b111edd6c4f94345783e6e01db7f435d6
+References: bsc#1086282 FATE#324873
+
+While a VF is configured with a bigger mtu (> 1500), any packets that
+are punted to the VF-rep (slow-path) get dropped by OVS kernel-datapath
+with the following message: "dropped over-mtu packet". Fix this by
+returning the max-mtu value for a VF-rep derived from its corresponding VF.
+VF-rep's mtu can be changed using 'ip' command as shown in this example:
+
+ $ ip link set bnxt0_pf0vf0 mtu 9000
+
+Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 30 ++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+@@ -64,6 +64,31 @@ static int hwrm_cfa_vfr_free(struct bnxt
+ return rc;
+ }
+
++static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
++ u16 *max_mtu)
++{
++ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
++ struct hwrm_func_qcfg_input req = {0};
++ u16 mtu;
++ int rc;
++
++ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
++ req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid);
++
++ mutex_lock(&bp->hwrm_cmd_lock);
++
++ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ if (!rc) {
++ mtu = le16_to_cpu(resp->max_mtu_configured);
++ if (!mtu)
++ *max_mtu = BNXT_MAX_MTU;
++ else
++ *max_mtu = mtu;
++ }
++ mutex_unlock(&bp->hwrm_cmd_lock);
++ return rc;
++}
++
+ static int bnxt_vf_rep_open(struct net_device *dev)
+ {
+ struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
+@@ -330,6 +355,7 @@ static void bnxt_vf_rep_netdev_init(stru
+ struct net_device *dev)
+ {
+ struct net_device *pf_dev = bp->dev;
++ u16 max_mtu;
+
+ dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
+ dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
+@@ -345,6 +371,10 @@ static void bnxt_vf_rep_netdev_init(stru
+ bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
+ dev->perm_addr);
+ ether_addr_copy(dev->dev_addr, dev->perm_addr);
++ /* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
++ if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
++ dev->max_mtu = max_mtu;
++ dev->min_mtu = ETH_ZLEN;
+ }
+
+ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
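
The MTU logic above queries the VF's configured maximum over HWRM and treats a zero reply as "unset", falling back to the chip maximum. A sketch of that fallback with a pretend firmware answer (9500 stands in for BNXT_MAX_MTU; the real constant may differ):

#include <stdio.h>

#define MAX_MTU_DFLT 9500	/* stand-in for BNXT_MAX_MTU */

/* Pretend firmware answer for a given VF; 0 means "not configured". */
static unsigned short query_vf_max_mtu(int vf_idx)
{
	return vf_idx == 0 ? 9000 : 0;
}

/* Zero from firmware -> fall back to the chip-wide maximum. */
static unsigned short vf_rep_max_mtu(int vf_idx)
{
	unsigned short mtu = query_vf_max_mtu(vf_idx);

	return mtu ? mtu : MAX_MTU_DFLT;
}

int main(void)
{
	printf("vf0 rep max_mtu=%u\n", vf_rep_max_mtu(0));	/* 9000 */
	printf("vf1 rep max_mtu=%u\n", vf_rep_max_mtu(1));	/* 9500 */
	return 0;
}
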
diff --git a/patches.drivers/bnxt_en-Update-firmware-interface-to-1.9.0.patch b/patches.drivers/bnxt_en-Update-firmware-interface-to-1.9.0.patch
new file mode 100644
index 0000000000..20d0537425
--- /dev/null
+++ b/patches.drivers/bnxt_en-Update-firmware-interface-to-1.9.0.patch
@@ -0,0 +1,12126 @@
+From: Michael Chan <michael.chan@broadcom.com>
+Date: Wed, 17 Jan 2018 03:21:03 -0500
+Subject: bnxt_en: Update firmware interface to 1.9.0.
+Patch-mainline: v4.16-rc1
+Git-commit: 894aa69a90932907f3de9d849ab9970884151d0e
+References: bsc#1086282 FATE#324873
+
+The version has new firmware APIs to allocate PF/VF resources more
+flexibly.
+
+New toolchains were used to generate this file, resulting in a one-time
+large diffstat.
+
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 6
+ drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h |11224 ++++++++++++--------------
+ 3 files changed, 5498 insertions(+), 5751 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -1,7 +1,7 @@
+ /* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+- * Copyright (c) 2016-2017 Broadcom Limited
++ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -4883,23 +4883,24 @@ static int bnxt_hwrm_ver_get(struct bnxt
+
+ memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+
+- bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
+- resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
+- if (resp->hwrm_intf_maj < 1) {
++ bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
++ resp->hwrm_intf_min_8b << 8 |
++ resp->hwrm_intf_upd_8b;
++ if (resp->hwrm_intf_maj_8b < 1) {
+ netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
+- resp->hwrm_intf_maj, resp->hwrm_intf_min,
+- resp->hwrm_intf_upd);
++ resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
++ resp->hwrm_intf_upd_8b);
+ netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
+ }
+ snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
+- resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
+- resp->hwrm_fw_rsvd);
++ resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
++ resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
+
+ bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
+ if (!bp->hwrm_cmd_timeout)
+ bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+
+- if (resp->hwrm_intf_maj >= 1)
++ if (resp->hwrm_intf_maj_8b >= 1)
+ bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+
+ bp->chip_num = le16_to_cpu(resp->chip_num);
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -1,7 +1,7 @@
+ /* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+- * Copyright (c) 2016-2017 Broadcom Limited
++ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -12,10 +12,10 @@
+ #define BNXT_H
+
+ #define DRV_MODULE_NAME "bnxt_en"
+-#define DRV_MODULE_VERSION "1.8.0"
++#define DRV_MODULE_VERSION "1.9.0"
+
+ #define DRV_VER_MAJ 1
+-#define DRV_VER_MIN 8
++#define DRV_VER_MIN 9
+ #define DRV_VER_UPD 0
+
+ #include <linux/interrupt.h>
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+@@ -1,2437 +1,2700 @@
+ /* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2014-2016 Broadcom Corporation
+- * Copyright (c) 2016-2017 Broadcom Limited
++ * Copyright (c) 2016-2018 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
++ *
++ * DO NOT MODIFY!!! This file is automatically generated.
+ */
+
+-#ifndef BNXT_HSI_H
+-#define BNXT_HSI_H
++#ifndef _BNXT_HSI_H_
++#define _BNXT_HSI_H_
+
+-/* HSI and HWRM Specification 1.8.3 */
+-#define HWRM_VERSION_MAJOR 1
+-#define HWRM_VERSION_MINOR 8
+-#define HWRM_VERSION_UPDATE 3
+-
+-#define HWRM_VERSION_RSVD 1 /* non-zero means beta version */
+-
+-#define HWRM_VERSION_STR "1.8.3.1"
+-/*
+- * Following is the signature for HWRM message field that indicates not
+- * applicable (All F's). Need to cast it the size of the field if needed.
+- */
+-#define HWRM_NA_SIGNATURE ((__le32)(-1))
+-#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */
+-#define HWRM_MAX_RESP_LEN (280) /* hwrm_selftest_qlist */
+-#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */
+-#define HW_HASH_KEY_SIZE 40
+-#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */
++/* hwrm_cmd_hdr (size:128b/16B) */
++struct hwrm_cmd_hdr {
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++};
++
++/* hwrm_resp_hdr (size:64b/8B) */
++struct hwrm_resp_hdr {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++};
++
++#define CMD_DISCR_TLV_ENCAP 0x8000UL
++#define CMD_DISCR_LAST CMD_DISCR_TLV_ENCAP
++
++
++#define TLV_TYPE_HWRM_REQUEST 0x1UL
++#define TLV_TYPE_HWRM_RESPONSE 0x2UL
++#define TLV_TYPE_ROCE_SP_COMMAND 0x3UL
++#define TLV_TYPE_ENGINE_CKV_DEVICE_SERIAL_NUMBER 0x8001UL
++#define TLV_TYPE_ENGINE_CKV_NONCE 0x8002UL
++#define TLV_TYPE_ENGINE_CKV_IV 0x8003UL
++#define TLV_TYPE_ENGINE_CKV_AUTH_TAG 0x8004UL
++#define TLV_TYPE_ENGINE_CKV_CIPHERTEXT 0x8005UL
++#define TLV_TYPE_ENGINE_CKV_ALGORITHMS 0x8006UL
++#define TLV_TYPE_ENGINE_CKV_ECC_PUBLIC_KEY 0x8007UL
++#define TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE 0x8008UL
++#define TLV_TYPE_LAST TLV_TYPE_ENGINE_CKV_ECDSA_SIGNATURE
++
++
++/* tlv (size:64b/8B) */
++struct tlv {
++ __le16 cmd_discr;
++ u8 reserved_8b;
++ u8 flags;
++ #define TLV_FLAGS_MORE 0x1UL
++ #define TLV_FLAGS_MORE_LAST 0x0UL
++ #define TLV_FLAGS_MORE_NOT_LAST 0x1UL
++ #define TLV_FLAGS_REQUIRED 0x2UL
++ #define TLV_FLAGS_REQUIRED_NO (0x0UL << 1)
++ #define TLV_FLAGS_REQUIRED_YES (0x1UL << 1)
++ #define TLV_FLAGS_REQUIRED_LAST TLV_FLAGS_REQUIRED_YES
++ __le16 tlv_type;
++ __le16 length;
++};
++
++/* input (size:128b/16B) */
++struct input {
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++};
++
++/* output (size:64b/8B) */
++struct output {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++};
++
++/* hwrm_short_input (size:128b/16B) */
++struct hwrm_short_input {
++ __le16 req_type;
++ __le16 signature;
++ #define SHORT_REQ_SIGNATURE_SHORT_CMD 0x4321UL
++ #define SHORT_REQ_SIGNATURE_LAST SHORT_REQ_SIGNATURE_SHORT_CMD
++ __le16 unused_0;
++ __le16 size;
++ __le64 req_addr;
++};
++
++/* cmd_nums (size:64b/8B) */
++struct cmd_nums {
++ __le16 req_type;
++ #define HWRM_VER_GET 0x0UL
++ #define HWRM_FUNC_BUF_UNRGTR 0xeUL
++ #define HWRM_FUNC_VF_CFG 0xfUL
++ #define HWRM_RESERVED1 0x10UL
++ #define HWRM_FUNC_RESET 0x11UL
++ #define HWRM_FUNC_GETFID 0x12UL
++ #define HWRM_FUNC_VF_ALLOC 0x13UL
++ #define HWRM_FUNC_VF_FREE 0x14UL
++ #define HWRM_FUNC_QCAPS 0x15UL
++ #define HWRM_FUNC_QCFG 0x16UL
++ #define HWRM_FUNC_CFG 0x17UL
++ #define HWRM_FUNC_QSTATS 0x18UL
++ #define HWRM_FUNC_CLR_STATS 0x19UL
++ #define HWRM_FUNC_DRV_UNRGTR 0x1aUL
++ #define HWRM_FUNC_VF_RESC_FREE 0x1bUL
++ #define HWRM_FUNC_VF_VNIC_IDS_QUERY 0x1cUL
++ #define HWRM_FUNC_DRV_RGTR 0x1dUL
++ #define HWRM_FUNC_DRV_QVER 0x1eUL
++ #define HWRM_FUNC_BUF_RGTR 0x1fUL
++ #define HWRM_PORT_PHY_CFG 0x20UL
++ #define HWRM_PORT_MAC_CFG 0x21UL
++ #define HWRM_PORT_TS_QUERY 0x22UL
++ #define HWRM_PORT_QSTATS 0x23UL
++ #define HWRM_PORT_LPBK_QSTATS 0x24UL
++ #define HWRM_PORT_CLR_STATS 0x25UL
++ #define HWRM_PORT_LPBK_CLR_STATS 0x26UL
++ #define HWRM_PORT_PHY_QCFG 0x27UL
++ #define HWRM_PORT_MAC_QCFG 0x28UL
++ #define HWRM_PORT_MAC_PTP_QCFG 0x29UL
++ #define HWRM_PORT_PHY_QCAPS 0x2aUL
++ #define HWRM_PORT_PHY_I2C_WRITE 0x2bUL
++ #define HWRM_PORT_PHY_I2C_READ 0x2cUL
++ #define HWRM_PORT_LED_CFG 0x2dUL
++ #define HWRM_PORT_LED_QCFG 0x2eUL
++ #define HWRM_PORT_LED_QCAPS 0x2fUL
++ #define HWRM_QUEUE_QPORTCFG 0x30UL
++ #define HWRM_QUEUE_QCFG 0x31UL
++ #define HWRM_QUEUE_CFG 0x32UL
++ #define HWRM_FUNC_VLAN_CFG 0x33UL
++ #define HWRM_FUNC_VLAN_QCFG 0x34UL
++ #define HWRM_QUEUE_PFCENABLE_QCFG 0x35UL
++ #define HWRM_QUEUE_PFCENABLE_CFG 0x36UL
++ #define HWRM_QUEUE_PRI2COS_QCFG 0x37UL
++ #define HWRM_QUEUE_PRI2COS_CFG 0x38UL
++ #define HWRM_QUEUE_COS2BW_QCFG 0x39UL
++ #define HWRM_QUEUE_COS2BW_CFG 0x3aUL
++ #define HWRM_QUEUE_DSCP_QCAPS 0x3bUL
++ #define HWRM_QUEUE_DSCP2PRI_QCFG 0x3cUL
++ #define HWRM_QUEUE_DSCP2PRI_CFG 0x3dUL
++ #define HWRM_VNIC_ALLOC 0x40UL
++ #define HWRM_VNIC_FREE 0x41UL
++ #define HWRM_VNIC_CFG 0x42UL
++ #define HWRM_VNIC_QCFG 0x43UL
++ #define HWRM_VNIC_TPA_CFG 0x44UL
++ #define HWRM_VNIC_TPA_QCFG 0x45UL
++ #define HWRM_VNIC_RSS_CFG 0x46UL
++ #define HWRM_VNIC_RSS_QCFG 0x47UL
++ #define HWRM_VNIC_PLCMODES_CFG 0x48UL
++ #define HWRM_VNIC_PLCMODES_QCFG 0x49UL
++ #define HWRM_VNIC_QCAPS 0x4aUL
++ #define HWRM_RING_ALLOC 0x50UL
++ #define HWRM_RING_FREE 0x51UL
++ #define HWRM_RING_CMPL_RING_QAGGINT_PARAMS 0x52UL
++ #define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS 0x53UL
++ #define HWRM_RING_RESET 0x5eUL
++ #define HWRM_RING_GRP_ALLOC 0x60UL
++ #define HWRM_RING_GRP_FREE 0x61UL
++ #define HWRM_RESERVED5 0x64UL
++ #define HWRM_RESERVED6 0x65UL
++ #define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC 0x70UL
++ #define HWRM_VNIC_RSS_COS_LB_CTX_FREE 0x71UL
++ #define HWRM_CFA_L2_FILTER_ALLOC 0x90UL
++ #define HWRM_CFA_L2_FILTER_FREE 0x91UL
++ #define HWRM_CFA_L2_FILTER_CFG 0x92UL
++ #define HWRM_CFA_L2_SET_RX_MASK 0x93UL
++ #define HWRM_CFA_VLAN_ANTISPOOF_CFG 0x94UL
++ #define HWRM_CFA_TUNNEL_FILTER_ALLOC 0x95UL
++ #define HWRM_CFA_TUNNEL_FILTER_FREE 0x96UL
++ #define HWRM_CFA_ENCAP_RECORD_ALLOC 0x97UL
++ #define HWRM_CFA_ENCAP_RECORD_FREE 0x98UL
++ #define HWRM_CFA_NTUPLE_FILTER_ALLOC 0x99UL
++ #define HWRM_CFA_NTUPLE_FILTER_FREE 0x9aUL
++ #define HWRM_CFA_NTUPLE_FILTER_CFG 0x9bUL
++ #define HWRM_CFA_EM_FLOW_ALLOC 0x9cUL
++ #define HWRM_CFA_EM_FLOW_FREE 0x9dUL
++ #define HWRM_CFA_EM_FLOW_CFG 0x9eUL
++ #define HWRM_TUNNEL_DST_PORT_QUERY 0xa0UL
++ #define HWRM_TUNNEL_DST_PORT_ALLOC 0xa1UL
++ #define HWRM_TUNNEL_DST_PORT_FREE 0xa2UL
++ #define HWRM_STAT_CTX_ALLOC 0xb0UL
++ #define HWRM_STAT_CTX_FREE 0xb1UL
++ #define HWRM_STAT_CTX_QUERY 0xb2UL
++ #define HWRM_STAT_CTX_CLR_STATS 0xb3UL
++ #define HWRM_FW_RESET 0xc0UL
++ #define HWRM_FW_QSTATUS 0xc1UL
++ #define HWRM_FW_SET_TIME 0xc8UL
++ #define HWRM_FW_GET_TIME 0xc9UL
++ #define HWRM_FW_SET_STRUCTURED_DATA 0xcaUL
++ #define HWRM_FW_GET_STRUCTURED_DATA 0xcbUL
++ #define HWRM_FW_IPC_MAILBOX 0xccUL
++ #define HWRM_EXEC_FWD_RESP 0xd0UL
++ #define HWRM_REJECT_FWD_RESP 0xd1UL
++ #define HWRM_FWD_RESP 0xd2UL
++ #define HWRM_FWD_ASYNC_EVENT_CMPL 0xd3UL
++ #define HWRM_TEMP_MONITOR_QUERY 0xe0UL
++ #define HWRM_WOL_FILTER_ALLOC 0xf0UL
++ #define HWRM_WOL_FILTER_FREE 0xf1UL
++ #define HWRM_WOL_FILTER_QCFG 0xf2UL
++ #define HWRM_WOL_REASON_QCFG 0xf3UL
++ #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL
++ #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL
++ #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL
++ #define HWRM_CFA_METER_INSTANCE_ALLOC 0xf8UL
++ #define HWRM_CFA_METER_INSTANCE_FREE 0xf9UL
++ #define HWRM_CFA_VFR_ALLOC 0xfdUL
++ #define HWRM_CFA_VFR_FREE 0xfeUL
++ #define HWRM_CFA_VF_PAIR_ALLOC 0x100UL
++ #define HWRM_CFA_VF_PAIR_FREE 0x101UL
++ #define HWRM_CFA_VF_PAIR_INFO 0x102UL
++ #define HWRM_CFA_FLOW_ALLOC 0x103UL
++ #define HWRM_CFA_FLOW_FREE 0x104UL
++ #define HWRM_CFA_FLOW_FLUSH 0x105UL
++ #define HWRM_CFA_FLOW_STATS 0x106UL
++ #define HWRM_CFA_FLOW_INFO 0x107UL
++ #define HWRM_CFA_DECAP_FILTER_ALLOC 0x108UL
++ #define HWRM_CFA_DECAP_FILTER_FREE 0x109UL
++ #define HWRM_CFA_VLAN_ANTISPOOF_QCFG 0x10aUL
++ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC 0x10bUL
++ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE 0x10cUL
++ #define HWRM_CFA_PAIR_ALLOC 0x10dUL
++ #define HWRM_CFA_PAIR_FREE 0x10eUL
++ #define HWRM_CFA_PAIR_INFO 0x10fUL
++ #define HWRM_FW_IPC_MSG 0x110UL
++ #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL
++ #define HWRM_ENGINE_CKV_HELLO 0x12dUL
++ #define HWRM_ENGINE_CKV_STATUS 0x12eUL
++ #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL
++ #define HWRM_ENGINE_CKV_CKEK_DELETE 0x130UL
++ #define HWRM_ENGINE_CKV_KEY_ADD 0x131UL
++ #define HWRM_ENGINE_CKV_KEY_DELETE 0x132UL
++ #define HWRM_ENGINE_CKV_FLUSH 0x133UL
++ #define HWRM_ENGINE_CKV_RNG_GET 0x134UL
++ #define HWRM_ENGINE_CKV_KEY_GEN 0x135UL
++ #define HWRM_ENGINE_QG_CONFIG_QUERY 0x13cUL
++ #define HWRM_ENGINE_QG_QUERY 0x13dUL
++ #define HWRM_ENGINE_QG_METER_PROFILE_CONFIG_QUERY 0x13eUL
++ #define HWRM_ENGINE_QG_METER_PROFILE_QUERY 0x13fUL
++ #define HWRM_ENGINE_QG_METER_PROFILE_ALLOC 0x140UL
++ #define HWRM_ENGINE_QG_METER_PROFILE_FREE 0x141UL
++ #define HWRM_ENGINE_QG_METER_QUERY 0x142UL
++ #define HWRM_ENGINE_QG_METER_BIND 0x143UL
++ #define HWRM_ENGINE_QG_METER_UNBIND 0x144UL
++ #define HWRM_ENGINE_QG_FUNC_BIND 0x145UL
++ #define HWRM_ENGINE_SG_CONFIG_QUERY 0x146UL
++ #define HWRM_ENGINE_SG_QUERY 0x147UL
++ #define HWRM_ENGINE_SG_METER_QUERY 0x148UL
++ #define HWRM_ENGINE_SG_METER_CONFIG 0x149UL
++ #define HWRM_ENGINE_SG_QG_BIND 0x14aUL
++ #define HWRM_ENGINE_QG_SG_UNBIND 0x14bUL
++ #define HWRM_ENGINE_CONFIG_QUERY 0x154UL
++ #define HWRM_ENGINE_STATS_CONFIG 0x155UL
++ #define HWRM_ENGINE_STATS_CLEAR 0x156UL
++ #define HWRM_ENGINE_STATS_QUERY 0x157UL
++ #define HWRM_ENGINE_RQ_ALLOC 0x15eUL
++ #define HWRM_ENGINE_RQ_FREE 0x15fUL
++ #define HWRM_ENGINE_CQ_ALLOC 0x160UL
++ #define HWRM_ENGINE_CQ_FREE 0x161UL
++ #define HWRM_ENGINE_NQ_ALLOC 0x162UL
++ #define HWRM_ENGINE_NQ_FREE 0x163UL
++ #define HWRM_ENGINE_ON_DIE_RQE_CREDITS 0x164UL
++ #define HWRM_FUNC_RESOURCE_QCAPS 0x190UL
++ #define HWRM_FUNC_VF_RESOURCE_CFG 0x191UL
++ #define HWRM_SELFTEST_QLIST 0x200UL
++ #define HWRM_SELFTEST_EXEC 0x201UL
++ #define HWRM_SELFTEST_IRQ 0x202UL
++ #define HWRM_SELFTEST_RETRIEVE_SERDES_DATA 0x203UL
++ #define HWRM_DBG_READ_DIRECT 0xff10UL
++ #define HWRM_DBG_READ_INDIRECT 0xff11UL
++ #define HWRM_DBG_WRITE_DIRECT 0xff12UL
++ #define HWRM_DBG_WRITE_INDIRECT 0xff13UL
++ #define HWRM_DBG_DUMP 0xff14UL
++ #define HWRM_DBG_ERASE_NVM 0xff15UL
++ #define HWRM_DBG_CFG 0xff16UL
++ #define HWRM_DBG_COREDUMP_LIST 0xff17UL
++ #define HWRM_DBG_COREDUMP_INITIATE 0xff18UL
++ #define HWRM_DBG_COREDUMP_RETRIEVE 0xff19UL
++ #define HWRM_NVM_FACTORY_DEFAULTS 0xffeeUL
++ #define HWRM_NVM_VALIDATE_OPTION 0xffefUL
++ #define HWRM_NVM_FLUSH 0xfff0UL
++ #define HWRM_NVM_GET_VARIABLE 0xfff1UL
++ #define HWRM_NVM_SET_VARIABLE 0xfff2UL
++ #define HWRM_NVM_INSTALL_UPDATE 0xfff3UL
++ #define HWRM_NVM_MODIFY 0xfff4UL
++ #define HWRM_NVM_VERIFY_UPDATE 0xfff5UL
++ #define HWRM_NVM_GET_DEV_INFO 0xfff6UL
++ #define HWRM_NVM_ERASE_DIR_ENTRY 0xfff7UL
++ #define HWRM_NVM_MOD_DIR_ENTRY 0xfff8UL
++ #define HWRM_NVM_FIND_DIR_ENTRY 0xfff9UL
++ #define HWRM_NVM_GET_DIR_ENTRIES 0xfffaUL
++ #define HWRM_NVM_GET_DIR_INFO 0xfffbUL
++ #define HWRM_NVM_RAW_DUMP 0xfffcUL
++ #define HWRM_NVM_READ 0xfffdUL
++ #define HWRM_NVM_WRITE 0xfffeUL
++ #define HWRM_NVM_RAW_WRITE_BLK 0xffffUL
++ #define HWRM_LAST HWRM_NVM_RAW_WRITE_BLK
++ __le16 unused_0[3];
++};
++
++/* ret_codes (size:64b/8B) */
++struct ret_codes {
++ __le16 error_code;
++ #define HWRM_ERR_CODE_SUCCESS 0x0UL
++ #define HWRM_ERR_CODE_FAIL 0x1UL
++ #define HWRM_ERR_CODE_INVALID_PARAMS 0x2UL
++ #define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED 0x3UL
++ #define HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR 0x4UL
++ #define HWRM_ERR_CODE_INVALID_FLAGS 0x5UL
++ #define HWRM_ERR_CODE_INVALID_ENABLES 0x6UL
++ #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL
++ #define HWRM_ERR_CODE_NO_BUFFER 0x8UL
++ #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
++ #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
++ #define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
++ #define HWRM_ERR_CODE_LAST HWRM_ERR_CODE_CMD_NOT_SUPPORTED
++ __le16 unused_0[3];
++};
++
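A driver typically folds the error codes above into errno values before returning to callers. The mapping below is illustrative only, not taken from this patch; err is the CPU-endian error_code:

/* Illustrative sketch: HWRM error code to errno. */
static int hwrm_err_to_errno(u16 err)
{
	switch (err) {
	case HWRM_ERR_CODE_SUCCESS:
		return 0;
	case HWRM_ERR_CODE_INVALID_PARAMS:
	case HWRM_ERR_CODE_INVALID_FLAGS:
	case HWRM_ERR_CODE_INVALID_ENABLES:
		return -EINVAL;
	case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
		return -EACCES;
	case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
	case HWRM_ERR_CODE_NO_BUFFER:
		return -ENOMEM;
	case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	default:
		return -EIO;
	}
}
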
++/* hwrm_err_output (size:128b/16B) */
++struct hwrm_err_output {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 opaque_0;
++ __le16 opaque_1;
++ u8 cmd_err;
++ u8 valid;
++};
++#define HWRM_NA_SIGNATURE ((__le32)(-1))
++#define HWRM_MAX_REQ_LEN 128
++#define HWRM_MAX_RESP_LEN 280
++#define HW_HASH_INDEX_SIZE 0x80
++#define HW_HASH_KEY_SIZE 40
++#define HWRM_RESP_VALID_KEY 1
++#define HWRM_VERSION_MAJOR 1
++#define HWRM_VERSION_MINOR 9
++#define HWRM_VERSION_UPDATE 0
++#define HWRM_VERSION_RSVD 0
++#define HWRM_VERSION_STR "1.9.0.0"
++
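HWRM_RESP_VALID_KEY pairs with the valid byte at the tail of hwrm_err_output: the expectation is that firmware writes the payload first and the valid byte last, so a consumer can poll it. A sketch of that check, assuming firmware writes exactly HWRM_RESP_VALID_KEY:

/* Sketch only: has firmware finished writing this response buffer? */
static bool hwrm_resp_ready(const struct hwrm_err_output *resp)
{
	if (READ_ONCE(resp->valid) != HWRM_RESP_VALID_KEY)
		return false;
	dma_rmb();	/* order the valid check before payload reads */
	return true;
}
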
++/* hwrm_ver_get_input (size:192b/24B) */
++struct hwrm_ver_get_input {
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 hwrm_intf_maj;
++ u8 hwrm_intf_min;
++ u8 hwrm_intf_upd;
++ u8 unused_0[5];
++};
++
++/* hwrm_ver_get_output (size:1408b/176B) */
++struct hwrm_ver_get_output {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 hwrm_intf_maj_8b;
++ u8 hwrm_intf_min_8b;
++ u8 hwrm_intf_upd_8b;
++ u8 hwrm_intf_rsvd_8b;
++ u8 hwrm_fw_maj_8b;
++ u8 hwrm_fw_min_8b;
++ u8 hwrm_fw_bld_8b;
++ u8 hwrm_fw_rsvd_8b;
++ u8 mgmt_fw_maj_8b;
++ u8 mgmt_fw_min_8b;
++ u8 mgmt_fw_bld_8b;
++ u8 mgmt_fw_rsvd_8b;
++ u8 netctrl_fw_maj_8b;
++ u8 netctrl_fw_min_8b;
++ u8 netctrl_fw_bld_8b;
++ u8 netctrl_fw_rsvd_8b;
++ __le32 dev_caps_cfg;
++ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
++ #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
++ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
++ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
++ u8 roce_fw_maj_8b;
++ u8 roce_fw_min_8b;
++ u8 roce_fw_bld_8b;
++ u8 roce_fw_rsvd_8b;
++ char hwrm_fw_name[16];
++ char mgmt_fw_name[16];
++ char netctrl_fw_name[16];
++ u8 reserved2[16];
++ char roce_fw_name[16];
++ __le16 chip_num;
++ u8 chip_rev;
++ u8 chip_metal;
++ u8 chip_bond_id;
++ u8 chip_platform_type;
++ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
++ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
++ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
++ #define VER_GET_RESP_CHIP_PLATFORM_TYPE_LAST VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM
++ __le16 max_req_win_len;
++ __le16 max_resp_len;
++ __le16 def_req_timeout;
++ u8 flags;
++ #define VER_GET_RESP_FLAGS_DEV_NOT_RDY 0x1UL
++ #define VER_GET_RESP_FLAGS_EXT_VER_AVAIL 0x2UL
++ u8 unused_0[2];
++ u8 always_1;
++ __le16 hwrm_intf_major;
++ __le16 hwrm_intf_minor;
++ __le16 hwrm_intf_build;
++ __le16 hwrm_intf_patch;
++ __le16 hwrm_fw_major;
++ __le16 hwrm_fw_minor;
++ __le16 hwrm_fw_build;
++ __le16 hwrm_fw_patch;
++ __le16 mgmt_fw_major;
++ __le16 mgmt_fw_minor;
++ __le16 mgmt_fw_build;
++ __le16 mgmt_fw_patch;
++ __le16 netctrl_fw_major;
++ __le16 netctrl_fw_minor;
++ __le16 netctrl_fw_build;
++ __le16 netctrl_fw_patch;
++ __le16 roce_fw_major;
++ __le16 roce_fw_minor;
++ __le16 roce_fw_build;
++ __le16 roce_fw_patch;
++ __le16 max_ext_req_len;
++ u8 unused_1[5];
++ u8 valid;
++};
+
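VER_GET is conventionally the first command a driver sends: it advertises the driver's interface version from the HWRM_VERSION_* macros and reads back capabilities, including whether the short command format is required. A sketch of that handshake; hwrm_send is a stand-in for the real transport:

int hwrm_send(void *req, int req_len, void *resp, int resp_len); /* stand-in */

/* Sketch only: the initial version handshake. */
static int hwrm_ver_get(void)
{
	struct hwrm_ver_get_input req = {};
	struct hwrm_ver_get_output resp = {};
	int rc;

	req.req_type = cpu_to_le16(HWRM_VER_GET);
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = hwrm_send(&req, sizeof(req), &resp, sizeof(resp));
	if (rc)
		return rc;

	if (le32_to_cpu(resp.dev_caps_cfg) &
	    VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)
		pr_info("firmware requires the short command format\n");
	return 0;
}
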
+-/* Statistics Ejection Buffer Completion Record (16 bytes) */
++/* eject_cmpl (size:128b/16B) */
+ struct eject_cmpl {
+- __le16 type;
+- #define EJECT_CMPL_TYPE_MASK 0x3fUL
+- #define EJECT_CMPL_TYPE_SFT 0
+- #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
+- __le16 len;
+- __le32 opaque;
+- __le32 v;
+- #define EJECT_CMPL_V 0x1UL
+- __le32 unused_2;
++ __le16 type;
++ #define EJECT_CMPL_TYPE_MASK 0x3fUL
++ #define EJECT_CMPL_TYPE_SFT 0
++ #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL
++ #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT
++ __le16 len;
++ __le32 opaque;
++ __le32 v;
++ #define EJECT_CMPL_V 0x1UL
++ __le32 unused_2;
+ };
+
+-/* HWRM Completion Record (16 bytes) */
++/* hwrm_cmpl (size:128b/16B) */
+ struct hwrm_cmpl {
+- __le16 type;
+- #define CMPL_TYPE_MASK 0x3fUL
+- #define CMPL_TYPE_SFT 0
+- #define CMPL_TYPE_HWRM_DONE 0x20UL
+- __le16 sequence_id;
+- __le32 unused_1;
+- __le32 v;
+- #define CMPL_V 0x1UL
+- __le32 unused_3;
++ __le16 type;
++ #define CMPL_TYPE_MASK 0x3fUL
++ #define CMPL_TYPE_SFT 0
++ #define CMPL_TYPE_HWRM_DONE 0x20UL
++ #define CMPL_TYPE_LAST CMPL_TYPE_HWRM_DONE
++ __le16 sequence_id;
++ __le32 unused_1;
++ __le32 v;
++ #define CMPL_V 0x1UL
++ __le32 unused_3;
+ };
+
+-/* HWRM Forwarded Request (16 bytes) */
++/* hwrm_fwd_req_cmpl (size:128b/16B) */
+ struct hwrm_fwd_req_cmpl {
+- __le16 req_len_type;
+- #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
+- #define FWD_REQ_CMPL_TYPE_SFT 0
+- #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
+- #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
+- #define FWD_REQ_CMPL_REQ_LEN_SFT 6
+- __le16 source_id;
+- __le32 unused_0;
+- __le32 req_buf_addr_v[2];
+- #define FWD_REQ_CMPL_V 0x1UL
+- #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
+- #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
++ __le16 req_len_type;
++ #define FWD_REQ_CMPL_TYPE_MASK 0x3fUL
++ #define FWD_REQ_CMPL_TYPE_SFT 0
++ #define FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ 0x22UL
++ #define FWD_REQ_CMPL_TYPE_LAST FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ
++ #define FWD_REQ_CMPL_REQ_LEN_MASK 0xffc0UL
++ #define FWD_REQ_CMPL_REQ_LEN_SFT 6
++ __le16 source_id;
++ __le32 unused0;
++ __le32 req_buf_addr_v[2];
++ #define FWD_REQ_CMPL_V 0x1UL
++ #define FWD_REQ_CMPL_REQ_BUF_ADDR_MASK 0xfffffffeUL
++ #define FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1
+ };
+
+-/* HWRM Forwarded Response (16 bytes) */
++/* hwrm_fwd_resp_cmpl (size:128b/16B) */
+ struct hwrm_fwd_resp_cmpl {
+- __le16 type;
+- #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
+- #define FWD_RESP_CMPL_TYPE_SFT 0
+- #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
+- __le16 source_id;
+- __le16 resp_len;
+- __le16 unused_1;
+- __le32 resp_buf_addr_v[2];
+- #define FWD_RESP_CMPL_V 0x1UL
+- #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
+- #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
++ __le16 type;
++ #define FWD_RESP_CMPL_TYPE_MASK 0x3fUL
++ #define FWD_RESP_CMPL_TYPE_SFT 0
++ #define FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP 0x24UL
++ #define FWD_RESP_CMPL_TYPE_LAST FWD_RESP_CMPL_TYPE_HWRM_FWD_RESP
++ __le16 source_id;
++ __le16 resp_len;
++ __le16 unused_1;
++ __le32 resp_buf_addr_v[2];
++ #define FWD_RESP_CMPL_V 0x1UL
++ #define FWD_RESP_CMPL_RESP_BUF_ADDR_MASK 0xfffffffeUL
++ #define FWD_RESP_CMPL_RESP_BUF_ADDR_SFT 1
+ };
+
+-/* HWRM Asynchronous Event Completion Record (16 bytes) */
++/* hwrm_async_event_cmpl (size:128b/16B) */
+ struct hwrm_async_event_cmpl {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
++ __le16 type;
++ #define ASYNC_EVENT_CMPL_TYPE_MASK 0x3fUL
++ #define ASYNC_EVENT_CMPL_TYPE_SFT 0
++ #define ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT 0x2eUL
++ #define ASYNC_EVENT_CMPL_TYPE_LAST ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT
++ __le16 event_id;
++ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE 0x1UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
+- #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_V 0x1UL
+- #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
++ #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD 0x21UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR 0x30UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE 0x33UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
++ #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
++ __le32 event_data2;
++ u8 opaque_v;
++ #define ASYNC_EVENT_CMPL_V 0x1UL
++ #define ASYNC_EVENT_CMPL_OPAQUE_MASK 0xfeUL
++ #define ASYNC_EVENT_CMPL_OPAQUE_SFT 1
++ u8 timestamp_lo;
++ __le16 timestamp_hi;
++ __le32 event_data1;
+ };
+
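The generic completion above is what a driver sees before it knows which event arrived; event_id selects one of the more specific layouts that follow. A minimal sketch of the dispatch, handling only two IDs:

/* Sketch only: route an async event by its event_id. */
static void hwrm_handle_async(const struct hwrm_async_event_cmpl *cmpl)
{
	switch (le16_to_cpu(cmpl->event_id)) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		/* decode event_data1 per the layout below */
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR:
		/* event_data2 carries the severity for this event */
		break;
	default:
		break;
	}
}
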
+-/* HWRM Asynchronous Event Completion Record for link status change (16 bytes) */
++/* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */
+ struct hwrm_async_event_cmpl_link_status_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
++ __le16 type;
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_MASK 0x3fUL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_SFT 0
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT
++ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE 0x0UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
+- #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for link MTU change (16 bytes) */
+-struct hwrm_async_event_cmpl_link_mtu_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_ID_LINK_MTU_CHANGE 0x1UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_V 0x1UL
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_LINK_MTU_CHANGE_EVENT_DATA1_NEW_MTU_SFT 0
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for link speed change (16 bytes) */
+-struct hwrm_async_event_cmpl_link_speed_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_ID_LINK_SPEED_CHANGE 0x2UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_V 0x1UL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for DCB Config change (16 bytes) */
+-struct hwrm_async_event_cmpl_dcb_config_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_ID_DCB_CONFIG_CHANGE 0x3UL
+- __le32 event_data2;
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_ETS 0x1UL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_PFC 0x2UL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA2_APP 0x4UL
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_V 0x1UL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_MASK 0xff0000UL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_SFT 16
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE (0xffUL << 16)
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_ROCE_PRIORITY_NONE
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_MASK 0xff000000UL
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_SFT 24
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE (0xffUL << 24)
+- #define ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_LAST ASYNC_EVENT_CMPL_DCB_CONFIG_CHANGE_EVENT_DATA1_RECOMMEND_L2_PRIORITY_NONE
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_ID_LINK_STATUS_CHANGE
++ __le32 event_data2;
++ u8 opaque_v;
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_V 0x1UL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_OPAQUE_SFT 1
++ u8 timestamp_lo;
++ __le16 timestamp_hi;
++ __le32 event_data1;
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN 0x0UL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP 0x1UL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
++ #define ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT 4
+ };
+
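event_data1 of the link status change event packs the link state in bit 0 and the port id in bits 4-19, per the masks above. A sketch of the decode, taking the already byte-swapped value:

/* Sketch only: data1 is le32_to_cpu(cmpl->event_data1). */
static void decode_link_status_change(u32 data1)
{
	bool up = data1 &
		ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE;
	u32 port_id = (data1 &
		ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK) >>
		ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_SFT;

	pr_info("link %s on port %u\n", up ? "up" : "down", port_id);
}
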
+-/* HWRM Asynchronous Event Completion Record for port connection not allowed (16 bytes) */
++/* hwrm_async_event_cmpl_port_conn_not_allowed (size:128b/16B) */
+ struct hwrm_async_event_cmpl_port_conn_not_allowed {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
++ __le16 type;
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_MASK 0x3fUL
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_SFT 0
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT
++ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED 0x4UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_ID_PORT_CONN_NOT_ALLOWED
++ __le32 event_data2;
++ u8 opaque_v;
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+- #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
+-struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED 0x5UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_OPAQUE_SFT 1
++ u8 timestamp_lo;
++ __le16 timestamp_hi;
++ __le32 event_data1;
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
++ #define ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+ };
+
+-/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
++/* hwrm_async_event_cmpl_link_speed_cfg_change (size:128b/16B) */
+ struct hwrm_async_event_cmpl_link_speed_cfg_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
++ __le16 type;
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
++ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE
++ __le32 event_data2;
++ u8 opaque_v;
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+- #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
+-struct hwrm_async_event_cmpl_func_drvr_unload {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_V 0x1UL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for Function Driver load (16 bytes) */
+-struct hwrm_async_event_cmpl_func_drvr_load {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_V 0x1UL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_FUNC_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+-};
+-
+-/* HWRM Asynchronous Event Completion Record to indicate completion of FLR related processing (16 bytes) */
+-struct hwrm_async_event_cmpl_func_flr_proc_cmplt {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_V 0x1UL
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_FUNC_FLR_PROC_CMPLT_EVENT_DATA1_FUNC_ID_SFT 0
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
++ u8 timestamp_lo;
++ __le16 timestamp_hi;
++ __le32 event_data1;
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
++ #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
+ };
+
+-/* HWRM Asynchronous Event Completion Record for PF Driver Unload (16 bytes) */
++/* hwrm_async_event_cmpl_pf_drvr_unload (size:128b/16B) */
+ struct hwrm_async_event_cmpl_pf_drvr_unload {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
++ __le16 type;
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_MASK 0x3fUL
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_SFT 0
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_TYPE_HWRM_ASYNC_EVENT
++ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD 0x20UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_LAST ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_ID_PF_DRVR_UNLOAD
++ __le32 event_data2;
++ u8 opaque_v;
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_V 0x1UL
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_MASK 0xfeUL
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_OPAQUE_SFT 1
++ u8 timestamp_lo;
++ __le16 timestamp_hi;
++ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_FUNC_ID_SFT 0
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_MASK 0x70000UL
++ #define ASYNC_EVENT_CMPL_PF_DRVR_UNLOAD_EVENT_DATA1_PORT_SFT 16
+ };
+
+-/* HWRM Asynchronous Event Completion Record for PF Driver load (16 bytes) */
+-struct hwrm_async_event_cmpl_pf_drvr_load {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_ID_PF_DRVR_LOAD 0x21UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_V 0x1UL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_FUNC_ID_SFT 0
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_MASK 0x70000UL
+- #define ASYNC_EVENT_CMPL_PF_DRVR_LOAD_EVENT_DATA1_PORT_SFT 16
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for VF FLR (16 bytes) */
+-struct hwrm_async_event_cmpl_vf_flr {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_VF_FLR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_ID_VF_FLR 0x30UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_VF_FLR_V 0x1UL
+- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_VF_FLR_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_VF_FLR_EVENT_DATA1_VF_ID_SFT 0
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for VF MAC Addr change (16 bytes) */
+-struct hwrm_async_event_cmpl_vf_mac_addr_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_ID_VF_MAC_ADDR_CHANGE 0x31UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_V 0x1UL
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_MASK 0xffffUL
+- #define ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
+-struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE 0x32UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
++/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
+ struct hwrm_async_event_cmpl_vf_cfg_change {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
++ __le16 type;
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT
++ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE 0x33UL
+- __le32 event_data2;
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+- #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+-};
+-
+-/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
+-struct hwrm_async_event_cmpl_hwrm_error {
+- __le16 type;
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_MASK 0x3fUL
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_SFT 0
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+- __le16 event_id;
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_ID_HWRM_ERROR 0xffUL
+- __le32 event_data2;
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_MASK 0xffUL
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_SFT 0
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING 0x0UL
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL 0x1UL
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL 0x2UL
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
+- u8 opaque_v;
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_SFT 1
+- u8 timestamp_lo;
+- __le16 timestamp_hi;
+- __le32 event_data1;
+- #define ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_LAST ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE
++ __le32 event_data2;
++ u8 opaque_v;
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
++ u8 timestamp_lo;
++ __le16 timestamp_hi;
++ __le32 event_data1;
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
++ #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+ };
+
+-/* hwrm_ver_get */
+-/* Input (24 bytes) */
+-struct hwrm_ver_get_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 hwrm_intf_maj;
+- u8 hwrm_intf_min;
+- u8 hwrm_intf_upd;
+- u8 unused_0[5];
+-};
+-
+-/* Output (128 bytes) */
+-struct hwrm_ver_get_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 hwrm_intf_maj;
+- u8 hwrm_intf_min;
+- u8 hwrm_intf_upd;
+- u8 hwrm_intf_rsvd;
+- u8 hwrm_fw_maj;
+- u8 hwrm_fw_min;
+- u8 hwrm_fw_bld;
+- u8 hwrm_fw_rsvd;
+- u8 mgmt_fw_maj;
+- u8 mgmt_fw_min;
+- u8 mgmt_fw_bld;
+- u8 mgmt_fw_rsvd;
+- u8 netctrl_fw_maj;
+- u8 netctrl_fw_min;
+- u8 netctrl_fw_bld;
+- u8 netctrl_fw_rsvd;
+- __le32 dev_caps_cfg;
+- #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
+- #define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+- #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
+- u8 roce_fw_maj;
+- u8 roce_fw_min;
+- u8 roce_fw_bld;
+- u8 roce_fw_rsvd;
+- char hwrm_fw_name[16];
+- char mgmt_fw_name[16];
+- char netctrl_fw_name[16];
+- __le32 reserved2[4];
+- char roce_fw_name[16];
+- __le16 chip_num;
+- u8 chip_rev;
+- u8 chip_metal;
+- u8 chip_bond_id;
+- u8 chip_platform_type;
+- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_ASIC 0x0UL
+- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_FPGA 0x1UL
+- #define VER_GET_RESP_CHIP_PLATFORM_TYPE_PALLADIUM 0x2UL
+- __le16 max_req_win_len;
+- __le16 max_resp_len;
+- __le16 def_req_timeout;
+- u8 init_pending;
+- #define VER_GET_RESP_INIT_PENDING_DEV_NOT_RDY 0x1UL
+- u8 unused_0;
+- u8 unused_1;
+- u8 valid;
+-};
+-
+-/* hwrm_func_reset */
+-/* Input (24 bytes) */
++/* hwrm_func_reset_input (size:192b/24B) */
+ struct hwrm_func_reset_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
+- __le16 vf_id;
+- u8 func_reset_level;
+- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
+- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
+- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
+- #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
+- u8 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define FUNC_RESET_REQ_ENABLES_VF_ID_VALID 0x1UL
++ __le16 vf_id;
++ u8 func_reset_level;
++ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETALL 0x0UL
++ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME 0x1UL
++ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETCHILDREN 0x2UL
++ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF 0x3UL
++ #define FUNC_RESET_REQ_FUNC_RESET_LEVEL_LAST FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETVF
++ u8 unused_0;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_reset_output (size:128b/16B) */
+ struct hwrm_func_reset_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_getfid */
+-/* Input (24 bytes) */
++/* hwrm_func_getfid_input (size:192b/24B) */
+ struct hwrm_func_getfid_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
+- __le16 pci_id;
+- __le16 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define FUNC_GETFID_REQ_ENABLES_PCI_ID 0x1UL
++ __le16 pci_id;
++ u8 unused_0[2];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_getfid_output (size:128b/16B) */
+ struct hwrm_func_getfid_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 fid;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 fid;
++ u8 unused_0[5];
++ u8 valid;
+ };
+
+-/* hwrm_func_vf_alloc */
+-/* Input (24 bytes) */
++/* hwrm_func_vf_alloc_input (size:192b/24B) */
+ struct hwrm_func_vf_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
+- __le16 first_vf_id;
+- __le16 num_vfs;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define FUNC_VF_ALLOC_REQ_ENABLES_FIRST_VF_ID 0x1UL
++ __le16 first_vf_id;
++ __le16 num_vfs;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_vf_alloc_output (size:128b/16B) */
+ struct hwrm_func_vf_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 first_vf_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 first_vf_id;
++ u8 unused_0[5];
++ u8 valid;
+ };
+
+-/* hwrm_func_vf_free */
+-/* Input (24 bytes) */
++/* hwrm_func_vf_free_input (size:192b/24B) */
+ struct hwrm_func_vf_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
+- __le16 first_vf_id;
+- __le16 num_vfs;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define FUNC_VF_FREE_REQ_ENABLES_FIRST_VF_ID 0x1UL
++ __le16 first_vf_id;
++ __le16 num_vfs;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_vf_free_output (size:128b/16B) */
+ struct hwrm_func_vf_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_vf_cfg */
+-/* Input (32 bytes) */
++/* hwrm_func_vf_cfg_input (size:448b/56B) */
+ struct hwrm_func_vf_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
+- #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
+- #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+- #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
+- __le16 mtu;
+- __le16 guest_vlan;
+- __le16 async_event_cr;
+- u8 dflt_mac_addr[6];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
++ #define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
++ #define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
++ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x10UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x20UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS 0x40UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS 0x80UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS 0x100UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS 0x200UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x400UL
++ #define FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x800UL
++ __le16 mtu;
++ __le16 guest_vlan;
++ __le16 async_event_cr;
++ u8 dflt_mac_addr[6];
++ __le32 flags;
++ #define FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x1UL
++ #define FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x2UL
++ #define FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x4UL
++ #define FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x8UL
++ #define FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x10UL
++ #define FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x20UL
++ #define FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x40UL
++ #define FUNC_VF_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x80UL
++ __le16 num_rsscos_ctxs;
++ __le16 num_cmpl_rings;
++ __le16 num_tx_rings;
++ __le16 num_rx_rings;
++ __le16 num_l2_ctxs;
++ __le16 num_vnics;
++ __le16 num_stat_ctxs;
++ __le16 num_hw_ring_grps;
++ u8 unused_0[4];
+ };
+
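The enlarged hwrm_func_vf_cfg_input lets a VF ask firmware to reserve its own ring and context counts via the new enables bits. A sketch of a minimal request, with illustrative counts, assuming the request struct is pre-zeroed by the caller:

/* Sketch only: a VF requesting tx/rx/completion ring reservations. */
static void vf_cfg_request_rings(struct hwrm_func_vf_cfg_input *req,
				 u16 tx, u16 rx, u16 cmpl)
{
	req->req_type = cpu_to_le16(HWRM_FUNC_VF_CFG);
	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
				   FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
				   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS);
	req->num_tx_rings = cpu_to_le16(tx);
	req->num_rx_rings = cpu_to_le16(rx);
	req->num_cmpl_rings = cpu_to_le16(cmpl);
}
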
+-/* Output (16 bytes) */
++/* hwrm_func_vf_cfg_output (size:128b/16B) */
+ struct hwrm_func_vf_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_qcaps */
+-/* Input (24 bytes) */
++/* hwrm_func_qcaps_input (size:192b/24B) */
+ struct hwrm_func_qcaps_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 fid;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 fid;
++ u8 unused_0[6];
+ };
+
+-/* Output (80 bytes) */
++/* hwrm_func_qcaps_output (size:640b/80B) */
+ struct hwrm_func_qcaps_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 fid;
+- __le16 port_id;
+- __le32 flags;
+- #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
+- #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
+- #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+- #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+- #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+- #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+- #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
+- #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
+- #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
+- #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
+- #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
+- u8 mac_address[6];
+- __le16 max_rsscos_ctx;
+- __le16 max_cmpl_rings;
+- __le16 max_tx_rings;
+- __le16 max_rx_rings;
+- __le16 max_l2_ctxs;
+- __le16 max_vnics;
+- __le16 first_vf_id;
+- __le16 max_vfs;
+- __le16 max_stat_ctx;
+- __le32 max_encap_records;
+- __le32 max_decap_records;
+- __le32 max_tx_em_flows;
+- __le32 max_tx_wm_flows;
+- __le32 max_rx_em_flows;
+- __le32 max_rx_wm_flows;
+- __le32 max_mcast_filters;
+- __le32 max_flow_id;
+- __le32 max_hw_ring_grps;
+- __le16 max_sp_tx_rings;
+- u8 unused_0;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 fid;
++ __le16 port_id;
++ __le32 flags;
++ #define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
++ #define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
++ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
++ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
++ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
++ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
++ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
++ #define FUNC_QCAPS_RESP_FLAGS_TX_RING_RL_SUPPORTED 0x80UL
++ #define FUNC_QCAPS_RESP_FLAGS_TX_BW_CFG_SUPPORTED 0x100UL
++ #define FUNC_QCAPS_RESP_FLAGS_VF_TX_RING_RL_SUPPORTED 0x200UL
++ #define FUNC_QCAPS_RESP_FLAGS_VF_BW_CFG_SUPPORTED 0x400UL
++ #define FUNC_QCAPS_RESP_FLAGS_STD_TX_RING_MODE_SUPPORTED 0x800UL
++ #define FUNC_QCAPS_RESP_FLAGS_GENEVE_TUN_FLAGS_SUPPORTED 0x1000UL
++ #define FUNC_QCAPS_RESP_FLAGS_NVGRE_TUN_FLAGS_SUPPORTED 0x2000UL
++ #define FUNC_QCAPS_RESP_FLAGS_GRE_TUN_FLAGS_SUPPORTED 0x4000UL
++ #define FUNC_QCAPS_RESP_FLAGS_MPLS_TUN_FLAGS_SUPPORTED 0x8000UL
++ u8 mac_address[6];
++ __le16 max_rsscos_ctx;
++ __le16 max_cmpl_rings;
++ __le16 max_tx_rings;
++ __le16 max_rx_rings;
++ __le16 max_l2_ctxs;
++ __le16 max_vnics;
++ __le16 first_vf_id;
++ __le16 max_vfs;
++ __le16 max_stat_ctx;
++ __le32 max_encap_records;
++ __le32 max_decap_records;
++ __le32 max_tx_em_flows;
++ __le32 max_tx_wm_flows;
++ __le32 max_rx_em_flows;
++ __le32 max_rx_wm_flows;
++ __le32 max_mcast_filters;
++ __le32 max_flow_id;
++ __le32 max_hw_ring_grps;
++ __le16 max_sp_tx_rings;
++ u8 unused_0;
++ u8 valid;
+ };
+
+-/* hwrm_func_qcfg */
+-/* Input (24 bytes) */
++/* hwrm_func_qcfg_input (size:192b/24B) */
+ struct hwrm_func_qcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 fid;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 fid;
++ u8 unused_0[6];
+ };
+
+-/* Output (72 bytes) */
++/* hwrm_func_qcfg_output (size:640b/80B) */
+ struct hwrm_func_qcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 fid;
+- __le16 port_id;
+- __le16 vlan;
+- __le16 flags;
+- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+- #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+- #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
+- #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
+- #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
+- #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
+- u8 mac_address[6];
+- __le16 pci_id;
+- __le16 alloc_rsscos_ctx;
+- __le16 alloc_cmpl_rings;
+- __le16 alloc_tx_rings;
+- __le16 alloc_rx_rings;
+- __le16 alloc_l2_ctx;
+- __le16 alloc_vnics;
+- __le16 mtu;
+- __le16 mru;
+- __le16 stat_ctx_id;
+- u8 port_partition_type;
+- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
+- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
+- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
+- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
+- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
+- #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
+- u8 port_pf_cnt;
+- #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
+- __le16 dflt_vnic_id;
+- __le16 max_mtu_configured;
+- __le32 min_bw;
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
+- #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
+- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 fid;
++ __le16 port_id;
++ __le16 vlan;
++ __le16 flags;
++ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
++ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
++ #define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
++ #define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
++ #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
++ #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
++ u8 mac_address[6];
++ __le16 pci_id;
++ __le16 alloc_rsscos_ctx;
++ __le16 alloc_cmpl_rings;
++ __le16 alloc_tx_rings;
++ __le16 alloc_rx_rings;
++ __le16 alloc_l2_ctx;
++ __le16 alloc_vnics;
++ __le16 mtu;
++ __le16 mru;
++ __le16 stat_ctx_id;
++ u8 port_partition_type;
++ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF 0x0UL
++ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS 0x1UL
++ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 0x2UL
++ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 0x3UL
++ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 0x4UL
++ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN 0xffUL
++ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_LAST FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN
++ u8 port_pf_cnt;
++ #define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
++ #define FUNC_QCFG_RESP_PORT_PF_CNT_LAST FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL
++ __le16 dflt_vnic_id;
++ __le16 max_mtu_configured;
++ __le32 min_bw;
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
++ #define FUNC_QCFG_RESP_MIN_BW_SCALE 0x10000000UL
++ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define FUNC_QCFG_RESP_MIN_BW_SCALE_LAST FUNC_QCFG_RESP_MIN_BW_SCALE_BYTES
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 max_bw;
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
+- #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
+- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 max_bw;
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_SFT 0
++ #define FUNC_QCFG_RESP_MAX_BW_SCALE 0x10000000UL
++ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define FUNC_QCFG_RESP_MAX_BW_SCALE_LAST FUNC_QCFG_RESP_MAX_BW_SCALE_BYTES
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 evb_mode;
+- #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
+- #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
+- #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
+- u8 unused_0;
+- __le16 alloc_vfs;
+- __le32 alloc_mcast_filters;
+- __le32 alloc_hw_ring_grps;
+- __le16 alloc_sp_tx_rings;
+- u8 unused_1;
+- u8 valid;
++ #define FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_LAST FUNC_QCFG_RESP_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 evb_mode;
++ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
++ #define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
++ #define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
++ #define FUNC_QCFG_RESP_EVB_MODE_LAST FUNC_QCFG_RESP_EVB_MODE_VEPA
++ u8 cache_linesize;
++ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
++ #define FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
++ #define FUNC_QCFG_RESP_CACHE_LINESIZE_LAST FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128
++ __le16 alloc_vfs;
++ __le32 alloc_mcast_filters;
++ __le32 alloc_hw_ring_grps;
++ __le16 alloc_sp_tx_rings;
++ __le16 alloc_stat_ctx;
++ u8 unused_2[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_vlan_cfg */
+-/* Input (48 bytes) */
++/* hwrm_func_vlan_cfg_input (size:384b/48B) */
+ struct hwrm_func_vlan_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 fid;
+- u8 unused_0;
+- u8 unused_1;
+- __le32 enables;
+- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
+- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
+- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
+- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
+- #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
+- #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
+- __le16 stag_vid;
+- u8 stag_pcp;
+- u8 unused_2;
+- __be16 stag_tpid;
+- __le16 ctag_vid;
+- u8 ctag_pcp;
+- u8 unused_3;
+- __be16 ctag_tpid;
+- __le32 rsvd1;
+- __le32 rsvd2;
+- __le32 unused_4;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 fid;
++ u8 unused_0[2];
++ __le32 enables;
++ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
++ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
++ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
++ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
++ #define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
++ #define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
++ __le16 stag_vid;
++ u8 stag_pcp;
++ u8 unused_1;
++ __be16 stag_tpid;
++ __le16 ctag_vid;
++ u8 ctag_pcp;
++ u8 unused_2;
++ __be16 ctag_tpid;
++ __le32 rsvd1;
++ __le32 rsvd2;
++ u8 unused_3[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_vlan_cfg_output (size:128b/16B) */
+ struct hwrm_func_vlan_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_cfg */
+-/* Input (88 bytes) */
++/* hwrm_func_cfg_input (size:704b/88B) */
+ struct hwrm_func_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 fid;
+- u8 unused_0;
+- u8 unused_1;
+- __le32 flags;
+- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
+- #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
+- #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
+- #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
+- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
+- #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
+- #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
+- #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
+- #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
+- __le32 enables;
+- #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
+- #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
+- #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
+- #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
+- #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
+- #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
+- #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
+- #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
+- #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
+- #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
+- #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
+- #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
+- __le16 mtu;
+- __le16 mru;
+- __le16 num_rsscos_ctxs;
+- __le16 num_cmpl_rings;
+- __le16 num_tx_rings;
+- __le16 num_rx_rings;
+- __le16 num_l2_ctxs;
+- __le16 num_vnics;
+- __le16 num_stat_ctxs;
+- __le16 num_hw_ring_grps;
+- u8 dflt_mac_addr[6];
+- __le16 dflt_vlan;
+- __be32 dflt_ip_addr[4];
+- __le32 min_bw;
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
+- #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
+- #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 max_bw;
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
+- #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
+- #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+- __le16 async_event_cr;
+- u8 vlan_antispoof_mode;
+- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
+- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
+- #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 fid;
++ u8 unused_0[2];
++ __le32 flags;
++ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE 0x1UL
++ #define FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE 0x2UL
++ #define FUNC_CFG_REQ_FLAGS_RSVD_MASK 0x1fcUL
++ #define FUNC_CFG_REQ_FLAGS_RSVD_SFT 2
++ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
++ #define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
++ #define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
++ #define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
++ #define FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST 0x2000UL
++ #define FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST 0x4000UL
++ #define FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST 0x8000UL
++ #define FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST 0x10000UL
++ #define FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST 0x20000UL
++ #define FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST 0x40000UL
++ #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL
++ #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL
++ __le32 enables;
++ #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
++ #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS 0x4UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS 0x8UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS 0x10UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS 0x20UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS 0x40UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_VNICS 0x80UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS 0x100UL
++ #define FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x200UL
++ #define FUNC_CFG_REQ_ENABLES_DFLT_VLAN 0x400UL
++ #define FUNC_CFG_REQ_ENABLES_DFLT_IP_ADDR 0x800UL
++ #define FUNC_CFG_REQ_ENABLES_MIN_BW 0x1000UL
++ #define FUNC_CFG_REQ_ENABLES_MAX_BW 0x2000UL
++ #define FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4000UL
++ #define FUNC_CFG_REQ_ENABLES_VLAN_ANTISPOOF_MODE 0x8000UL
++ #define FUNC_CFG_REQ_ENABLES_ALLOWED_VLAN_PRIS 0x10000UL
++ #define FUNC_CFG_REQ_ENABLES_EVB_MODE 0x20000UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_MCAST_FILTERS 0x40000UL
++ #define FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS 0x80000UL
++ #define FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE 0x100000UL
++ __le16 mtu;
++ __le16 mru;
++ __le16 num_rsscos_ctxs;
++ __le16 num_cmpl_rings;
++ __le16 num_tx_rings;
++ __le16 num_rx_rings;
++ __le16 num_l2_ctxs;
++ __le16 num_vnics;
++ __le16 num_stat_ctxs;
++ __le16 num_hw_ring_grps;
++ u8 dflt_mac_addr[6];
++ __le16 dflt_vlan;
++ __be32 dflt_ip_addr[4];
++ __le32 min_bw;
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_SFT 0
++ #define FUNC_CFG_REQ_MIN_BW_SCALE 0x10000000UL
++ #define FUNC_CFG_REQ_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define FUNC_CFG_REQ_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define FUNC_CFG_REQ_MIN_BW_SCALE_LAST FUNC_CFG_REQ_MIN_BW_SCALE_BYTES
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 max_bw;
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_SFT 0
++ #define FUNC_CFG_REQ_MAX_BW_SCALE 0x10000000UL
++ #define FUNC_CFG_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define FUNC_CFG_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define FUNC_CFG_REQ_MAX_BW_SCALE_LAST FUNC_CFG_REQ_MAX_BW_SCALE_BYTES
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_LAST FUNC_CFG_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
++ __le16 async_event_cr;
++ u8 vlan_antispoof_mode;
++ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_NOCHECK 0x0UL
++ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN 0x1UL
++ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_IF_VLANDNE 0x2UL
+ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN 0x3UL
+- u8 allowed_vlan_pris;
+- u8 evb_mode;
+- #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
+- #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
+- #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
+- u8 unused_2;
+- __le16 num_mcast_filters;
++ #define FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_LAST FUNC_CFG_REQ_VLAN_ANTISPOOF_MODE_INSERT_OR_OVERRIDE_VLAN
++ u8 allowed_vlan_pris;
++ u8 evb_mode;
++ #define FUNC_CFG_REQ_EVB_MODE_NO_EVB 0x0UL
++ #define FUNC_CFG_REQ_EVB_MODE_VEB 0x1UL
++ #define FUNC_CFG_REQ_EVB_MODE_VEPA 0x2UL
++ #define FUNC_CFG_REQ_EVB_MODE_LAST FUNC_CFG_REQ_EVB_MODE_VEPA
++ u8 cache_linesize;
++ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_64 0x0UL
++ #define FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128 0x1UL
++ #define FUNC_CFG_REQ_CACHE_LINESIZE_LAST FUNC_CFG_REQ_CACHE_LINESIZE_CACHE_LINESIZE_128
++ __le16 num_mcast_filters;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_cfg_output (size:128b/16B) */
+ struct hwrm_func_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_qstats */
+-/* Input (24 bytes) */
++/* hwrm_func_qstats_input (size:192b/24B) */
+ struct hwrm_func_qstats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 fid;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 fid;
++ u8 unused_0[6];
+ };
+
+-/* Output (176 bytes) */
++/* hwrm_func_qstats_output (size:1408b/176B) */
+ struct hwrm_func_qstats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 tx_ucast_pkts;
+- __le64 tx_mcast_pkts;
+- __le64 tx_bcast_pkts;
+- __le64 tx_discard_pkts;
+- __le64 tx_drop_pkts;
+- __le64 tx_ucast_bytes;
+- __le64 tx_mcast_bytes;
+- __le64 tx_bcast_bytes;
+- __le64 rx_ucast_pkts;
+- __le64 rx_mcast_pkts;
+- __le64 rx_bcast_pkts;
+- __le64 rx_discard_pkts;
+- __le64 rx_drop_pkts;
+- __le64 rx_ucast_bytes;
+- __le64 rx_mcast_bytes;
+- __le64 rx_bcast_bytes;
+- __le64 rx_agg_pkts;
+- __le64 rx_agg_bytes;
+- __le64 rx_agg_events;
+- __le64 rx_agg_aborts;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 tx_ucast_pkts;
++ __le64 tx_mcast_pkts;
++ __le64 tx_bcast_pkts;
++ __le64 tx_discard_pkts;
++ __le64 tx_drop_pkts;
++ __le64 tx_ucast_bytes;
++ __le64 tx_mcast_bytes;
++ __le64 tx_bcast_bytes;
++ __le64 rx_ucast_pkts;
++ __le64 rx_mcast_pkts;
++ __le64 rx_bcast_pkts;
++ __le64 rx_discard_pkts;
++ __le64 rx_drop_pkts;
++ __le64 rx_ucast_bytes;
++ __le64 rx_mcast_bytes;
++ __le64 rx_bcast_bytes;
++ __le64 rx_agg_pkts;
++ __le64 rx_agg_bytes;
++ __le64 rx_agg_events;
++ __le64 rx_agg_aborts;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_clr_stats */
+-/* Input (24 bytes) */
++/* hwrm_func_clr_stats_input (size:192b/24B) */
+ struct hwrm_func_clr_stats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 fid;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 fid;
++ u8 unused_0[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_clr_stats_output (size:128b/16B) */
+ struct hwrm_func_clr_stats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_vf_resc_free */
+-/* Input (24 bytes) */
++/* hwrm_func_vf_resc_free_input (size:192b/24B) */
+ struct hwrm_func_vf_resc_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 vf_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 vf_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_vf_resc_free_output (size:128b/16B) */
+ struct hwrm_func_vf_resc_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_vf_vnic_ids_query */
+-/* Input (32 bytes) */
++/* hwrm_func_vf_vnic_ids_query_input (size:256b/32B) */
+ struct hwrm_func_vf_vnic_ids_query_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 vf_id;
+- u8 unused_0;
+- u8 unused_1;
+- __le32 max_vnic_id_cnt;
+- __le64 vnic_id_tbl_addr;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 vf_id;
++ u8 unused_0[2];
++ __le32 max_vnic_id_cnt;
++ __le64 vnic_id_tbl_addr;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_vf_vnic_ids_query_output (size:128b/16B) */
+ struct hwrm_func_vf_vnic_ids_query_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 vnic_id_cnt;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 vnic_id_cnt;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_func_drv_rgtr */
+-/* Input (80 bytes) */
++/* hwrm_func_drv_rgtr_input (size:832b/104B) */
+ struct hwrm_func_drv_rgtr_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
+- #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
+- __le32 enables;
+- #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
+- #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
+- #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
+- #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
+- #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
+- __le16 os_type;
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
+- #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
+- u8 ver_maj;
+- u8 ver_min;
+- u8 ver_upd;
+- u8 unused_0;
+- __le16 unused_1;
+- __le32 timestamp;
+- __le32 unused_2;
+- __le32 vf_req_fwd[8];
+- __le32 async_event_fwd[8];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_ALL_MODE 0x1UL
++ #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL
++ __le32 enables;
++ #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
++ #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
++ #define FUNC_DRV_RGTR_REQ_ENABLES_TIMESTAMP 0x4UL
++ #define FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD 0x8UL
++ #define FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD 0x10UL
++ __le16 os_type;
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN 0x0UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER 0x1UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS 0xeUL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS 0x12UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS 0x1dUL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX 0x24UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD 0x2aUL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_ESXI 0x68UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN864 0x73UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WIN2012R2 0x74UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI 0x8000UL
++ #define FUNC_DRV_RGTR_REQ_OS_TYPE_LAST FUNC_DRV_RGTR_REQ_OS_TYPE_UEFI
++ u8 ver_maj;
++ u8 ver_min;
++ u8 ver_upd;
++ u8 unused_0[3];
++ __le32 timestamp;
++ u8 unused_1[4];
++ __le32 vf_req_fwd[8];
++ __le32 async_event_fwd[8];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_drv_rgtr_output (size:128b/16B) */
+ struct hwrm_func_drv_rgtr_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_drv_unrgtr */
+-/* Input (24 bytes) */
++/* hwrm_func_drv_unrgtr_input (size:192b/24B) */
+ struct hwrm_func_drv_unrgtr_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
+ #define FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN 0x1UL
+- __le32 unused_0;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_drv_unrgtr_output (size:128b/16B) */
+ struct hwrm_func_drv_unrgtr_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_buf_rgtr */
+-/* Input (128 bytes) */
++/* hwrm_func_buf_rgtr_input (size:1024b/128B) */
+ struct hwrm_func_buf_rgtr_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
+- #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
+- __le16 vf_id;
+- __le16 req_buf_num_pages;
+- __le16 req_buf_page_size;
+- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
+- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
+- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
+- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
+- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
+- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
+- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
+- __le16 req_buf_len;
+- __le16 resp_buf_len;
+- u8 unused_0;
+- u8 unused_1;
+- __le64 req_buf_page_addr0;
+- __le64 req_buf_page_addr1;
+- __le64 req_buf_page_addr2;
+- __le64 req_buf_page_addr3;
+- __le64 req_buf_page_addr4;
+- __le64 req_buf_page_addr5;
+- __le64 req_buf_page_addr6;
+- __le64 req_buf_page_addr7;
+- __le64 req_buf_page_addr8;
+- __le64 req_buf_page_addr9;
+- __le64 error_buf_addr;
+- __le64 resp_buf_addr;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define FUNC_BUF_RGTR_REQ_ENABLES_VF_ID 0x1UL
++ #define FUNC_BUF_RGTR_REQ_ENABLES_ERR_BUF_ADDR 0x2UL
++ __le16 vf_id;
++ __le16 req_buf_num_pages;
++ __le16 req_buf_page_size;
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_16B 0x4UL
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K 0xcUL
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K 0xdUL
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K 0x10UL
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M 0x15UL
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M 0x16UL
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G 0x1eUL
++ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_LAST FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G
++ __le16 req_buf_len;
++ __le16 resp_buf_len;
++ u8 unused_0[2];
++ __le64 req_buf_page_addr0;
++ __le64 req_buf_page_addr1;
++ __le64 req_buf_page_addr2;
++ __le64 req_buf_page_addr3;
++ __le64 req_buf_page_addr4;
++ __le64 req_buf_page_addr5;
++ __le64 req_buf_page_addr6;
++ __le64 req_buf_page_addr7;
++ __le64 req_buf_page_addr8;
++ __le64 req_buf_page_addr9;
++ __le64 error_buf_addr;
++ __le64 resp_buf_addr;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_buf_rgtr_output (size:128b/16B) */
+ struct hwrm_func_buf_rgtr_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_func_drv_qver */
+-/* Input (24 bytes) */
++/* hwrm_func_drv_qver_input (size:192b/24B) */
+ struct hwrm_func_drv_qver_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 reserved;
+- __le16 fid;
+- __le16 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 reserved;
++ __le16 fid;
++ u8 unused_0[2];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_func_drv_qver_output (size:128b/16B) */
+ struct hwrm_func_drv_qver_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 os_type;
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
+- #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
+- u8 ver_maj;
+- u8 ver_min;
+- u8 ver_upd;
+- u8 unused_0;
+- u8 unused_1;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 os_type;
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN 0x0UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER 0x1UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS 0xeUL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS 0x12UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS 0x1dUL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX 0x24UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD 0x2aUL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_ESXI 0x68UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN864 0x73UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_WIN2012R2 0x74UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_UEFI 0x8000UL
++ #define FUNC_DRV_QVER_RESP_OS_TYPE_LAST FUNC_DRV_QVER_RESP_OS_TYPE_UEFI
++ u8 ver_maj;
++ u8 ver_min;
++ u8 ver_upd;
++ u8 unused_0[2];
++ u8 valid;
++};
++
++/* hwrm_func_resource_qcaps_input (size:192b/24B) */
++struct hwrm_func_resource_qcaps_input {
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 fid;
++ u8 unused_0[6];
++};
++
++/* hwrm_func_resource_qcaps_output (size:384b/48B) */
++struct hwrm_func_resource_qcaps_output {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 max_vfs;
++ __le16 max_msix;
++ __le16 vf_reservation_strategy;
++ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MAXIMAL 0x0UL
++ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL 0x1UL
++ #define FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_LAST FUNC_RESOURCE_QCAPS_RESP_VF_RESERVATION_STRATEGY_MINIMAL
++ __le16 min_rsscos_ctx;
++ __le16 max_rsscos_ctx;
++ __le16 min_cmpl_rings;
++ __le16 max_cmpl_rings;
++ __le16 min_tx_rings;
++ __le16 max_tx_rings;
++ __le16 min_rx_rings;
++ __le16 max_rx_rings;
++ __le16 min_l2_ctxs;
++ __le16 max_l2_ctxs;
++ __le16 min_vnics;
++ __le16 max_vnics;
++ __le16 min_stat_ctx;
++ __le16 max_stat_ctx;
++ __le16 min_hw_ring_grps;
++ __le16 max_hw_ring_grps;
++ u8 unused_0;
++ u8 valid;
++};
++
++/* hwrm_func_vf_resource_cfg_input (size:448b/56B) */
++struct hwrm_func_vf_resource_cfg_input {
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 vf_id;
++ __le16 max_msix;
++ __le16 min_rsscos_ctx;
++ __le16 max_rsscos_ctx;
++ __le16 min_cmpl_rings;
++ __le16 max_cmpl_rings;
++ __le16 min_tx_rings;
++ __le16 max_tx_rings;
++ __le16 min_rx_rings;
++ __le16 max_rx_rings;
++ __le16 min_l2_ctxs;
++ __le16 max_l2_ctxs;
++ __le16 min_vnics;
++ __le16 max_vnics;
++ __le16 min_stat_ctx;
++ __le16 max_stat_ctx;
++ __le16 min_hw_ring_grps;
++ __le16 max_hw_ring_grps;
++ u8 unused_0[4];
++};
++
++/* hwrm_func_vf_resource_cfg_output (size:256b/32B) */
++struct hwrm_func_vf_resource_cfg_output {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 reserved_rsscos_ctx;
++ __le16 reserved_cmpl_rings;
++ __le16 reserved_tx_rings;
++ __le16 reserved_rx_rings;
++ __le16 reserved_l2_ctxs;
++ __le16 reserved_vnics;
++ __le16 reserved_stat_ctx;
++ __le16 reserved_hw_ring_grps;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_port_phy_cfg */
+-/* Input (56 bytes) */
++/* hwrm_port_phy_cfg_input (size:448b/56B) */
+ struct hwrm_port_phy_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+- __le32 enables;
+- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
+- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
+- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
+- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
+- #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
+- #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
+- #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
+- #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
+- #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+- #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+- #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+- __le16 port_id;
+- __le16 force_link_speed;
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
+- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
+- u8 auto_mode;
+- #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
+- #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
+- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
+- #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
+- #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
+- u8 auto_duplex;
+- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
+- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
+- #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
+- u8 auto_pause;
+- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
+- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+- #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+- u8 unused_0;
+- __le16 auto_link_speed;
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
+- __le16 auto_link_speed_mask;
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+- #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+- u8 wirespeed;
+- #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
+- #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
+- u8 lpbk;
+- #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
+- #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
+- #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
+- u8 force_pause;
+- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
+- #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
+- u8 unused_1;
+- __le32 preemphasis;
+- __le16 eee_link_speed_mask;
+- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+- #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+- u8 unused_2;
+- u8 unused_3;
+- __le32 tx_lpi_timer;
+- __le32 unused_4;
+- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+- #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
++ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
++ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
++ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
++ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
++ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
++ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
++ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
++ __le32 enables;
++ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
++ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
++ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE 0x4UL
++ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED 0x8UL
++ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK 0x10UL
++ #define PORT_PHY_CFG_REQ_ENABLES_WIRESPEED 0x20UL
++ #define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
++ #define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
++ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
++ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
++ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
++ __le16 port_id;
++ __le16 force_link_speed;
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB 0xaUL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2GB 0x14UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB 0x19UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB 0x64UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB 0xc8UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB 0xfaUL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB 0x190UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB 0x1f4UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB 0x3e8UL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB 0xffffUL
++ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB
++ u8 auto_mode;
++ #define PORT_PHY_CFG_REQ_AUTO_MODE_NONE 0x0UL
++ #define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS 0x1UL
++ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED 0x2UL
++ #define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW 0x3UL
++ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK 0x4UL
++ #define PORT_PHY_CFG_REQ_AUTO_MODE_LAST PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK
++ u8 auto_duplex;
++ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF 0x0UL
++ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL 0x1UL
++ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH 0x2UL
++ #define PORT_PHY_CFG_REQ_AUTO_DUPLEX_LAST PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH
++ u8 auto_pause;
++ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
++ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
++ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
++ u8 unused_0;
++ __le16 auto_link_speed;
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB 0x1UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB 0xaUL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2GB 0x14UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB 0x19UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB 0x64UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB 0xc8UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB 0xfaUL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB 0x190UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB 0x1f4UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB 0x3e8UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB 0xffffUL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_LAST PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB
++ __le16 auto_link_speed_mask;
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB 0x8UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2GB 0x10UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB 0x40UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_20GB 0x80UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
++ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
++ u8 wirespeed;
++ #define PORT_PHY_CFG_REQ_WIRESPEED_OFF 0x0UL
++ #define PORT_PHY_CFG_REQ_WIRESPEED_ON 0x1UL
++ #define PORT_PHY_CFG_REQ_WIRESPEED_LAST PORT_PHY_CFG_REQ_WIRESPEED_ON
++ u8 lpbk;
++ #define PORT_PHY_CFG_REQ_LPBK_NONE 0x0UL
++ #define PORT_PHY_CFG_REQ_LPBK_LOCAL 0x1UL
++ #define PORT_PHY_CFG_REQ_LPBK_REMOTE 0x2UL
++ #define PORT_PHY_CFG_REQ_LPBK_LAST PORT_PHY_CFG_REQ_LPBK_REMOTE
++ u8 force_pause;
++ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_TX 0x1UL
++ #define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
++ u8 unused_1;
++ __le32 preemphasis;
++ __le16 eee_link_speed_mask;
++ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
++ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
++ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
++ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
++ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
++ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
++ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
++ u8 unused_2[2];
++ __le32 tx_lpi_timer;
++ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
++ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
++ __le32 unused_3;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_port_phy_cfg_output (size:128b/16B) */
+ struct hwrm_port_phy_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_port_phy_qcfg */
+-/* Input (24 bytes) */
++/* hwrm_port_phy_qcfg_input (size:192b/24B) */
+ struct hwrm_port_phy_qcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (96 bytes) */
++/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+ struct hwrm_port_phy_qcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 link;
+- #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
+- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
+- #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
+- u8 unused_0;
+- __le16 link_speed;
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
+- #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
+- u8 duplex_cfg;
+- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
+- #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
+- u8 pause;
+- #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
+- #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
+- __le16 support_speeds;
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+- #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
+- __le16 force_link_speed;
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
+- #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
+- u8 auto_mode;
+- #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
+- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
+- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
+- #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
+- #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
+- u8 auto_pause;
+- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
+- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+- #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
+- __le16 auto_link_speed;
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
+- __le16 auto_link_speed_mask;
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+- #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
+- u8 wirespeed;
+- #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
+- #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
+- u8 lpbk;
+- #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
+- #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
+- #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
+- u8 force_pause;
+- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
+- #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
+- u8 module_status;
+- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
+- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
+- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
+- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
+- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
+- #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
+- __le32 preemphasis;
+- u8 phy_maj;
+- u8 phy_min;
+- u8 phy_bld;
+- u8 phy_type;
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
+- #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
+- u8 media_type;
+- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
+- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
+- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
+- #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
+- u8 xcvr_pkg_type;
+- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
+- #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
+- u8 eee_config_phy_addr;
+- #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
+- #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
+- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+- #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+- u8 parallel_detect;
+- #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+- #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
+- #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
+- __le16 link_partner_adv_speeds;
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
+- u8 link_partner_adv_auto_mode;
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 link;
++ #define PORT_PHY_QCFG_RESP_LINK_NO_LINK 0x0UL
++ #define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
++ #define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
++ #define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
++ u8 unused_0;
++ __le16 link_speed;
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2GB 0x14UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB 0x19UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10GB 0x64UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_20GB 0xc8UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB 0xfaUL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB 0x190UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
++ #define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
++ u8 duplex_cfg;
++ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
++ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
++ #define PORT_PHY_QCFG_RESP_DUPLEX_CFG_LAST PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL
++ u8 pause;
++ #define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
++ #define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
++ __le16 support_speeds;
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MBHD 0x1UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB 0x2UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GBHD 0x4UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB 0x8UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB 0x10UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2_5GB 0x20UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10GB 0x40UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_20GB 0x80UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
++ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
++ __le16 force_link_speed;
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB 0x1UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB 0xaUL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2GB 0x14UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_2_5GB 0x19UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10GB 0x64UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_20GB 0xc8UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB 0xfaUL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB 0x190UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB 0x1f4UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB 0x3e8UL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB 0xffffUL
++ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB
++ u8 auto_mode;
++ #define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE 0x0UL
++ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS 0x1UL
++ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED 0x2UL
++ #define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW 0x3UL
++ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK 0x4UL
++ #define PORT_PHY_QCFG_RESP_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
++ u8 auto_pause;
++ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
++ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
++ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
++ __le16 auto_link_speed;
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB 0x1UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB 0xaUL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2GB 0x14UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_2_5GB 0x19UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10GB 0x64UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_20GB 0xc8UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB 0xfaUL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB 0x190UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB 0x1f4UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB 0x3e8UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB 0xffffUL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB
++ __le16 auto_link_speed_mask;
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GBHD 0x4UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_1GB 0x8UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2GB 0x10UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_2_5GB 0x20UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10GB 0x40UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_20GB 0x80UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
++ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
++ u8 wirespeed;
++ #define PORT_PHY_QCFG_RESP_WIRESPEED_OFF 0x0UL
++ #define PORT_PHY_QCFG_RESP_WIRESPEED_ON 0x1UL
++ #define PORT_PHY_QCFG_RESP_WIRESPEED_LAST PORT_PHY_QCFG_RESP_WIRESPEED_ON
++ u8 lpbk;
++ #define PORT_PHY_QCFG_RESP_LPBK_NONE 0x0UL
++ #define PORT_PHY_QCFG_RESP_LPBK_LOCAL 0x1UL
++ #define PORT_PHY_QCFG_RESP_LPBK_REMOTE 0x2UL
++ #define PORT_PHY_QCFG_RESP_LPBK_LAST PORT_PHY_QCFG_RESP_LPBK_REMOTE
++ u8 force_pause;
++ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
++ #define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
++ u8 module_status;
++ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE 0x0UL
++ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX 0x1UL
++ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG 0x2UL
++ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN 0x3UL
++ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED 0x4UL
++ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE 0xffUL
++ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_LAST PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE
++ __le32 preemphasis;
++ u8 phy_maj;
++ u8 phy_min;
++ u8 phy_bld;
++ u8 phy_type;
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN 0x0UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR 0x1UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 0x2UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR 0x3UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR 0x4UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 0x5UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX 0x6UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR 0x7UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET 0x8UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE 0x9UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY 0xaUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L 0xbUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S 0xcUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N 0xdUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR 0xeUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4 0xfUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4 0x10UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4 0x11UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4 0x12UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10 0x13UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4 0x14UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4 0x15UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
++ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
++ u8 media_type;
++ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
++ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
++ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC 0x2UL
++ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE 0x3UL
++ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_LAST PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE
++ u8 xcvr_pkg_type;
++ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL 0x1UL
++ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL 0x2UL
++ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL
++ u8 eee_config_phy_addr;
++ #define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
++ #define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
++ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
++ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
++ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
++ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
++ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
++ u8 parallel_detect;
++ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
++ __le16 link_partner_adv_speeds;
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GBHD 0x4UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_1GB 0x8UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2GB 0x10UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_2_5GB 0x20UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10GB 0x40UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_20GB 0x80UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
++ u8 link_partner_adv_auto_mode;
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE 0x0UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS 0x1UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW 0x3UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
+- u8 link_partner_adv_pause;
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+- __le16 adv_eee_link_speed_mask;
+- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+- #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+- __le16 link_partner_adv_eee_link_speed_mask;
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+- __le32 xcvr_identifier_type_tx_lpi_timer;
+- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+- #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK 0x4UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_LAST PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK
++ u8 link_partner_adv_pause;
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
++ __le16 adv_eee_link_speed_mask;
++ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
++ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
++ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
++ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
++ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
++ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
++ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
++ __le16 link_partner_adv_eee_link_speed_mask;
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
++ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
++ __le32 xcvr_identifier_type_tx_lpi_timer;
++ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
++ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
++ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
++ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+- __le16 fec_cfg;
+- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+- u8 duplex_state;
+- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
+- #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
+- u8 unused_1;
+- char phy_vendor_name[16];
+- char phy_vendor_partnumber[16];
+- __le32 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 unused_5;
+- u8 valid;
++ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
++ __le16 fec_cfg;
++ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
++ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
++ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
++ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
++ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
++ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
++ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
++ u8 duplex_state;
++ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
++ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
++ #define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
++ u8 option_flags;
++ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
++ char phy_vendor_name[16];
++ char phy_vendor_partnumber[16];
++ u8 unused_2[7];
++ u8 valid;
+ };
+
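The reworked hwrm_port_phy_qcfg_output keeps the old field set but adds _LAST sentinels and leaves the EEE state packed into eee_config_phy_addr behind the MASK/SFT pairs. A minimal decoding sketch, assuming only the definitions above plus the usual <linux/types.h> byte-order helpers; the completed response pointer is taken as given:

        /* Sketch: decode a PORT_PHY_QCFG response using only the masks above. */
        static void decode_phy_qcfg(const struct hwrm_port_phy_qcfg_output *resp)
        {
                u16 speed = le16_to_cpu(resp->link_speed);
                u8 phy_addr = resp->eee_config_phy_addr &
                              PORT_PHY_QCFG_RESP_PHY_ADDR_MASK; /* low 5 bits, SFT is 0 */
                bool eee_on = resp->eee_config_phy_addr &
                              PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE;

                if (resp->link == PORT_PHY_QCFG_RESP_LINK_LINK)
                        pr_info("link up, speed code 0x%x, phy %u, EEE %s\n",
                                speed, phy_addr, eee_on ? "active" : "idle");
        }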
+-/* hwrm_port_mac_cfg */
+-/* Input (40 bytes) */
++/* hwrm_port_mac_cfg_input (size:320b/40B) */
+ struct hwrm_port_mac_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
+- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
+- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
+- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+- #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+- #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+- #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
+- #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
+- #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
+- #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
+- __le32 enables;
+- #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
+- #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
+- #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
+- #define PORT_MAC_CFG_REQ_ENABLES_RESERVED1 0x8UL
+- #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
+- #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+- #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+- #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
+- #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
+- __le16 port_id;
+- u8 ipg;
+- u8 lpbk;
+- #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
+- #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
+- #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
+- u8 vlan_pri2cos_map_pri;
+- u8 reserved1;
+- u8 tunnel_pri2cos_map_pri;
+- u8 dscp2pri_map_pri;
+- __le16 rx_ts_capture_ptp_msg_type;
+- __le16 tx_ts_capture_ptp_msg_type;
+- u8 cos_field_cfg;
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
+- #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
+- u8 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define PORT_MAC_CFG_REQ_FLAGS_MATCH_LINK 0x1UL
++ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_ENABLE 0x2UL
++ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
++ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
++ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
++ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
++ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
++ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
++ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
++ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
++ #define PORT_MAC_CFG_REQ_FLAGS_VLAN_PRI2COS_DISABLE 0x400UL
++ #define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_DISABLE 0x800UL
++ #define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_DISABLE 0x1000UL
++ __le32 enables;
++ #define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
++ #define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
++ #define PORT_MAC_CFG_REQ_ENABLES_VLAN_PRI2COS_MAP_PRI 0x4UL
++ #define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
++ #define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
++ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
++ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
++ #define PORT_MAC_CFG_REQ_ENABLES_COS_FIELD_CFG 0x100UL
++ __le16 port_id;
++ u8 ipg;
++ u8 lpbk;
++ #define PORT_MAC_CFG_REQ_LPBK_NONE 0x0UL
++ #define PORT_MAC_CFG_REQ_LPBK_LOCAL 0x1UL
++ #define PORT_MAC_CFG_REQ_LPBK_REMOTE 0x2UL
++ #define PORT_MAC_CFG_REQ_LPBK_LAST PORT_MAC_CFG_REQ_LPBK_REMOTE
++ u8 vlan_pri2cos_map_pri;
++ u8 reserved1;
++ u8 tunnel_pri2cos_map_pri;
++ u8 dscp2pri_map_pri;
++ __le16 rx_ts_capture_ptp_msg_type;
++ __le16 tx_ts_capture_ptp_msg_type;
++ u8 cos_field_cfg;
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_RSVD1 0x1UL
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_MASK 0x6UL
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_SFT 1
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_INNERMOST (0x0UL << 1)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTER (0x1UL << 1)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_OUTERMOST (0x2UL << 1)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 1)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_VLAN_PRI_SEL_UNSPECIFIED
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_MASK 0x18UL
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_SFT 3
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_INNERMOST (0x0UL << 3)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTER (0x1UL << 3)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_OUTERMOST (0x2UL << 3)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED (0x3UL << 3)
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_LAST PORT_MAC_CFG_REQ_COS_FIELD_CFG_T_VLAN_PRI_SEL_UNSPECIFIED
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_MASK 0xe0UL
++ #define PORT_MAC_CFG_REQ_COS_FIELD_CFG_DEFAULT_COS_SFT 5
++ u8 unused_0[3];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_port_mac_cfg_output (size:128b/16B) */
+ struct hwrm_port_mac_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 mru;
+- __le16 mtu;
+- u8 ipg;
+- u8 lpbk;
+- #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
+- #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
+- #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
+- u8 unused_0;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 mru;
++ __le16 mtu;
++ u8 ipg;
++ u8 lpbk;
++ #define PORT_MAC_CFG_RESP_LPBK_NONE 0x0UL
++ #define PORT_MAC_CFG_RESP_LPBK_LOCAL 0x1UL
++ #define PORT_MAC_CFG_RESP_LPBK_REMOTE 0x2UL
++ #define PORT_MAC_CFG_RESP_LPBK_LAST PORT_MAC_CFG_RESP_LPBK_REMOTE
++ u8 unused_0;
++ u8 valid;
+ };
+
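As with the other HWRM _cfg requests, a field in hwrm_port_mac_cfg_input is only honoured when its bit in 'enables' is set. A hedged sketch of configuring the inter-packet gap; HWRM_PORT_MAC_CFG is the matching request id defined elsewhere in this header, and send_hwrm() is a hypothetical stand-in for whatever transport the caller uses:

        /* Sketch: set only the IPG field; everything else stays untouched
         * because its enable bit is left clear.
         */
        static int cfg_port_ipg(u16 port_id, u8 ipg)
        {
                struct hwrm_port_mac_cfg_input req = {0};

                req.req_type = cpu_to_le16(HWRM_PORT_MAC_CFG);
                req.port_id = cpu_to_le16(port_id);
                req.ipg = ipg;
                req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_IPG);

                return send_hwrm(&req, sizeof(req)); /* hypothetical transport */
        }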
+-/* hwrm_port_mac_ptp_qcfg */
+-/* Input (24 bytes) */
++/* hwrm_port_mac_ptp_qcfg_input (size:192b/24B) */
+ struct hwrm_port_mac_ptp_qcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (80 bytes) */
++/* hwrm_port_mac_ptp_qcfg_output (size:640b/80B) */
+ struct hwrm_port_mac_ptp_qcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 flags;
+- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
+- #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
+- u8 unused_0;
+- __le16 unused_1;
+- __le32 rx_ts_reg_off_lower;
+- __le32 rx_ts_reg_off_upper;
+- __le32 rx_ts_reg_off_seq_id;
+- __le32 rx_ts_reg_off_src_id_0;
+- __le32 rx_ts_reg_off_src_id_1;
+- __le32 rx_ts_reg_off_src_id_2;
+- __le32 rx_ts_reg_off_domain_id;
+- __le32 rx_ts_reg_off_fifo;
+- __le32 rx_ts_reg_off_fifo_adv;
+- __le32 rx_ts_reg_off_granularity;
+- __le32 tx_ts_reg_off_lower;
+- __le32 tx_ts_reg_off_upper;
+- __le32 tx_ts_reg_off_seq_id;
+- __le32 tx_ts_reg_off_fifo;
+- __le32 tx_ts_reg_off_granularity;
+- __le32 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 unused_5;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 flags;
++ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
++ #define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
++ u8 unused_0[3];
++ __le32 rx_ts_reg_off_lower;
++ __le32 rx_ts_reg_off_upper;
++ __le32 rx_ts_reg_off_seq_id;
++ __le32 rx_ts_reg_off_src_id_0;
++ __le32 rx_ts_reg_off_src_id_1;
++ __le32 rx_ts_reg_off_src_id_2;
++ __le32 rx_ts_reg_off_domain_id;
++ __le32 rx_ts_reg_off_fifo;
++ __le32 rx_ts_reg_off_fifo_adv;
++ __le32 rx_ts_reg_off_granularity;
++ __le32 tx_ts_reg_off_lower;
++ __le32 tx_ts_reg_off_upper;
++ __le32 tx_ts_reg_off_seq_id;
++ __le32 tx_ts_reg_off_fifo;
++ __le32 tx_ts_reg_off_granularity;
++ u8 unused_1[7];
++ u8 valid;
+ };
+
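As the flag names suggest, the PTP query tells the host how timestamps may be read: FLAGS_DIRECT_ACCESS indicates the rx/tx_ts_reg_off_* values are register offsets the driver can read directly, while FLAGS_HWRM_ACCESS indicates each read must go through firmware. A small dispatch sketch over a completed response:

        /* Sketch: pick a timestamp access method from the PTP query flags. */
        static bool ptp_regs_direct(const struct hwrm_port_mac_ptp_qcfg_output *resp)
        {
                if (resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)
                        return true;    /* rx/tx_ts_reg_off_* are MMIO offsets */
                /* otherwise timestamps are fetched via firmware commands */
                return false;
        }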
+-/* hwrm_port_qstats */
+-/* Input (40 bytes) */
++/* hwrm_port_qstats_input (size:320b/40B) */
+ struct hwrm_port_qstats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2[3];
+- u8 unused_3;
+- __le64 tx_stat_host_addr;
+- __le64 rx_stat_host_addr;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
++ __le64 tx_stat_host_addr;
++ __le64 rx_stat_host_addr;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_port_qstats_output (size:128b/16B) */
+ struct hwrm_port_qstats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 tx_stat_size;
+- __le16 rx_stat_size;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 tx_stat_size;
++ __le16 rx_stat_size;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
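hwrm_port_qstats_input carries two host DMA addresses; firmware writes the TX and RX statistics blocks there and reports how many bytes it wrote in tx_stat_size/rx_stat_size of the response. A sketch of filling the request, assuming the buffers were already DMA-mapped by the caller:

        /* Sketch: request port statistics into pre-mapped DMA buffers. */
        static void fill_qstats_req(struct hwrm_port_qstats_input *req, u16 port_id,
                                    dma_addr_t tx_dma, dma_addr_t rx_dma)
        {
                req->port_id = cpu_to_le16(port_id);
                req->tx_stat_host_addr = cpu_to_le64(tx_dma);
                req->rx_stat_host_addr = cpu_to_le64(rx_dma);
        }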
+-/* hwrm_port_lpbk_qstats */
+-/* Input (16 bytes) */
++/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+ struct hwrm_port_lpbk_qstats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
+ };
+
+-/* Output (96 bytes) */
++/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+ struct hwrm_port_lpbk_qstats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 lpbk_ucast_frames;
+- __le64 lpbk_mcast_frames;
+- __le64 lpbk_bcast_frames;
+- __le64 lpbk_ucast_bytes;
+- __le64 lpbk_mcast_bytes;
+- __le64 lpbk_bcast_bytes;
+- __le64 tx_stat_discard;
+- __le64 tx_stat_error;
+- __le64 rx_stat_discard;
+- __le64 rx_stat_error;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 lpbk_ucast_frames;
++ __le64 lpbk_mcast_frames;
++ __le64 lpbk_bcast_frames;
++ __le64 lpbk_ucast_bytes;
++ __le64 lpbk_mcast_bytes;
++ __le64 lpbk_bcast_bytes;
++ __le64 tx_stat_discard;
++ __le64 tx_stat_error;
++ __le64 rx_stat_discard;
++ __le64 rx_stat_error;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
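All loopback counters are 64-bit little-endian values, so simple totals can be derived directly from a completed response, for example:

        static u64 lpbk_total_frames(const struct hwrm_port_lpbk_qstats_output *resp)
        {
                return le64_to_cpu(resp->lpbk_ucast_frames) +
                       le64_to_cpu(resp->lpbk_mcast_frames) +
                       le64_to_cpu(resp->lpbk_bcast_frames);
        }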
+-/* hwrm_port_clr_stats */
+-/* Input (24 bytes) */
++/* hwrm_port_clr_stats_input (size:192b/24B) */
+ struct hwrm_port_clr_stats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_port_clr_stats_output (size:128b/16B) */
+ struct hwrm_port_clr_stats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_port_lpbk_clr_stats */
+-/* Input (16 bytes) */
++/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+ struct hwrm_port_lpbk_clr_stats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
+ struct hwrm_port_lpbk_clr_stats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
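The clear-stats messages are minimal: the port variant takes only a port_id, the loopback variant nothing beyond the standard header, and both responses carry just the status words plus the trailing valid byte. That valid byte follows the usual HWRM convention of being written by firmware last, so a generic completion check (polling and barriers elided) looks like:

        /* Sketch: every *_output struct in this header ends with a 'valid'
         * byte; a non-zero read there means the rest of the response is
         * stable and error_code can be trusted.
         */
        static bool hwrm_resp_ok(const struct hwrm_port_clr_stats_output *resp)
        {
                return resp->valid && le16_to_cpu(resp->error_code) == 0;
        }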
+-/* hwrm_port_phy_qcaps */
+-/* Input (24 bytes) */
++/* hwrm_port_phy_qcaps_input (size:192b/24B) */
+ struct hwrm_port_phy_qcaps_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (24 bytes) */
++/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+ struct hwrm_port_phy_qcaps_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 flags;
+- #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
+- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
+- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
+- u8 port_cnt;
+- #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
+- #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
+- #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
+- #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
+- #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
+- __le16 supported_speeds_force_mode;
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+- __le16 supported_speeds_auto_mode;
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+- __le16 supported_speeds_eee_mode;
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+- #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+- __le32 tx_lpi_timer_low;
+- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+- #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+- #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+- __le32 valid_tx_lpi_timer_high;
+- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+- #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+- #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+- #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 flags;
++ #define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
++ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
++ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
++ u8 port_cnt;
++ #define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
++ #define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
++ #define PORT_PHY_QCAPS_RESP_PORT_CNT_2 0x2UL
++ #define PORT_PHY_QCAPS_RESP_PORT_CNT_3 0x3UL
++ #define PORT_PHY_QCAPS_RESP_PORT_CNT_4 0x4UL
++ #define PORT_PHY_QCAPS_RESP_PORT_CNT_LAST PORT_PHY_QCAPS_RESP_PORT_CNT_4
++ __le16 supported_speeds_force_mode;
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
++ __le16 supported_speeds_auto_mode;
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
++ __le16 supported_speeds_eee_mode;
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
++ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
++ __le32 tx_lpi_timer_low;
++ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
++ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
++ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
++ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
++ __le32 valid_tx_lpi_timer_high;
++ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
++ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
++ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
++ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+ };
+
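Note that hwrm_port_phy_qcaps_output has no standalone trailing valid byte: the valid field is packed into the top eight bits of valid_tx_lpi_timer_high behind the VALID_MASK/VALID_SFT pair. A decoding sketch:

        static bool phy_qcaps_valid(const struct hwrm_port_phy_qcaps_output *resp)
        {
                u32 v = le32_to_cpu(resp->valid_tx_lpi_timer_high);

                return (v & PORT_PHY_QCAPS_RESP_VALID_MASK) >>
                       PORT_PHY_QCAPS_RESP_VALID_SFT;
        }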
+-/* hwrm_port_phy_i2c_read */
+-/* Input (40 bytes) */
++/* hwrm_port_phy_i2c_read_input (size:320b/40B) */
+ struct hwrm_port_phy_i2c_read_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- __le32 enables;
+- #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+- __le16 port_id;
+- u8 i2c_slave_addr;
+- u8 unused_0;
+- __le16 page_number;
+- __le16 page_offset;
+- u8 data_length;
+- u8 unused_1[7];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ __le32 enables;
++ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
++ __le16 port_id;
++ u8 i2c_slave_addr;
++ u8 unused_0;
++ __le16 page_number;
++ __le16 page_offset;
++ u8 data_length;
++ u8 unused_1[7];
+ };
+
+-/* Output (80 bytes) */
++/* hwrm_port_phy_i2c_read_output (size:640b/80B) */
+ struct hwrm_port_phy_i2c_read_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 data[16];
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 data[16];
++ u8 unused_0[7];
++ u8 valid;
+ };
+
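hwrm_port_phy_i2c_read fetches transceiver EEPROM bytes: the caller names the I2C slave address and page, optionally a page offset (honoured only when the PAGE_OFFSET enable bit is set), and asks for up to 64 bytes, which come back in the response's data[16] array of little-endian words. A hedged request-filling sketch:

        /* Sketch: ask for 'len' bytes (at most sizeof(resp->data), i.e. 64)
         * from an SFP/QSFP page, with an explicit starting offset.
         */
        static void fill_i2c_read(struct hwrm_port_phy_i2c_read_input *req,
                                  u16 port_id, u8 slave, u16 page, u16 off, u8 len)
        {
                req->port_id = cpu_to_le16(port_id);
                req->i2c_slave_addr = slave;
                req->page_number = cpu_to_le16(page);
                req->page_offset = cpu_to_le16(off);
                req->data_length = len;
                req->enables = cpu_to_le32(PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET);
        }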
+-/* hwrm_port_led_cfg */
+-/* Input (64 bytes) */
++/* hwrm_port_led_cfg_input (size:512b/64B) */
+ struct hwrm_port_led_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+- #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+- __le16 port_id;
+- u8 num_leds;
+- u8 rsvd;
+- u8 led0_id;
+- u8 led0_state;
+- #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+- #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+- #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+- #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+- u8 led0_color;
+- #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+- #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+- #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+- u8 unused_0;
+- __le16 led0_blink_on;
+- __le16 led0_blink_off;
+- u8 led0_group_id;
+- u8 rsvd0;
+- u8 led1_id;
+- u8 led1_state;
+- #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+- #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+- #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+- #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+- u8 led1_color;
+- #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+- #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+- #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+- u8 unused_1;
+- __le16 led1_blink_on;
+- __le16 led1_blink_off;
+- u8 led1_group_id;
+- u8 rsvd1;
+- u8 led2_id;
+- u8 led2_state;
+- #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+- #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+- #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+- #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+- u8 led2_color;
+- #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+- #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+- #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+- u8 unused_2;
+- __le16 led2_blink_on;
+- __le16 led2_blink_off;
+- u8 led2_group_id;
+- u8 rsvd2;
+- u8 led3_id;
+- u8 led3_state;
+- #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+- #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+- #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+- #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+- u8 led3_color;
+- #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+- #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+- #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+- #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+- u8 unused_3;
+- __le16 led3_blink_on;
+- __le16 led3_blink_off;
+- u8 led3_group_id;
+- u8 rsvd3;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
++ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
++ __le16 port_id;
++ u8 num_leds;
++ u8 rsvd;
++ u8 led0_id;
++ u8 led0_state;
++ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
++ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
++ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
++ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
++ #define PORT_LED_CFG_REQ_LED0_STATE_LAST PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
++ u8 led0_color;
++ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
++ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
++ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_CFG_REQ_LED0_COLOR_LAST PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER
++ u8 unused_0;
++ __le16 led0_blink_on;
++ __le16 led0_blink_off;
++ u8 led0_group_id;
++ u8 rsvd0;
++ u8 led1_id;
++ u8 led1_state;
++ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
++ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
++ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
++ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
++ #define PORT_LED_CFG_REQ_LED1_STATE_LAST PORT_LED_CFG_REQ_LED1_STATE_BLINKALT
++ u8 led1_color;
++ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
++ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
++ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_CFG_REQ_LED1_COLOR_LAST PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER
++ u8 unused_1;
++ __le16 led1_blink_on;
++ __le16 led1_blink_off;
++ u8 led1_group_id;
++ u8 rsvd1;
++ u8 led2_id;
++ u8 led2_state;
++ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
++ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
++ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
++ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
++ #define PORT_LED_CFG_REQ_LED2_STATE_LAST PORT_LED_CFG_REQ_LED2_STATE_BLINKALT
++ u8 led2_color;
++ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
++ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
++ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_CFG_REQ_LED2_COLOR_LAST PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER
++ u8 unused_2;
++ __le16 led2_blink_on;
++ __le16 led2_blink_off;
++ u8 led2_group_id;
++ u8 rsvd2;
++ u8 led3_id;
++ u8 led3_state;
++ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
++ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
++ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
++ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
++ #define PORT_LED_CFG_REQ_LED3_STATE_LAST PORT_LED_CFG_REQ_LED3_STATE_BLINKALT
++ u8 led3_color;
++ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
++ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
++ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_CFG_REQ_LED3_COLOR_LAST PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER
++ u8 unused_3;
++ __le16 led3_blink_on;
++ __le16 led3_blink_off;
++ u8 led3_group_id;
++ u8 rsvd3;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_port_led_cfg_output (size:128b/16B) */
+ struct hwrm_port_led_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
++};
++
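The LED config request applies the same per-field enables discipline, once per LED. A hedged sketch of an ethtool-identify style blink on LED 0; the blink durations are in firmware-defined units and 500 is illustrative only:

        static void fill_led0_blink(struct hwrm_port_led_cfg_input *req, u16 port_id)
        {
                req->port_id = cpu_to_le16(port_id);
                req->num_leds = 1;
                req->led0_id = 0;
                req->led0_state = PORT_LED_CFG_REQ_LED0_STATE_BLINK;
                req->led0_blink_on = cpu_to_le16(500);  /* illustrative */
                req->led0_blink_off = cpu_to_le16(500);
                req->enables = cpu_to_le32(PORT_LED_CFG_REQ_ENABLES_LED0_ID |
                                           PORT_LED_CFG_REQ_ENABLES_LED0_STATE |
                                           PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON |
                                           PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF);
        }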
++/* hwrm_port_led_qcfg_input (size:192b/24B) */
++struct hwrm_port_led_qcfg_input {
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
++};
++
++/* hwrm_port_led_qcfg_output (size:448b/56B) */
++struct hwrm_port_led_qcfg_output {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 num_leds;
++ u8 led0_id;
++ u8 led0_type;
++ #define PORT_LED_QCFG_RESP_LED0_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCFG_RESP_LED0_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCFG_RESP_LED0_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCFG_RESP_LED0_TYPE_LAST PORT_LED_QCFG_RESP_LED0_TYPE_INVALID
++ u8 led0_state;
++ #define PORT_LED_QCFG_RESP_LED0_STATE_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED0_STATE_OFF 0x1UL
++ #define PORT_LED_QCFG_RESP_LED0_STATE_ON 0x2UL
++ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINK 0x3UL
++ #define PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT 0x4UL
++ #define PORT_LED_QCFG_RESP_LED0_STATE_LAST PORT_LED_QCFG_RESP_LED0_STATE_BLINKALT
++ u8 led0_color;
++ #define PORT_LED_QCFG_RESP_LED0_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED0_COLOR_AMBER 0x1UL
++ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREEN 0x2UL
++ #define PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_QCFG_RESP_LED0_COLOR_LAST PORT_LED_QCFG_RESP_LED0_COLOR_GREENAMBER
++ u8 unused_0;
++ __le16 led0_blink_on;
++ __le16 led0_blink_off;
++ u8 led0_group_id;
++ u8 led1_id;
++ u8 led1_type;
++ #define PORT_LED_QCFG_RESP_LED1_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCFG_RESP_LED1_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCFG_RESP_LED1_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCFG_RESP_LED1_TYPE_LAST PORT_LED_QCFG_RESP_LED1_TYPE_INVALID
++ u8 led1_state;
++ #define PORT_LED_QCFG_RESP_LED1_STATE_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED1_STATE_OFF 0x1UL
++ #define PORT_LED_QCFG_RESP_LED1_STATE_ON 0x2UL
++ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINK 0x3UL
++ #define PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT 0x4UL
++ #define PORT_LED_QCFG_RESP_LED1_STATE_LAST PORT_LED_QCFG_RESP_LED1_STATE_BLINKALT
++ u8 led1_color;
++ #define PORT_LED_QCFG_RESP_LED1_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED1_COLOR_AMBER 0x1UL
++ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREEN 0x2UL
++ #define PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_QCFG_RESP_LED1_COLOR_LAST PORT_LED_QCFG_RESP_LED1_COLOR_GREENAMBER
++ u8 unused_1;
++ __le16 led1_blink_on;
++ __le16 led1_blink_off;
++ u8 led1_group_id;
++ u8 led2_id;
++ u8 led2_type;
++ #define PORT_LED_QCFG_RESP_LED2_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCFG_RESP_LED2_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCFG_RESP_LED2_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCFG_RESP_LED2_TYPE_LAST PORT_LED_QCFG_RESP_LED2_TYPE_INVALID
++ u8 led2_state;
++ #define PORT_LED_QCFG_RESP_LED2_STATE_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED2_STATE_OFF 0x1UL
++ #define PORT_LED_QCFG_RESP_LED2_STATE_ON 0x2UL
++ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINK 0x3UL
++ #define PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT 0x4UL
++ #define PORT_LED_QCFG_RESP_LED2_STATE_LAST PORT_LED_QCFG_RESP_LED2_STATE_BLINKALT
++ u8 led2_color;
++ #define PORT_LED_QCFG_RESP_LED2_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED2_COLOR_AMBER 0x1UL
++ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREEN 0x2UL
++ #define PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_QCFG_RESP_LED2_COLOR_LAST PORT_LED_QCFG_RESP_LED2_COLOR_GREENAMBER
++ u8 unused_2;
++ __le16 led2_blink_on;
++ __le16 led2_blink_off;
++ u8 led2_group_id;
++ u8 led3_id;
++ u8 led3_type;
++ #define PORT_LED_QCFG_RESP_LED3_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCFG_RESP_LED3_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCFG_RESP_LED3_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCFG_RESP_LED3_TYPE_LAST PORT_LED_QCFG_RESP_LED3_TYPE_INVALID
++ u8 led3_state;
++ #define PORT_LED_QCFG_RESP_LED3_STATE_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED3_STATE_OFF 0x1UL
++ #define PORT_LED_QCFG_RESP_LED3_STATE_ON 0x2UL
++ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINK 0x3UL
++ #define PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT 0x4UL
++ #define PORT_LED_QCFG_RESP_LED3_STATE_LAST PORT_LED_QCFG_RESP_LED3_STATE_BLINKALT
++ u8 led3_color;
++ #define PORT_LED_QCFG_RESP_LED3_COLOR_DEFAULT 0x0UL
++ #define PORT_LED_QCFG_RESP_LED3_COLOR_AMBER 0x1UL
++ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREEN 0x2UL
++ #define PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER 0x3UL
++ #define PORT_LED_QCFG_RESP_LED3_COLOR_LAST PORT_LED_QCFG_RESP_LED3_COLOR_GREENAMBER
++ u8 unused_3;
++ __le16 led3_blink_on;
++ __le16 led3_blink_off;
++ u8 led3_group_id;
++ u8 unused_4[6];
++ u8 valid;
+ };
+
+-/* hwrm_port_led_qcaps */
+-/* Input (24 bytes) */
++/* hwrm_port_led_qcaps_input (size:192b/24B) */
+ struct hwrm_port_led_qcaps_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (48 bytes) */
++/* hwrm_port_led_qcaps_output (size:384b/48B) */
+ struct hwrm_port_led_qcaps_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 num_leds;
+- u8 unused_0[3];
+- u8 led0_id;
+- u8 led0_type;
+- #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+- #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+- u8 led0_group_id;
+- u8 unused_1;
+- __le16 led0_state_caps;
+- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+- #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+- __le16 led0_color_caps;
+- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+- u8 led1_id;
+- u8 led1_type;
+- #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+- #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+- u8 led1_group_id;
+- u8 unused_2;
+- __le16 led1_state_caps;
+- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+- #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+- __le16 led1_color_caps;
+- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+- u8 led2_id;
+- u8 led2_type;
+- #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+- #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+- u8 led2_group_id;
+- u8 unused_3;
+- __le16 led2_state_caps;
+- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+- #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+- __le16 led2_color_caps;
+- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+- u8 led3_id;
+- u8 led3_type;
+- #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+- #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+- u8 led3_group_id;
+- u8 unused_4;
+- __le16 led3_state_caps;
+- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+- #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+- __le16 led3_color_caps;
+- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+- #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+- u8 unused_5;
+- u8 unused_6;
+- u8 unused_7;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 num_leds;
++ u8 unused[3];
++ u8 led0_id;
++ u8 led0_type;
++ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCAPS_RESP_LED0_TYPE_LAST PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID
++ u8 led0_group_id;
++ u8 unused_0;
++ __le16 led0_state_caps;
++ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
++ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
++ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
++ __le16 led0_color_caps;
++ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
++ u8 led1_id;
++ u8 led1_type;
++ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCAPS_RESP_LED1_TYPE_LAST PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID
++ u8 led1_group_id;
++ u8 unused_1;
++ __le16 led1_state_caps;
++ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
++ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
++ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
++ __le16 led1_color_caps;
++ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
++ u8 led2_id;
++ u8 led2_type;
++ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCAPS_RESP_LED2_TYPE_LAST PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID
++ u8 led2_group_id;
++ u8 unused_2;
++ __le16 led2_state_caps;
++ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
++ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
++ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
++ __le16 led2_color_caps;
++ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
++ u8 led3_id;
++ u8 led3_type;
++ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
++ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
++ #define PORT_LED_QCAPS_RESP_LED3_TYPE_LAST PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID
++ u8 led3_group_id;
++ u8 unused_3;
++ __le16 led3_state_caps;
++ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
++ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
++ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
++ __le16 led3_color_caps;
++ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
++ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
++ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
++ u8 unused_4[3];
++ u8 valid;
+ };
+
+-/* hwrm_queue_qportcfg */
+-/* Input (24 bytes) */
++/* hwrm_queue_qportcfg_input (size:192b/24B) */
+ struct hwrm_queue_qportcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
+- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
+- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
+- #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
+- __le16 port_id;
+- __le16 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
++ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX 0x0UL
++ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX 0x1UL
++ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
++ __le16 port_id;
++ u8 unused_0[2];
+ };
+
+-/* Output (32 bytes) */
++/* hwrm_queue_qportcfg_output (size:256b/32B) */
+ struct hwrm_queue_qportcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 max_configurable_queues;
+- u8 max_configurable_lossless_queues;
+- u8 queue_cfg_allowed;
+- u8 queue_cfg_info;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+- u8 queue_pfcenable_cfg_allowed;
+- u8 queue_pri2cos_cfg_allowed;
+- u8 queue_cos2bw_cfg_allowed;
+- u8 queue_id0;
+- u8 queue_id0_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 max_configurable_queues;
++ u8 max_configurable_lossless_queues;
++ u8 queue_cfg_allowed;
++ u8 queue_cfg_info;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
++ u8 queue_pfcenable_cfg_allowed;
++ u8 queue_pri2cos_cfg_allowed;
++ u8 queue_cos2bw_cfg_allowed;
++ u8 queue_id0;
++ u8 queue_id0_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 queue_id1;
+- u8 queue_id1_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN
++ u8 queue_id1;
++ u8 queue_id1_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 queue_id2;
+- u8 queue_id2_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN
++ u8 queue_id2;
++ u8 queue_id2_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 queue_id3;
+- u8 queue_id3_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN
++ u8 queue_id3;
++ u8 queue_id3_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 queue_id4;
+- u8 queue_id4_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN
++ u8 queue_id4;
++ u8 queue_id4_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 queue_id5;
+- u8 queue_id5_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN
++ u8 queue_id5;
++ u8 queue_id5_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 queue_id6;
+- u8 queue_id6_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN
++ u8 queue_id6;
++ u8 queue_id6_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 queue_id7;
+- u8 queue_id7_service_profile;
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN
++ u8 queue_id7;
++ u8 queue_id7_service_profile;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSY 0x0UL
+ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 valid;
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_LAST QUEUE_QPORTCFG_RESP_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN
++ u8 valid;
+ };
+
+-/* hwrm_queue_cfg */
+-/* Input (40 bytes) */
++/* hwrm_queue_cfg_input (size:320b/40B) */
+ struct hwrm_queue_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+- #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
+- #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
+- #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
+- #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
+- #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
+- __le32 enables;
+- #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
+- #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
+- __le32 queue_id;
+- __le32 dflt_len;
+- u8 service_profile;
+- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
+- #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
+- #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
+- u8 unused_0[7];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define QUEUE_CFG_REQ_FLAGS_PATH_MASK 0x3UL
++ #define QUEUE_CFG_REQ_FLAGS_PATH_SFT 0
++ #define QUEUE_CFG_REQ_FLAGS_PATH_TX 0x0UL
++ #define QUEUE_CFG_REQ_FLAGS_PATH_RX 0x1UL
++ #define QUEUE_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
++ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_BIDIR
++ __le32 enables;
++ #define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
++ #define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
++ __le32 queue_id;
++ __le32 dflt_len;
++ u8 service_profile;
++ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSY 0x0UL
++ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LOSSLESS 0x1UL
++ #define QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN 0xffUL
++ #define QUEUE_CFG_REQ_SERVICE_PROFILE_LAST QUEUE_CFG_REQ_SERVICE_PROFILE_UNKNOWN
++ u8 unused_0[7];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_cfg_output (size:128b/16B) */
+ struct hwrm_queue_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_queue_pfcenable_qcfg */
+-/* Input (24 bytes) */
++/* hwrm_queue_pfcenable_qcfg_input (size:192b/24B) */
+ struct hwrm_queue_pfcenable_qcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_pfcenable_qcfg_output (size:128b/16B) */
+ struct hwrm_queue_pfcenable_qcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 flags;
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
+- #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 flags;
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI0_PFC_ENABLED 0x1UL
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI1_PFC_ENABLED 0x2UL
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI2_PFC_ENABLED 0x4UL
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI3_PFC_ENABLED 0x8UL
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI4_PFC_ENABLED 0x10UL
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI5_PFC_ENABLED 0x20UL
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI6_PFC_ENABLED 0x40UL
++ #define QUEUE_PFCENABLE_QCFG_RESP_FLAGS_PRI7_PFC_ENABLED 0x80UL
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_queue_pfcenable_cfg */
+-/* Input (24 bytes) */
++/* hwrm_queue_pfcenable_cfg_input (size:192b/24B) */
+ struct hwrm_queue_pfcenable_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI0_PFC_ENABLED 0x1UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI1_PFC_ENABLED 0x2UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI2_PFC_ENABLED 0x4UL
+@@ -2440,1729 +2703,1664 @@ struct hwrm_queue_pfcenable_cfg_input {
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI5_PFC_ENABLED 0x20UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI6_PFC_ENABLED 0x40UL
+ #define QUEUE_PFCENABLE_CFG_REQ_FLAGS_PRI7_PFC_ENABLED 0x80UL
+- __le16 port_id;
+- __le16 unused_0;
++ __le16 port_id;
++ u8 unused_0[2];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_pfcenable_cfg_output (size:128b/16B) */
+ struct hwrm_queue_pfcenable_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_queue_pri2cos_qcfg */
+-/* Input (24 bytes) */
++/* hwrm_queue_pri2cos_qcfg_input (size:192b/24B) */
+ struct hwrm_queue_pri2cos_qcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
+- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
+- #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
+- u8 port_id;
+- u8 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH 0x1UL
++ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_TX 0x0UL
++ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX 0x1UL
++ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_QCFG_REQ_FLAGS_PATH_RX
++ #define QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN 0x2UL
++ u8 port_id;
++ u8 unused_0[3];
+ };
+
+-/* Output (24 bytes) */
++/* hwrm_queue_pri2cos_qcfg_output (size:192b/24B) */
+ struct hwrm_queue_pri2cos_qcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 pri0_cos_queue_id;
+- u8 pri1_cos_queue_id;
+- u8 pri2_cos_queue_id;
+- u8 pri3_cos_queue_id;
+- u8 pri4_cos_queue_id;
+- u8 pri5_cos_queue_id;
+- u8 pri6_cos_queue_id;
+- u8 pri7_cos_queue_id;
+- u8 queue_cfg_info;
+- #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
+- u8 unused_0;
+- __le16 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 pri0_cos_queue_id;
++ u8 pri1_cos_queue_id;
++ u8 pri2_cos_queue_id;
++ u8 pri3_cos_queue_id;
++ u8 pri4_cos_queue_id;
++ u8 pri5_cos_queue_id;
++ u8 pri6_cos_queue_id;
++ u8 pri7_cos_queue_id;
++ u8 queue_cfg_info;
++ #define QUEUE_PRI2COS_QCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG 0x1UL
++ u8 unused_0[6];
++ u8 valid;
+ };
+
+-/* hwrm_queue_pri2cos_cfg */
+-/* Input (40 bytes) */
++/* hwrm_queue_pri2cos_cfg_input (size:320b/40B) */
+ struct hwrm_queue_pri2cos_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
+- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
+- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR (0x2UL << 0)
+- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
+- #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
+- __le32 enables;
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
+- #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
+- u8 port_id;
+- u8 pri0_cos_queue_id;
+- u8 pri1_cos_queue_id;
+- u8 pri2_cos_queue_id;
+- u8 pri3_cos_queue_id;
+- u8 pri4_cos_queue_id;
+- u8 pri5_cos_queue_id;
+- u8 pri6_cos_queue_id;
+- u8 pri7_cos_queue_id;
+- u8 unused_0[7];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_MASK 0x3UL
++ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_SFT 0
++ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX 0x0UL
++ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX 0x1UL
++ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR 0x2UL
++ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR
++ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x4UL
++ __le32 enables;
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID 0x1UL
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI1_COS_QUEUE_ID 0x2UL
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI2_COS_QUEUE_ID 0x4UL
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI3_COS_QUEUE_ID 0x8UL
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI4_COS_QUEUE_ID 0x10UL
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI5_COS_QUEUE_ID 0x20UL
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI6_COS_QUEUE_ID 0x40UL
++ #define QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI7_COS_QUEUE_ID 0x80UL
++ u8 port_id;
++ u8 pri0_cos_queue_id;
++ u8 pri1_cos_queue_id;
++ u8 pri2_cos_queue_id;
++ u8 pri3_cos_queue_id;
++ u8 pri4_cos_queue_id;
++ u8 pri5_cos_queue_id;
++ u8 pri6_cos_queue_id;
++ u8 pri7_cos_queue_id;
++ u8 unused_0[7];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_pri2cos_cfg_output (size:128b/16B) */
+ struct hwrm_queue_pri2cos_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_queue_cos2bw_qcfg */
+-/* Input (24 bytes) */
++/* hwrm_queue_cos2bw_qcfg_input (size:192b/24B) */
+ struct hwrm_queue_cos2bw_qcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 port_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 port_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (112 bytes) */
++/* hwrm_queue_cos2bw_qcfg_output (size:896b/112B) */
+ struct hwrm_queue_cos2bw_qcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 queue_id0;
+- u8 unused_0;
+- __le16 unused_1;
+- __le32 queue_id0_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id0_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id0_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 queue_id0;
++ u8 unused_0;
++ __le16 unused_1;
++ __le32 queue_id0_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id0_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id0_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id0_pri_lvl;
+- u8 queue_id0_bw_weight;
+- u8 queue_id1;
+- __le32 queue_id1_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id1_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id1_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id0_pri_lvl;
++ u8 queue_id0_bw_weight;
++ u8 queue_id1;
++ __le32 queue_id1_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id1_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id1_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id1_pri_lvl;
+- u8 queue_id1_bw_weight;
+- u8 queue_id2;
+- __le32 queue_id2_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id2_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id2_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id1_pri_lvl;
++ u8 queue_id1_bw_weight;
++ u8 queue_id2;
++ __le32 queue_id2_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id2_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id2_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id2_pri_lvl;
+- u8 queue_id2_bw_weight;
+- u8 queue_id3;
+- __le32 queue_id3_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id3_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id3_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id2_pri_lvl;
++ u8 queue_id2_bw_weight;
++ u8 queue_id3;
++ __le32 queue_id3_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id3_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id3_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id3_pri_lvl;
+- u8 queue_id3_bw_weight;
+- u8 queue_id4;
+- __le32 queue_id4_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id4_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id4_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id3_pri_lvl;
++ u8 queue_id3_bw_weight;
++ u8 queue_id4;
++ __le32 queue_id4_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id4_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id4_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id4_pri_lvl;
+- u8 queue_id4_bw_weight;
+- u8 queue_id5;
+- __le32 queue_id5_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id5_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id5_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id4_pri_lvl;
++ u8 queue_id4_bw_weight;
++ u8 queue_id5;
++ __le32 queue_id5_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id5_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id5_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id5_pri_lvl;
+- u8 queue_id5_bw_weight;
+- u8 queue_id6;
+- __le32 queue_id6_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id6_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id6_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id5_pri_lvl;
++ u8 queue_id5_bw_weight;
++ u8 queue_id6;
++ __le32 queue_id6_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id6_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id6_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id6_pri_lvl;
+- u8 queue_id6_bw_weight;
+- u8 queue_id7;
+- __le32 queue_id7_min_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id7_max_bw;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id7_tsa_assign;
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id6_pri_lvl;
++ u8 queue_id6_bw_weight;
++ u8 queue_id7;
++ __le32 queue_id7_min_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id7_max_bw;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id7_tsa_assign;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id7_pri_lvl;
+- u8 queue_id7_bw_weight;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 unused_5;
+- u8 valid;
++ #define QUEUE_COS2BW_QCFG_RESP_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id7_pri_lvl;
++ u8 queue_id7_bw_weight;
++ u8 unused_2[4];
++ u8 valid;
+ };
+
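+ (Editorial note, not part of the patch: each *_min_bw/*_max_bw word above packs three fields — a 28-bit bandwidth value in bits 0-27, a scale flag in bit 28 selecting bytes vs. bits, and a 3-bit unit code in bits 29-31. A minimal decode sketch, assuming `resp` points at the queried cos2bw response:
+
+	u32 raw = le32_to_cpu(resp->queue_id6_min_bw);
+	u32 bw_val = (raw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_MASK) >>
+		     QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_SFT;
+	u32 unit = raw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK;
+	bool scale_bytes = raw & QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_SCALE; /* bit 28 set: bytes */
+
+	if (unit == QUEUE_COS2BW_QCFG_RESP_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100)
+		; /* bw_val is in 1/100ths of a percent of link bandwidth */
+ )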
+-/* hwrm_queue_cos2bw_cfg */
+-/* Input (128 bytes) */
++/* hwrm_queue_cos2bw_cfg_input (size:1024b/128B) */
+ struct hwrm_queue_cos2bw_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- __le32 enables;
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
+- #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
+- __le16 port_id;
+- u8 queue_id0;
+- u8 unused_0;
+- __le32 queue_id0_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id0_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id0_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ __le32 enables;
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID1_VALID 0x2UL
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID2_VALID 0x4UL
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID3_VALID 0x8UL
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID4_VALID 0x10UL
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID5_VALID 0x20UL
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID6_VALID 0x40UL
++ #define QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID7_VALID 0x80UL
++ __le16 port_id;
++ u8 queue_id0;
++ u8 unused_0;
++ __le32 queue_id0_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id0_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id0_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id0_pri_lvl;
+- u8 queue_id0_bw_weight;
+- u8 queue_id1;
+- __le32 queue_id1_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id1_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id1_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id0_pri_lvl;
++ u8 queue_id0_bw_weight;
++ u8 queue_id1;
++ __le32 queue_id1_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id1_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id1_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id1_pri_lvl;
+- u8 queue_id1_bw_weight;
+- u8 queue_id2;
+- __le32 queue_id2_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id2_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id2_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id1_pri_lvl;
++ u8 queue_id1_bw_weight;
++ u8 queue_id2;
++ __le32 queue_id2_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id2_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id2_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id2_pri_lvl;
+- u8 queue_id2_bw_weight;
+- u8 queue_id3;
+- __le32 queue_id3_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id3_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id3_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id2_pri_lvl;
++ u8 queue_id2_bw_weight;
++ u8 queue_id3;
++ __le32 queue_id3_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id3_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id3_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id3_pri_lvl;
+- u8 queue_id3_bw_weight;
+- u8 queue_id4;
+- __le32 queue_id4_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id4_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id4_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id3_pri_lvl;
++ u8 queue_id3_bw_weight;
++ u8 queue_id4;
++ __le32 queue_id4_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id4_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id4_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id4_pri_lvl;
+- u8 queue_id4_bw_weight;
+- u8 queue_id5;
+- __le32 queue_id5_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id5_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id5_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id4_pri_lvl;
++ u8 queue_id4_bw_weight;
++ u8 queue_id5;
++ __le32 queue_id5_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id5_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id5_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id5_pri_lvl;
+- u8 queue_id5_bw_weight;
+- u8 queue_id6;
+- __le32 queue_id6_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id6_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id6_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id5_pri_lvl;
++ u8 queue_id5_bw_weight;
++ u8 queue_id6;
++ __le32 queue_id6_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id6_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id6_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id6_pri_lvl;
+- u8 queue_id6_bw_weight;
+- u8 queue_id7;
+- __le32 queue_id7_min_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
+- __le32 queue_id7_max_bw;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 queue_id7_tsa_assign;
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id6_pri_lvl;
++ u8 queue_id6_bw_weight;
++ u8 queue_id7;
++ __le32 queue_id7_min_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MIN_BW_BW_VALUE_UNIT_INVALID
++ __le32 queue_id7_max_bw;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_SFT 0
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE 0x10000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_SCALE_BYTES
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_LAST QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 queue_id7_tsa_assign;
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP 0x0UL
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS 0x1UL
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST 0x2UL
+- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
+- u8 queue_id7_pri_lvl;
+- u8 queue_id7_bw_weight;
+- u8 unused_1[5];
++ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST 0xffUL
++ u8 queue_id7_pri_lvl;
++ u8 queue_id7_bw_weight;
++ u8 unused_1[5];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_cos2bw_cfg_output (size:128b/16B) */
+ struct hwrm_queue_cos2bw_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_queue_dscp_qcaps */
+-/* Input (24 bytes) */
++/* hwrm_queue_dscp_qcaps_input (size:192b/24B) */
+ struct hwrm_queue_dscp_qcaps_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 port_id;
+- u8 unused_0[7];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 port_id;
++ u8 unused_0[7];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_dscp_qcaps_output (size:128b/16B) */
+ struct hwrm_queue_dscp_qcaps_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 num_dscp_bits;
+- u8 unused_0;
+- __le16 max_entries;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 num_dscp_bits;
++ u8 unused_0;
++ __le16 max_entries;
++ u8 unused_1[3];
++ u8 valid;
+ };
+
+-/* hwrm_queue_dscp2pri_qcfg */
+-/* Input (32 bytes) */
++/* hwrm_queue_dscp2pri_qcfg_input (size:256b/32B) */
+ struct hwrm_queue_dscp2pri_qcfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le64 dest_data_addr;
+- u8 port_id;
+- u8 unused_0;
+- __le16 dest_data_buffer_size;
+- __le32 unused_1;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le64 dest_data_addr;
++ u8 port_id;
++ u8 unused_0;
++ __le16 dest_data_buffer_size;
++ u8 unused_1[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_dscp2pri_qcfg_output (size:128b/16B) */
+ struct hwrm_queue_dscp2pri_qcfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 entry_cnt;
+- u8 default_pri;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 entry_cnt;
++ u8 default_pri;
++ u8 unused_0[4];
++ u8 valid;
+ };
+
+-/* hwrm_queue_dscp2pri_cfg */
+-/* Input (40 bytes) */
++/* hwrm_queue_dscp2pri_cfg_input (size:320b/40B) */
+ struct hwrm_queue_dscp2pri_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le64 src_data_addr;
+- __le32 flags;
+- #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
+- __le32 enables;
+- #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
+- u8 port_id;
+- u8 default_pri;
+- __le16 entry_cnt;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le64 src_data_addr;
++ __le32 flags;
++ #define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
++ __le32 enables;
++ #define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
++ u8 port_id;
++ u8 default_pri;
++ __le16 entry_cnt;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_queue_dscp2pri_cfg_output (size:128b/16B) */
+ struct hwrm_queue_dscp2pri_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_alloc */
+-/* Input (24 bytes) */
++/* hwrm_vnic_alloc_input (size:192b/24B) */
+ struct hwrm_vnic_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define VNIC_ALLOC_REQ_FLAGS_DEFAULT 0x1UL
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_alloc_output (size:128b/16B) */
+ struct hwrm_vnic_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 vnic_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 vnic_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_free */
+-/* Input (24 bytes) */
++/* hwrm_vnic_free_input (size:192b/24B) */
+ struct hwrm_vnic_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 vnic_id;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 vnic_id;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_free_output (size:128b/16B) */
+ struct hwrm_vnic_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_cfg */
+-/* Input (40 bytes) */
++/* hwrm_vnic_cfg_input (size:320b/40B) */
+ struct hwrm_vnic_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
+- #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
+- #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+- #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+- #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
+- #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
+- #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
+- __le32 enables;
+- #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
+- #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
+- #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
+- #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
+- #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
+- __le16 vnic_id;
+- __le16 dflt_ring_grp;
+- __le16 rss_rule;
+- __le16 cos_rule;
+- __le16 lb_rule;
+- __le16 mru;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
++ #define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
++ #define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
++ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
++ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
++ #define VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE 0x20UL
++ #define VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE 0x40UL
++ __le32 enables;
++ #define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
++ #define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
++ #define VNIC_CFG_REQ_ENABLES_COS_RULE 0x4UL
++ #define VNIC_CFG_REQ_ENABLES_LB_RULE 0x8UL
++ #define VNIC_CFG_REQ_ENABLES_MRU 0x10UL
++ __le16 vnic_id;
++ __le16 dflt_ring_grp;
++ __le16 rss_rule;
++ __le16 cos_rule;
++ __le16 lb_rule;
++ __le16 mru;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_cfg_output (size:128b/16B) */
+ struct hwrm_vnic_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_qcaps */
+-/* Input (24 bytes) */
++/* hwrm_vnic_qcaps_input (size:192b/24B) */
+ struct hwrm_vnic_qcaps_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ u8 unused_0[4];
+ };
+
+-/* Output (24 bytes) */
++/* hwrm_vnic_qcaps_output (size:192b/24B) */
+ struct hwrm_vnic_qcaps_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 mru;
+- u8 unused_0;
+- u8 unused_1;
+- __le32 flags;
+- #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
+- #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
+- #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
+- #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
+- #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
+- #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
+- #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRROING_CAPABLE_VNIC_CAP 0x40UL
+- __le32 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 unused_5;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 mru;
++ u8 unused_0[2];
++ __le32 flags;
++ #define VNIC_QCAPS_RESP_FLAGS_UNUSED 0x1UL
++ #define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
++ #define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
++ #define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
++ #define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
++ #define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
++ #define VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP 0x40UL
++ u8 unused_1[7];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_tpa_cfg */
+-/* Input (40 bytes) */
++/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
+ struct hwrm_vnic_tpa_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
+- #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
+- #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
+- #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
+- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
+- #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
+- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
+- #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
+- __le32 enables;
+- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
+- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
+- #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
+- #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
+- __le16 vnic_id;
+- __le16 max_agg_segs;
+- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
+- __le16 max_aggs;
+- #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
+- #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
+- u8 unused_0;
+- u8 unused_1;
+- __le32 max_agg_timer;
+- __le32 min_agg_len;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define VNIC_TPA_CFG_REQ_FLAGS_TPA 0x1UL
++ #define VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA 0x2UL
++ #define VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE 0x4UL
++ #define VNIC_TPA_CFG_REQ_FLAGS_GRO 0x8UL
++ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN 0x10UL
++ #define VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
++ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_IPID_CHECK 0x40UL
++ #define VNIC_TPA_CFG_REQ_FLAGS_GRO_TTL_CHECK 0x80UL
++ __le32 enables;
++ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS 0x1UL
++ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS 0x2UL
++ #define VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_TIMER 0x4UL
++ #define VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN 0x8UL
++ __le16 vnic_id;
++ __le16 max_agg_segs;
++ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_1 0x0UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_2 0x1UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_4 0x2UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_8 0x3UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX 0x1fUL
++ #define VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_LAST VNIC_TPA_CFG_REQ_MAX_AGG_SEGS_MAX
++ __le16 max_aggs;
++ #define VNIC_TPA_CFG_REQ_MAX_AGGS_1 0x0UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGGS_2 0x1UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGGS_4 0x2UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGGS_8 0x3UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGGS_16 0x4UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGGS_MAX 0x7UL
++ #define VNIC_TPA_CFG_REQ_MAX_AGGS_LAST VNIC_TPA_CFG_REQ_MAX_AGGS_MAX
++ u8 unused_0[2];
++ __le32 max_agg_timer;
++ __le32 min_agg_len;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
+ struct hwrm_vnic_tpa_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
++};
++
++/* hwrm_vnic_tpa_qcfg_input (size:192b/24B) */
++struct hwrm_vnic_tpa_qcfg_input {
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 vnic_id;
++ u8 unused_0[6];
++};
++
++/* hwrm_vnic_tpa_qcfg_output (size:256b/32B) */
++struct hwrm_vnic_tpa_qcfg_output {
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 flags;
++ #define VNIC_TPA_QCFG_RESP_FLAGS_TPA 0x1UL
++ #define VNIC_TPA_QCFG_RESP_FLAGS_ENCAP_TPA 0x2UL
++ #define VNIC_TPA_QCFG_RESP_FLAGS_RSC_WND_UPDATE 0x4UL
++ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO 0x8UL
++ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_ECN 0x10UL
++ #define VNIC_TPA_QCFG_RESP_FLAGS_AGG_WITH_SAME_GRE_SEQ 0x20UL
++ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_IPID_CHECK 0x40UL
++ #define VNIC_TPA_QCFG_RESP_FLAGS_GRO_TTL_CHECK 0x80UL
++ __le16 max_agg_segs;
++ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_1 0x0UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_2 0x1UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_4 0x2UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_8 0x3UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX 0x1fUL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGG_SEGS_MAX
++ __le16 max_aggs;
++ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_1 0x0UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_2 0x1UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_4 0x2UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_8 0x3UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_16 0x4UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX 0x7UL
++ #define VNIC_TPA_QCFG_RESP_MAX_AGGS_LAST VNIC_TPA_QCFG_RESP_MAX_AGGS_MAX
++ __le32 max_agg_timer;
++ __le32 min_agg_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_rss_cfg */
+-/* Input (48 bytes) */
++/* hwrm_vnic_rss_cfg_input (size:384b/48B) */
+ struct hwrm_vnic_rss_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 hash_type;
+- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
+- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
+- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
+- #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
+- #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
+- #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
+- __le32 unused_0;
+- __le64 ring_grp_tbl_addr;
+- __le64 hash_key_tbl_addr;
+- __le16 rss_ctx_idx;
+- __le16 unused_1[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 hash_type;
++ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 0x1UL
++ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 0x2UL
++ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 0x4UL
++ #define VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 0x8UL
++ #define VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6 0x10UL
++ #define VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6 0x20UL
++ u8 unused_0[4];
++ __le64 ring_grp_tbl_addr;
++ __le64 hash_key_tbl_addr;
++ __le16 rss_ctx_idx;
++ u8 unused_1[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
+ struct hwrm_vnic_rss_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_plcmodes_cfg */
+-/* Input (40 bytes) */
++/* hwrm_vnic_plcmodes_cfg_input (size:320b/40B) */
+ struct hwrm_vnic_plcmodes_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
+- #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
+- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
+- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
+- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
+- #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
+- __le32 enables;
+- #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
+- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
+- #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
+- __le32 vnic_id;
+- __le16 jumbo_thresh;
+- __le16 hds_offset;
+- __le16 hds_threshold;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define VNIC_PLCMODES_CFG_REQ_FLAGS_REGULAR_PLACEMENT 0x1UL
++ #define VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT 0x2UL
++ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 0x4UL
++ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6 0x8UL
++ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_FCOE 0x10UL
++ #define VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_ROCE 0x20UL
++ __le32 enables;
++ #define VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID 0x1UL
++ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_OFFSET_VALID 0x2UL
++ #define VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID 0x4UL
++ __le32 vnic_id;
++ __le16 jumbo_thresh;
++ __le16 hds_offset;
++ __le16 hds_threshold;
++ u8 unused_0[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_plcmodes_cfg_output (size:128b/16B) */
+ struct hwrm_vnic_plcmodes_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_rss_cos_lb_ctx_alloc */
+-/* Input (16 bytes) */
++/* hwrm_vnic_rss_cos_lb_ctx_alloc_input (size:128b/16B) */
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_rss_cos_lb_ctx_alloc_output (size:128b/16B) */
+ struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 rss_cos_lb_ctx_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 rss_cos_lb_ctx_id;
++ u8 unused_0[5];
++ u8 valid;
+ };
+
+-/* hwrm_vnic_rss_cos_lb_ctx_free */
+-/* Input (24 bytes) */
++/* hwrm_vnic_rss_cos_lb_ctx_free_input (size:192b/24B) */
+ struct hwrm_vnic_rss_cos_lb_ctx_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 rss_cos_lb_ctx_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 rss_cos_lb_ctx_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_vnic_rss_cos_lb_ctx_free_output (size:128b/16B) */
+ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_ring_alloc */
+-/* Input (80 bytes) */
++/* hwrm_ring_alloc_input (size:640b/80B) */
+ struct hwrm_ring_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define RING_ALLOC_REQ_ENABLES_RESERVED1 0x1UL
+- #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
+- #define RING_ALLOC_REQ_ENABLES_RESERVED3 0x4UL
+- #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
+- #define RING_ALLOC_REQ_ENABLES_RESERVED4 0x10UL
+- #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
+- u8 ring_type;
+- #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
+- #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
+- #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
+- #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+- u8 unused_0;
+- __le16 unused_1;
+- __le64 page_tbl_addr;
+- __le32 fbo;
+- u8 page_size;
+- u8 page_tbl_depth;
+- u8 unused_2;
+- u8 unused_3;
+- __le32 length;
+- __le16 logical_id;
+- __le16 cmpl_ring_id;
+- __le16 queue_id;
+- u8 unused_4;
+- u8 unused_5;
+- __le32 reserved1;
+- __le16 ring_arb_cfg;
+- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
+- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
+- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP (0x1UL << 0)
+- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ (0x2UL << 0)
+- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
+- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
+- #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
+- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
+- #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
+- u8 unused_6;
+- u8 unused_7;
+- __le32 reserved3;
+- __le32 stat_ctx_id;
+- __le32 reserved4;
+- __le32 max_bw;
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
+- #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
+- #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
+- #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
+- #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define RING_ALLOC_REQ_ENABLES_RING_ARB_CFG 0x2UL
++ #define RING_ALLOC_REQ_ENABLES_STAT_CTX_ID_VALID 0x8UL
++ #define RING_ALLOC_REQ_ENABLES_MAX_BW_VALID 0x20UL
++ u8 ring_type;
++ #define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
++ #define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
++ #define RING_ALLOC_REQ_RING_TYPE_RX 0x2UL
++ #define RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL 0x3UL
++ #define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL
++ u8 unused_0[3];
++ __le64 page_tbl_addr;
++ __le32 fbo;
++ u8 page_size;
++ u8 page_tbl_depth;
++ u8 unused_1[2];
++ __le32 length;
++ __le16 logical_id;
++ __le16 cmpl_ring_id;
++ __le16 queue_id;
++ u8 unused_2[2];
++ __le32 reserved1;
++ __le16 ring_arb_cfg;
++ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_MASK 0xfUL
++ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SFT 0
++ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_SP 0x1UL
++ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ 0x2UL
++ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_LAST RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_WFQ
++ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_MASK 0xf0UL
++ #define RING_ALLOC_REQ_RING_ARB_CFG_RSVD_SFT 4
++ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_MASK 0xff00UL
++ #define RING_ALLOC_REQ_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
++ __le16 unused_3;
++ __le32 reserved3;
++ __le32 stat_ctx_id;
++ __le32 reserved4;
++ __le32 max_bw;
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_MASK 0xfffffffUL
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_SFT 0
++ #define RING_ALLOC_REQ_MAX_BW_SCALE 0x10000000UL
++ #define RING_ALLOC_REQ_MAX_BW_SCALE_BITS (0x0UL << 28)
++ #define RING_ALLOC_REQ_MAX_BW_SCALE_BYTES (0x1UL << 28)
++ #define RING_ALLOC_REQ_MAX_BW_SCALE_LAST RING_ALLOC_REQ_MAX_BW_SCALE_BYTES
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MASK 0xe0000000UL
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_SFT 29
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_MEGA (0x0UL << 29)
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_KILO (0x2UL << 29)
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_BASE (0x4UL << 29)
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_GIGA (0x6UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID (0x7UL << 29)
+- #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
+- u8 int_mode;
+- #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
+- #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
+- #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
+- #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
+- u8 unused_8[3];
++ #define RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_LAST RING_ALLOC_REQ_MAX_BW_BW_VALUE_UNIT_INVALID
++ u8 int_mode;
++ #define RING_ALLOC_REQ_INT_MODE_LEGACY 0x0UL
++ #define RING_ALLOC_REQ_INT_MODE_RSVD 0x1UL
++ #define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
++ #define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
++ #define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
++ u8 unused_4[3];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_ring_alloc_output (size:128b/16B) */
+ struct hwrm_ring_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 ring_id;
+- __le16 logical_ring_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 ring_id;
++ __le16 logical_ring_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_ring_free */
+-/* Input (24 bytes) */
++/* hwrm_ring_free_input (size:192b/24B) */
+ struct hwrm_ring_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 ring_type;
+- #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
+- #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
+- #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
+- #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+- u8 unused_0;
+- __le16 ring_id;
+- __le32 unused_1;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 ring_type;
++ #define RING_FREE_REQ_RING_TYPE_L2_CMPL 0x0UL
++ #define RING_FREE_REQ_RING_TYPE_TX 0x1UL
++ #define RING_FREE_REQ_RING_TYPE_RX 0x2UL
++ #define RING_FREE_REQ_RING_TYPE_ROCE_CMPL 0x3UL
++ #define RING_FREE_REQ_RING_TYPE_LAST RING_FREE_REQ_RING_TYPE_ROCE_CMPL
++ u8 unused_0;
++ __le16 ring_id;
++ u8 unused_1[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_ring_free_output (size:128b/16B) */
+ struct hwrm_ring_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_ring_cmpl_ring_qaggint_params */
+-/* Input (24 bytes) */
++/* hwrm_ring_cmpl_ring_qaggint_params_input (size:192b/24B) */
+ struct hwrm_ring_cmpl_ring_qaggint_params_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 ring_id;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 ring_id;
++ u8 unused_0[6];
+ };
+
+-/* Output (32 bytes) */
++/* hwrm_ring_cmpl_ring_qaggint_params_output (size:256b/32B) */
+ struct hwrm_ring_cmpl_ring_qaggint_params_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 flags;
+- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
+- #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
+- __le16 num_cmpl_dma_aggr;
+- __le16 num_cmpl_dma_aggr_during_int;
+- __le16 cmpl_aggr_dma_tmr;
+- __le16 cmpl_aggr_dma_tmr_during_int;
+- __le16 int_lat_tmr_min;
+- __le16 int_lat_tmr_max;
+- __le16 num_cmpl_aggr_int;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 flags;
++ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_TIMER_RESET 0x1UL
++ #define RING_CMPL_RING_QAGGINT_PARAMS_RESP_FLAGS_RING_IDLE 0x2UL
++ __le16 num_cmpl_dma_aggr;
++ __le16 num_cmpl_dma_aggr_during_int;
++ __le16 cmpl_aggr_dma_tmr;
++ __le16 cmpl_aggr_dma_tmr_during_int;
++ __le16 int_lat_tmr_min;
++ __le16 int_lat_tmr_max;
++ __le16 num_cmpl_aggr_int;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_ring_cmpl_ring_cfg_aggint_params */
+-/* Input (40 bytes) */
++/* hwrm_ring_cmpl_ring_cfg_aggint_params_input (size:320b/40B) */
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 ring_id;
+- __le16 flags;
+- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
+- #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
+- __le16 num_cmpl_dma_aggr;
+- __le16 num_cmpl_dma_aggr_during_int;
+- __le16 cmpl_aggr_dma_tmr;
+- __le16 cmpl_aggr_dma_tmr_during_int;
+- __le16 int_lat_tmr_min;
+- __le16 int_lat_tmr_max;
+- __le16 num_cmpl_aggr_int;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 ring_id;
++ __le16 flags;
++ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET 0x1UL
++ #define RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE 0x2UL
++ __le16 num_cmpl_dma_aggr;
++ __le16 num_cmpl_dma_aggr_during_int;
++ __le16 cmpl_aggr_dma_tmr;
++ __le16 cmpl_aggr_dma_tmr_during_int;
++ __le16 int_lat_tmr_min;
++ __le16 int_lat_tmr_max;
++ __le16 num_cmpl_aggr_int;
++ u8 unused_0[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_ring_cmpl_ring_cfg_aggint_params_output (size:128b/16B) */
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_ring_reset */
+-/* Input (24 bytes) */
++/* hwrm_ring_reset_input (size:192b/24B) */
+ struct hwrm_ring_reset_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 ring_type;
+- #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
+- #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
+- #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
+- #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
+- u8 unused_0;
+- __le16 ring_id;
+- __le32 unused_1;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 ring_type;
++ #define RING_RESET_REQ_RING_TYPE_L2_CMPL 0x0UL
++ #define RING_RESET_REQ_RING_TYPE_TX 0x1UL
++ #define RING_RESET_REQ_RING_TYPE_RX 0x2UL
++ #define RING_RESET_REQ_RING_TYPE_ROCE_CMPL 0x3UL
++ #define RING_RESET_REQ_RING_TYPE_LAST RING_RESET_REQ_RING_TYPE_ROCE_CMPL
++ u8 unused_0;
++ __le16 ring_id;
++ u8 unused_1[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_ring_reset_output (size:128b/16B) */
+ struct hwrm_ring_reset_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_ring_grp_alloc */
+-/* Input (24 bytes) */
++/* hwrm_ring_grp_alloc_input (size:192b/24B) */
+ struct hwrm_ring_grp_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 cr;
+- __le16 rr;
+- __le16 ar;
+- __le16 sc;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 cr;
++ __le16 rr;
++ __le16 ar;
++ __le16 sc;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_ring_grp_alloc_output (size:128b/16B) */
+ struct hwrm_ring_grp_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 ring_group_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 ring_group_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_ring_grp_free */
+-/* Input (24 bytes) */
++/* hwrm_ring_grp_free_input (size:192b/24B) */
+ struct hwrm_ring_grp_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 ring_group_id;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 ring_group_id;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_ring_grp_free_output (size:128b/16B) */
+ struct hwrm_ring_grp_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_l2_filter_alloc */
+-/* Input (96 bytes) */
++/* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */
+ struct hwrm_cfa_l2_filter_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
+- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
+- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
+- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
+- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
+- #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
+- __le32 enables;
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+- #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+- u8 l2_addr[6];
+- u8 unused_0;
+- u8 unused_1;
+- u8 l2_addr_mask[6];
+- __le16 l2_ovlan;
+- __le16 l2_ovlan_mask;
+- __le16 l2_ivlan;
+- __le16 l2_ivlan_mask;
+- u8 unused_2;
+- u8 unused_3;
+- u8 t_l2_addr[6];
+- u8 unused_4;
+- u8 unused_5;
+- u8 t_l2_addr_mask[6];
+- __le16 t_l2_ovlan;
+- __le16 t_l2_ovlan_mask;
+- __le16 t_l2_ivlan;
+- __le16 t_l2_ivlan_mask;
+- u8 src_type;
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
+- #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
+- u8 unused_6;
+- __le32 src_id;
+- u8 tunnel_type;
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+- #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+- u8 unused_7;
+- __le16 dst_id;
+- __le16 mirror_vnic_id;
+- u8 pri_hint;
+- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
+- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
+- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
+- #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
+- u8 unused_8;
+- __le32 unused_9;
+- __le64 l2_filter_id_hint;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
++ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX 0x0UL
++ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX 0x1UL
++ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
++ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
++ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
++ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
++ __le32 enables;
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x1UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK 0x2UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN 0x4UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_OVLAN_MASK 0x8UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x10UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN_MASK 0x20UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR 0x40UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_ADDR_MASK 0x80UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN 0x100UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_OVLAN_MASK 0x200UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN 0x400UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_T_L2_IVLAN_MASK 0x800UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_TYPE 0x1000UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_SRC_ID 0x2000UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4000UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
++ #define CFA_L2_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
++ u8 l2_addr[6];
++ u8 unused_0[2];
++ u8 l2_addr_mask[6];
++ __le16 l2_ovlan;
++ __le16 l2_ovlan_mask;
++ __le16 l2_ivlan;
++ __le16 l2_ivlan_mask;
++ u8 unused_1[2];
++ u8 t_l2_addr[6];
++ u8 unused_2[2];
++ u8 t_l2_addr_mask[6];
++ __le16 t_l2_ovlan;
++ __le16 t_l2_ovlan_mask;
++ __le16 t_l2_ivlan;
++ __le16 t_l2_ivlan_mask;
++ u8 src_type;
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT 0x0UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_PF 0x1UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VF 0x2UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_VNIC 0x3UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_KONG 0x4UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_APE 0x5UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_BONO 0x6UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG 0x7UL
++ #define CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_TANG
++ u8 unused_3;
++ __le32 src_id;
++ u8 tunnel_type;
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
++ #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
++ u8 unused_4;
++ __le16 dst_id;
++ __le16 mirror_vnic_id;
++ u8 pri_hint;
++ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
++ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_ABOVE_FILTER 0x1UL
++ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_BELOW_FILTER 0x2UL
++ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MAX 0x3UL
++ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN 0x4UL
++ #define CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_L2_FILTER_ALLOC_REQ_PRI_HINT_MIN
++ u8 unused_5;
++ __le32 unused_6;
++ __le64 l2_filter_id_hint;
+ };
+
+-/* Output (24 bytes) */
++/* hwrm_cfa_l2_filter_alloc_output (size:192b/24B) */
+ struct hwrm_cfa_l2_filter_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 l2_filter_id;
+- __le32 flow_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 l2_filter_id;
++ __le32 flow_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_l2_filter_free */
+-/* Input (24 bytes) */
++/* hwrm_cfa_l2_filter_free_input (size:192b/24B) */
+ struct hwrm_cfa_l2_filter_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le64 l2_filter_id;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le64 l2_filter_id;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_l2_filter_free_output (size:128b/16B) */
+ struct hwrm_cfa_l2_filter_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_l2_filter_cfg */
+-/* Input (40 bytes) */
++/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
+ struct hwrm_cfa_l2_filter_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
+- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+- __le32 enables;
+- #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
+- #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+- __le64 l2_filter_id;
+- __le32 dst_id;
+- __le32 new_mirror_vnic_id;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
++ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
++ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
++ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
++ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
++ __le32 enables;
++ #define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
++ #define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
++ __le64 l2_filter_id;
++ __le32 dst_id;
++ __le32 new_mirror_vnic_id;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
+ struct hwrm_cfa_l2_filter_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_l2_set_rx_mask */
+-/* Input (56 bytes) */
++/* hwrm_cfa_l2_set_rx_mask_input (size:448b/56B) */
+ struct hwrm_cfa_l2_set_rx_mask_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 vnic_id;
+- __le32 mask;
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_RESERVED 0x1UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+- #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
+- __le64 mc_tbl_addr;
+- __le32 num_mc_entries;
+- __le32 unused_0;
+- __le64 vlan_tag_tbl_addr;
+- __le32 num_vlan_tags;
+- __le32 unused_1;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 vnic_id;
++ __le32 mask;
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_MCAST 0x2UL
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST 0x4UL
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
++ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
++ __le64 mc_tbl_addr;
++ __le32 num_mc_entries;
++ u8 unused_0[4];
++ __le64 vlan_tag_tbl_addr;
++ __le32 num_vlan_tags;
++ u8 unused_1[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_l2_set_rx_mask_output (size:128b/16B) */
+ struct hwrm_cfa_l2_set_rx_mask_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* Command specific Error Codes (8 bytes) */
++/* hwrm_cfa_l2_set_rx_mask_cmd_err (size:64b/8B) */
+ struct hwrm_cfa_l2_set_rx_mask_cmd_err {
+- u8 code;
+- #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
++ u8 code;
++ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR 0x1UL
+- u8 unused_0[7];
++ #define CFA_L2_SET_RX_MASK_CMD_ERR_CODE_LAST CFA_L2_SET_RX_MASK_CMD_ERR_CODE_NTUPLE_FILTER_CONFLICT_ERR
++ u8 unused_0[7];
+ };
+
+-/* hwrm_cfa_tunnel_filter_alloc */
+-/* Input (88 bytes) */
++/* hwrm_cfa_tunnel_filter_alloc_input (size:704b/88B) */
+ struct hwrm_cfa_tunnel_filter_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+- __le32 enables;
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
+- #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
+- __le64 l2_filter_id;
+- u8 l2_addr[6];
+- __le16 l2_ivlan;
+- __le32 l3_addr[4];
+- __le32 t_l3_addr[4];
+- u8 l3_addr_type;
+- u8 t_l3_addr_type;
+- u8 tunnel_type;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
++ __le32 enables;
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_ADDR 0x2UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L2_IVLAN 0x4UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR 0x8UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_L3_ADDR_TYPE 0x10UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR_TYPE 0x20UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_T_L3_ADDR 0x40UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x80UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_VNI 0x100UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID 0x200UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x400UL
++ __le64 l2_filter_id;
++ u8 l2_addr[6];
++ __le16 l2_ivlan;
++ __le32 l3_addr[4];
++ __le32 t_l3_addr[4];
++ u8 l3_addr_type;
++ u8 t_l3_addr_type;
++ u8 tunnel_type;
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+@@ -4174,158 +4372,204 @@ struct hwrm_cfa_tunnel_filter_alloc_inpu
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+- u8 unused_0;
+- __le32 vni;
+- __le32 dst_vnic_id;
+- __le32 mirror_vnic_id;
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
++ u8 tunnel_flags;
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL
++ #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_EXTHDR_SEQNUM_S0 0x4UL
++ __le32 vni;
++ __le32 dst_vnic_id;
++ __le32 mirror_vnic_id;
+ };
+
+-/* Output (24 bytes) */
++/* hwrm_cfa_tunnel_filter_alloc_output (size:192b/24B) */
+ struct hwrm_cfa_tunnel_filter_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 tunnel_filter_id;
+- __le32 flow_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 tunnel_filter_id;
++ __le32 flow_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_tunnel_filter_free */
+-/* Input (24 bytes) */
++/* hwrm_cfa_tunnel_filter_free_input (size:192b/24B) */
+ struct hwrm_cfa_tunnel_filter_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le64 tunnel_filter_id;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le64 tunnel_filter_id;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_tunnel_filter_free_output (size:128b/16B) */
+ struct hwrm_cfa_tunnel_filter_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
++};
++
++/* hwrm_vxlan_ipv4_hdr (size:128b/16B) */
++struct hwrm_vxlan_ipv4_hdr {
++ u8 ver_hlen;
++ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_MASK 0xfUL
++ #define VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT 0
++ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_MASK 0xf0UL
++ #define VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT 4
++ u8 tos;
++ __be16 ip_id;
++ __be16 flags_frag_offset;
++ u8 ttl;
++ u8 protocol;
++ __be32 src_ip_addr;
++ __be32 dest_ip_addr;
+ };
+
+-/* hwrm_cfa_encap_record_alloc */
+-/* Input (32 bytes) */
++/* hwrm_vxlan_ipv6_hdr (size:320b/40B) */
++struct hwrm_vxlan_ipv6_hdr {
++ __be32 ver_tc_flow_label;
++ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_SFT 0x1cUL
++ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_VER_MASK 0xf0000000UL
++ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_SFT 0x14UL
++ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_TC_MASK 0xff00000UL
++ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_SFT 0x0UL
++ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK 0xfffffUL
++ #define VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_LAST VXLAN_IPV6_HDR_VER_TC_FLOW_LABEL_FLOW_LABEL_MASK
++ __be16 payload_len;
++ u8 next_hdr;
++ u8 ttl;
++ __be32 src_ip_addr[4];
++ __be32 dest_ip_addr[4];
++};
++
++/* hwrm_cfa_encap_data_vxlan (size:576b/72B) */
++struct hwrm_cfa_encap_data_vxlan {
++ u8 src_mac_addr[6];
++ __le16 unused_0;
++ u8 dst_mac_addr[6];
++ u8 num_vlan_tags;
++ u8 unused_1;
++ __be16 ovlan_tpid;
++ __be16 ovlan_tci;
++ __be16 ivlan_tpid;
++ __be16 ivlan_tci;
++ __le32 l3[10];
++ #define CFA_ENCAP_DATA_VXLAN_L3_VER_MASK 0xfUL
++ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV4 0x4UL
++ #define CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6 0x6UL
++ #define CFA_ENCAP_DATA_VXLAN_L3_LAST CFA_ENCAP_DATA_VXLAN_L3_VER_IPV6
++ __be16 src_port;
++ __be16 dst_port;
++ __be32 vni;
++};
++
++/* hwrm_cfa_encap_record_alloc_input (size:832b/104B) */
+ struct hwrm_cfa_encap_record_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+- u8 encap_type;
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
+- #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
+- u8 unused_0;
+- __le16 unused_1;
+- __le32 encap_data[20];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
++ u8 encap_type;
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN 0x1UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_NVGRE 0x2UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_L2GRE 0x3UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPIP 0x4UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_GENEVE 0x5UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_MPLS 0x6UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VLAN 0x7UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
++ #define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_LAST CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE
++ u8 unused_0[3];
++ __le32 encap_data[20];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_encap_record_alloc_output (size:128b/16B) */
+ struct hwrm_cfa_encap_record_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 encap_record_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 encap_record_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_encap_record_free */
+-/* Input (24 bytes) */
++/* hwrm_cfa_encap_record_free_input (size:192b/24B) */
+ struct hwrm_cfa_encap_record_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 encap_record_id;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 encap_record_id;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_encap_record_free_output (size:128b/16B) */
+ struct hwrm_cfa_encap_record_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_ntuple_filter_alloc */
+-/* Input (128 bytes) */
++/* hwrm_cfa_ntuple_filter_alloc_input (size:1024b/128B) */
+ struct hwrm_cfa_ntuple_filter_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
+- __le32 enables;
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
+- __le64 l2_filter_id;
+- u8 src_macaddr[6];
+- __be16 ethertype;
+- u8 ip_addr_type;
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+- u8 ip_protocol;
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+- __le16 dst_id;
+- __le16 mirror_vnic_id;
+- u8 tunnel_type;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x1UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP 0x2UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_METER 0x4UL
++ __le32 enables;
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x4UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x8UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x10UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x20UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK 0x40UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x80UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK 0x100UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x200UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x400UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK 0x800UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x1000UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK 0x2000UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_PRI_HINT 0x4000UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_NTUPLE_FILTER_ID 0x8000UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x10000UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x20000UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x40000UL
++ __le64 l2_filter_id;
++ u8 src_macaddr[6];
++ __be16 ethertype;
++ u8 ip_addr_type;
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
++ u8 ip_protocol;
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
++ __le16 dst_id;
++ __le16 mirror_vnic_id;
++ u8 tunnel_type;
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+@@ -4337,2221 +4581,1723 @@ struct hwrm_cfa_ntuple_filter_alloc_inpu
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+- u8 pri_hint;
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
+- #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
+- __be32 src_ipaddr[4];
+- __be32 src_ipaddr_mask[4];
+- __be32 dst_ipaddr[4];
+- __be32 dst_ipaddr_mask[4];
+- __be16 src_port;
+- __be16 src_port_mask;
+- __be16 dst_port;
+- __be16 dst_port_mask;
+- __le64 ntuple_filter_id_hint;
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
++ u8 pri_hint;
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_BELOW 0x2UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_HIGHEST 0x3UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST 0x4UL
++ #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_LOWEST
++ __be32 src_ipaddr[4];
++ __be32 src_ipaddr_mask[4];
++ __be32 dst_ipaddr[4];
++ __be32 dst_ipaddr_mask[4];
++ __be16 src_port;
++ __be16 src_port_mask;
++ __be16 dst_port;
++ __be16 dst_port_mask;
++ __le64 ntuple_filter_id_hint;
+ };
+
+-/* Output (24 bytes) */
++/* hwrm_cfa_ntuple_filter_alloc_output (size:192b/24B) */
+ struct hwrm_cfa_ntuple_filter_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 ntuple_filter_id;
+- __le32 flow_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 ntuple_filter_id;
++ __le32 flow_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* Command specific Error Codes (8 bytes) */
++/* hwrm_cfa_ntuple_filter_alloc_cmd_err (size:64b/8B) */
+ struct hwrm_cfa_ntuple_filter_alloc_cmd_err {
+- u8 code;
+- #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
++ u8 code;
++ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR 0x1UL
+- u8 unused_0[7];
++ #define CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_LAST CFA_NTUPLE_FILTER_ALLOC_CMD_ERR_CODE_RX_MASK_VLAN_CONFLICT_ERR
++ u8 unused_0[7];
+ };
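
The ntuple-filter request is driven by its enables bitmask: firmware interprets only the match fields whose bit is set. A minimal sketch of a TCP/IPv4 4-tuple match steered to a destination ID, assuming a zeroed request and kernel endian helpers (all values are placeholders, not defaults from this patch):

/* Illustrative sketch only, assuming the definitions above. */
static void example_fill_ntuple(struct hwrm_cfa_ntuple_filter_alloc_input *req,
				__be32 sip, __be32 dip,
				__be16 sport, __be16 dport, u16 dst_id)
{
	req->enables = cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |
				   CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID);
	req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
	req->ip_protocol = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP;
	req->src_ipaddr[0] = sip;	/* IPv4 uses word 0 of the array */
	req->dst_ipaddr[0] = dip;
	req->src_port = sport;		/* already network order */
	req->dst_port = dport;
	req->dst_id = cpu_to_le16(dst_id);
}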
+
+-/* hwrm_cfa_ntuple_filter_free */
+-/* Input (24 bytes) */
++/* hwrm_cfa_ntuple_filter_free_input (size:192b/24B) */
+ struct hwrm_cfa_ntuple_filter_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le64 ntuple_filter_id;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le64 ntuple_filter_id;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_ntuple_filter_free_output (size:128b/16B) */
+ struct hwrm_cfa_ntuple_filter_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_ntuple_filter_cfg */
+-/* Input (48 bytes) */
++/* hwrm_cfa_ntuple_filter_cfg_input (size:384b/48B) */
+ struct hwrm_cfa_ntuple_filter_cfg_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 enables;
+- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
+- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+- #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
+- __le32 unused_0;
+- __le64 ntuple_filter_id;
+- __le32 new_dst_id;
+- __le32 new_mirror_vnic_id;
+- __le16 new_meter_instance_id;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 enables;
++ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_DST_ID 0x1UL
++ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
++ #define CFA_NTUPLE_FILTER_CFG_REQ_ENABLES_NEW_METER_INSTANCE_ID 0x4UL
++ u8 unused_0[4];
++ __le64 ntuple_filter_id;
++ __le32 new_dst_id;
++ __le32 new_mirror_vnic_id;
++ __le16 new_meter_instance_id;
+ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID 0xffffUL
+- __le16 unused_1[3];
++ #define CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_LAST CFA_NTUPLE_FILTER_CFG_REQ_NEW_METER_INSTANCE_ID_INVALID
++ u8 unused_1[6];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_ntuple_filter_cfg_output (size:128b/16B) */
+ struct hwrm_cfa_ntuple_filter_cfg_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_decap_filter_alloc */
+-/* Input (104 bytes) */
++/* hwrm_cfa_decap_filter_alloc_input (size:832b/104B) */
+ struct hwrm_cfa_decap_filter_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 flags;
+- #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
+- __le32 enables;
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
+- __be32 tunnel_id;
+- u8 tunnel_type;
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
+- u8 unused_0;
+- __le16 unused_1;
+- u8 src_macaddr[6];
+- u8 unused_2;
+- u8 unused_3;
+- u8 dst_macaddr[6];
+- __be16 ovlan_vid;
+- __be16 ivlan_vid;
+- __be16 t_ovlan_vid;
+- __be16 t_ivlan_vid;
+- __be16 ethertype;
+- u8 ip_addr_type;
+- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
+- u8 ip_protocol;
+- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
+- #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
+- u8 unused_4;
+- u8 unused_5;
+- u8 unused_6[3];
+- u8 unused_7;
+- __be32 src_ipaddr[4];
+- __be32 dst_ipaddr[4];
+- __be16 src_port;
+- __be16 dst_port;
+- __le16 dst_id;
+- __le16 l2_ctxt_ref_id;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 flags;
++ #define CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL 0x1UL
++ __le32 enables;
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE 0x1UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID 0x2UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR 0x4UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR 0x8UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_OVLAN_VID 0x10UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IVLAN_VID 0x20UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_OVLAN_VID 0x40UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID 0x80UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x100UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR 0x200UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR 0x400UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE 0x800UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL 0x1000UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_PORT 0x2000UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT 0x4000UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_ID 0x8000UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL
++ __be32 tunnel_id;
++ u8 tunnel_type;
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL
++ u8 unused_0;
++ __le16 unused_1;
++ u8 src_macaddr[6];
++ u8 unused_2[2];
++ u8 dst_macaddr[6];
++ __be16 ovlan_vid;
++ __be16 ivlan_vid;
++ __be16 t_ovlan_vid;
++ __be16 t_ivlan_vid;
++ __be16 ethertype;
++ u8 ip_addr_type;
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_UNKNOWN 0x0UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4 0x4UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6
++ u8 ip_protocol;
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
++ #define CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_LAST CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP
++ __le16 unused_3;
++ __le32 unused_4;
++ __be32 src_ipaddr[4];
++ __be32 dst_ipaddr[4];
++ __be16 src_port;
++ __be16 dst_port;
++ __le16 dst_id;
++ __le16 l2_ctxt_ref_id;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_decap_filter_alloc_output (size:128b/16B) */
+ struct hwrm_cfa_decap_filter_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 decap_filter_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 decap_filter_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_decap_filter_free */
+-/* Input (24 bytes) */
++/* hwrm_cfa_decap_filter_free_input (size:192b/24B) */
+ struct hwrm_cfa_decap_filter_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 decap_filter_id;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 decap_filter_id;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_decap_filter_free_output (size:128b/16B) */
+ struct hwrm_cfa_decap_filter_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_flow_alloc */
+-/* Input (128 bytes) */
++/* hwrm_cfa_flow_alloc_input (size:1024b/128B) */
+ struct hwrm_cfa_flow_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 flags;
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
+- #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
+- __le16 src_fid;
+- __le32 tunnel_handle;
+- __le16 action_flags;
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
+- #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
+- __le16 dst_fid;
+- __be16 l2_rewrite_vlan_tpid;
+- __be16 l2_rewrite_vlan_tci;
+- __le16 act_meter_id;
+- __le16 ref_flow_handle;
+- __be16 ethertype;
+- __be16 outer_vlan_tci;
+- __be16 dmac[3];
+- __be16 inner_vlan_tci;
+- __be16 smac[3];
+- u8 ip_dst_mask_len;
+- u8 ip_src_mask_len;
+- __be32 ip_dst[4];
+- __be32 ip_src[4];
+- __be16 l4_src_port;
+- __be16 l4_src_port_mask;
+- __be16 l4_dst_port;
+- __be16 l4_dst_port_mask;
+- __be32 nat_ip_address[4];
+- __be16 l2_rewrite_dmac[3];
+- __be16 nat_port;
+- __be16 l2_rewrite_smac[3];
+- u8 ip_proto;
+- u8 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 flags;
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL 0x1UL
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_MASK 0x6UL
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_SFT 1
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_NONE (0x0UL << 1)
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE (0x1UL << 1)
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO (0x2UL << 1)
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_LAST CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_TWO
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_MASK 0x38UL
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_SFT 3
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2 (0x0UL << 3)
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 (0x1UL << 3)
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6 (0x2UL << 3)
++ #define CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_LAST CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6
++ __le16 src_fid;
++ __le32 tunnel_handle;
++ __le16 action_flags;
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL
++ #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL
++ __le16 dst_fid;
++ __be16 l2_rewrite_vlan_tpid;
++ __be16 l2_rewrite_vlan_tci;
++ __le16 act_meter_id;
++ __le16 ref_flow_handle;
++ __be16 ethertype;
++ __be16 outer_vlan_tci;
++ __be16 dmac[3];
++ __be16 inner_vlan_tci;
++ __be16 smac[3];
++ u8 ip_dst_mask_len;
++ u8 ip_src_mask_len;
++ __be32 ip_dst[4];
++ __be32 ip_src[4];
++ __be16 l4_src_port;
++ __be16 l4_src_port_mask;
++ __be16 l4_dst_port;
++ __be16 l4_dst_port_mask;
++ __be32 nat_ip_address[4];
++ __be16 l2_rewrite_dmac[3];
++ __be16 nat_port;
++ __be16 l2_rewrite_smac[3];
++ u8 ip_proto;
++ u8 unused_0;
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_flow_alloc_output (size:128b/16B) */
+ struct hwrm_cfa_flow_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 flow_handle;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 flow_handle;
++ u8 unused_0[5];
++ u8 valid;
+ };
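
A flow-alloc request pairs a match key (header fields plus mask lengths) with action_flags describing what to do on a hit; the 16-bit flow_handle in the response is what flow-free and flow-stats take afterwards. A minimal sketch of an IPv4 forwarding flow between two function IDs, assuming the classifier key is filled in separately (the FIDs are placeholders):

/* Illustrative sketch only, assuming the definitions above. */
static void example_fill_flow(struct hwrm_cfa_flow_alloc_input *req,
			      u16 src_fid, u16 dst_fid)
{
	req->flags = cpu_to_le16(CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4);
	req->src_fid = cpu_to_le16(src_fid);
	req->dst_fid = cpu_to_le16(dst_fid);
	req->action_flags = cpu_to_le16(CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD);
	/* Match fields (ethertype, ip_dst/ip_src with ip_*_mask_len,
	 * l4_*_port and masks) would be copied from the flow key here. */
}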
+
+-/* hwrm_cfa_flow_free */
+-/* Input (24 bytes) */
++/* hwrm_cfa_flow_free_input (size:192b/24B) */
+ struct hwrm_cfa_flow_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 flow_handle;
+- __le16 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 flow_handle;
++ u8 unused_0[6];
+ };
+
+-/* Output (32 bytes) */
++/* hwrm_cfa_flow_free_output (size:256b/32B) */
+ struct hwrm_cfa_flow_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 packet;
+- __le64 byte;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 packet;
++ __le64 byte;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_flow_stats */
+-/* Input (40 bytes) */
++/* hwrm_cfa_flow_stats_input (size:320b/40B) */
+ struct hwrm_cfa_flow_stats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 num_flows;
+- __le16 flow_handle_0;
+- __le16 flow_handle_1;
+- __le16 flow_handle_2;
+- __le16 flow_handle_3;
+- __le16 flow_handle_4;
+- __le16 flow_handle_5;
+- __le16 flow_handle_6;
+- __le16 flow_handle_7;
+- __le16 flow_handle_8;
+- __le16 flow_handle_9;
+- __le16 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 num_flows;
++ __le16 flow_handle_0;
++ __le16 flow_handle_1;
++ __le16 flow_handle_2;
++ __le16 flow_handle_3;
++ __le16 flow_handle_4;
++ __le16 flow_handle_5;
++ __le16 flow_handle_6;
++ __le16 flow_handle_7;
++ __le16 flow_handle_8;
++ __le16 flow_handle_9;
++ u8 unused_0[2];
+ };
+
+-/* Output (176 bytes) */
++/* hwrm_cfa_flow_stats_output (size:1408b/176B) */
+ struct hwrm_cfa_flow_stats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 packet_0;
+- __le64 packet_1;
+- __le64 packet_2;
+- __le64 packet_3;
+- __le64 packet_4;
+- __le64 packet_5;
+- __le64 packet_6;
+- __le64 packet_7;
+- __le64 packet_8;
+- __le64 packet_9;
+- __le64 byte_0;
+- __le64 byte_1;
+- __le64 byte_2;
+- __le64 byte_3;
+- __le64 byte_4;
+- __le64 byte_5;
+- __le64 byte_6;
+- __le64 byte_7;
+- __le64 byte_8;
+- __le64 byte_9;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 packet_0;
++ __le64 packet_1;
++ __le64 packet_2;
++ __le64 packet_3;
++ __le64 packet_4;
++ __le64 packet_5;
++ __le64 packet_6;
++ __le64 packet_7;
++ __le64 packet_8;
++ __le64 packet_9;
++ __le64 byte_0;
++ __le64 byte_1;
++ __le64 byte_2;
++ __le64 byte_3;
++ __le64 byte_4;
++ __le64 byte_5;
++ __le64 byte_6;
++ __le64 byte_7;
++ __le64 byte_8;
++ __le64 byte_9;
++ u8 unused_0[7];
++ u8 valid;
+ };
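
hwrm_cfa_flow_stats batches up to ten flow handles per query; the response carries parallel packet_N/byte_N counter pairs in handle order. Since packet_0..packet_9 and byte_0..byte_9 are contiguous __le64 fields, they can be walked as arrays. A minimal sketch of unpacking the first num_flows pairs (the walk via &resp->packet_0 is an illustration, not code from this patch):

/* Illustrative sketch only, assuming the definitions above. */
static void example_read_flow_stats(const struct hwrm_cfa_flow_stats_output *resp,
				    int num_flows, u64 *packets, u64 *bytes)
{
	const __le64 *pkt = &resp->packet_0;
	const __le64 *byt = &resp->byte_0;
	int i;

	for (i = 0; i < num_flows && i < 10; i++) {
		packets[i] = le64_to_cpu(pkt[i]);
		bytes[i] = le64_to_cpu(byt[i]);
	}
}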
+
+-/* hwrm_cfa_vfr_alloc */
+-/* Input (32 bytes) */
++/* hwrm_cfa_vfr_alloc_input (size:448b/56B) */
+ struct hwrm_cfa_vfr_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 vf_id;
+- __le16 reserved;
+- __le32 unused_0;
+- char vfr_name[32];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 vf_id;
++ __le16 reserved;
++ u8 unused_0[4];
++ char vfr_name[32];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_vfr_alloc_output (size:128b/16B) */
+ struct hwrm_cfa_vfr_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 rx_cfa_code;
+- __le16 tx_cfa_action;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 rx_cfa_code;
++ __le16 tx_cfa_action;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_cfa_vfr_free */
+-/* Input (24 bytes) */
++/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+ struct hwrm_cfa_vfr_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- char vfr_name[32];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ char vfr_name[32];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_cfa_vfr_free_output (size:128b/16B) */
+ struct hwrm_cfa_vfr_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_tunnel_dst_port_query */
+-/* Input (24 bytes) */
++/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
+ struct hwrm_tunnel_dst_port_query_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 tunnel_type;
+- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+- u8 unused_0[7];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 tunnel_type;
++ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
++ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
++ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
++ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4
++ u8 unused_0[7];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
+ struct hwrm_tunnel_dst_port_query_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 tunnel_dst_port_id;
+- __be16 tunnel_dst_port_val;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 tunnel_dst_port_id;
++ __be16 tunnel_dst_port_val;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_tunnel_dst_port_alloc */
+-/* Input (24 bytes) */
++/* hwrm_tunnel_dst_port_alloc_input (size:192b/24B) */
+ struct hwrm_tunnel_dst_port_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 tunnel_type;
+- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+- u8 unused_0;
+- __be16 tunnel_dst_port_val;
+- __be32 unused_1;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 tunnel_type;
++ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
++ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
++ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
++ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4
++ u8 unused_0;
++ __be16 tunnel_dst_port_val;
++ u8 unused_1[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
+ struct hwrm_tunnel_dst_port_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le16 tunnel_dst_port_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le16 tunnel_dst_port_id;
++ u8 unused_0[5];
++ u8 valid;
+ };
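
Note the mixed endianness in the tunnel-port messages: the wire port (tunnel_dst_port_val) is big-endian, while the firmware handle returned in tunnel_dst_port_id is little-endian. A minimal sketch of registering a VXLAN destination port, with 4789 (the IANA VXLAN port) as an illustrative value:

/* Illustrative sketch only, assuming the definitions above. */
static void example_alloc_vxlan_port(struct hwrm_tunnel_dst_port_alloc_input *req)
{
	req->tunnel_type = TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req->tunnel_dst_port_val = cpu_to_be16(4789);	/* network order */
}

static u16 example_port_handle(const struct hwrm_tunnel_dst_port_alloc_output *resp)
{
	/* This CPU-order handle is what hwrm_tunnel_dst_port_free takes. */
	return le16_to_cpu(resp->tunnel_dst_port_id);
}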
+
+-/* hwrm_tunnel_dst_port_free */
+-/* Input (24 bytes) */
++/* hwrm_tunnel_dst_port_free_input (size:192b/24B) */
+ struct hwrm_tunnel_dst_port_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 tunnel_type;
+- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+- u8 unused_0;
+- __le16 tunnel_dst_port_id;
+- __le32 unused_1;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 tunnel_type;
++ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
++ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
++ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
++ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4
++ u8 unused_0;
++ __le16 tunnel_dst_port_id;
++ u8 unused_1[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
+ struct hwrm_tunnel_dst_port_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_1[7];
++ u8 valid;
++};
++
++/* ctx_hw_stats (size:1280b/160B) */
++struct ctx_hw_stats {
++ __le64 rx_ucast_pkts;
++ __le64 rx_mcast_pkts;
++ __le64 rx_bcast_pkts;
++ __le64 rx_discard_pkts;
++ __le64 rx_drop_pkts;
++ __le64 rx_ucast_bytes;
++ __le64 rx_mcast_bytes;
++ __le64 rx_bcast_bytes;
++ __le64 tx_ucast_pkts;
++ __le64 tx_mcast_pkts;
++ __le64 tx_bcast_pkts;
++ __le64 tx_discard_pkts;
++ __le64 tx_drop_pkts;
++ __le64 tx_ucast_bytes;
++ __le64 tx_mcast_bytes;
++ __le64 tx_bcast_bytes;
++ __le64 tpa_pkts;
++ __le64 tpa_bytes;
++ __le64 tpa_events;
++ __le64 tpa_aborts;
+ };
+
+-/* hwrm_stat_ctx_alloc */
+-/* Input (32 bytes) */
++/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+ struct hwrm_stat_ctx_alloc_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le64 stats_dma_addr;
+- __le32 update_period_ms;
+- u8 stat_ctx_flags;
+- #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
+- u8 unused_0[3];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le64 stats_dma_addr;
++ __le32 update_period_ms;
++ u8 stat_ctx_flags;
++ #define STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE 0x1UL
++ u8 unused_0[3];
+ };
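
ctx_hw_stats above is the 160-byte block that firmware DMAs into host memory for each statistics context: stats_dma_addr in the alloc request points at it, and update_period_ms sets the refresh cadence. A minimal sketch of wiring the two together, assuming a coherent DMA buffer has already been mapped (the parameter names are illustrative):

/* Illustrative sketch only, assuming the definitions above. */
static void example_setup_stat_ctx(struct hwrm_stat_ctx_alloc_input *req,
				   dma_addr_t stats_map, u32 period_ms)
{
	/* Firmware writes a struct ctx_hw_stats at this address
	 * every period_ms milliseconds. */
	req->stats_dma_addr = cpu_to_le64(stats_map);
	req->update_period_ms = cpu_to_le32(period_ms);
}

static u64 example_rx_unicast(const struct ctx_hw_stats *hw)
{
	return le64_to_cpu(hw->rx_ucast_pkts);
}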
+
+-/* Output (16 bytes) */
++/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
+ struct hwrm_stat_ctx_alloc_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 stat_ctx_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 stat_ctx_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_stat_ctx_free */
+-/* Input (24 bytes) */
++/* hwrm_stat_ctx_free_input (size:192b/24B) */
+ struct hwrm_stat_ctx_free_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 stat_ctx_id;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 stat_ctx_id;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_stat_ctx_free_output (size:128b/16B) */
+ struct hwrm_stat_ctx_free_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 stat_ctx_id;
+- u8 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le32 stat_ctx_id;
++ u8 unused_0[3];
++ u8 valid;
+ };
+
+-/* hwrm_stat_ctx_query */
+-/* Input (24 bytes) */
++/* hwrm_stat_ctx_query_input (size:192b/24B) */
+ struct hwrm_stat_ctx_query_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 stat_ctx_id;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 stat_ctx_id;
++ u8 unused_0[4];
+ };
+
+-/* Output (176 bytes) */
++/* hwrm_stat_ctx_query_output (size:1408b/176B) */
+ struct hwrm_stat_ctx_query_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le64 tx_ucast_pkts;
+- __le64 tx_mcast_pkts;
+- __le64 tx_bcast_pkts;
+- __le64 tx_err_pkts;
+- __le64 tx_drop_pkts;
+- __le64 tx_ucast_bytes;
+- __le64 tx_mcast_bytes;
+- __le64 tx_bcast_bytes;
+- __le64 rx_ucast_pkts;
+- __le64 rx_mcast_pkts;
+- __le64 rx_bcast_pkts;
+- __le64 rx_err_pkts;
+- __le64 rx_drop_pkts;
+- __le64 rx_ucast_bytes;
+- __le64 rx_mcast_bytes;
+- __le64 rx_bcast_bytes;
+- __le64 rx_agg_pkts;
+- __le64 rx_agg_bytes;
+- __le64 rx_agg_events;
+- __le64 rx_agg_aborts;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ __le64 tx_ucast_pkts;
++ __le64 tx_mcast_pkts;
++ __le64 tx_bcast_pkts;
++ __le64 tx_err_pkts;
++ __le64 tx_drop_pkts;
++ __le64 tx_ucast_bytes;
++ __le64 tx_mcast_bytes;
++ __le64 tx_bcast_bytes;
++ __le64 rx_ucast_pkts;
++ __le64 rx_mcast_pkts;
++ __le64 rx_bcast_pkts;
++ __le64 rx_err_pkts;
++ __le64 rx_drop_pkts;
++ __le64 rx_ucast_bytes;
++ __le64 rx_mcast_bytes;
++ __le64 rx_bcast_bytes;
++ __le64 rx_agg_pkts;
++ __le64 rx_agg_bytes;
++ __le64 rx_agg_events;
++ __le64 rx_agg_aborts;
++ u8 unused_0[7];
++ u8 valid;
+ };
+
+-/* hwrm_stat_ctx_clr_stats */
+-/* Input (24 bytes) */
++/* hwrm_stat_ctx_clr_stats_input (size:192b/24B) */
+ struct hwrm_stat_ctx_clr_stats_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le32 stat_ctx_id;
+- __le32 unused_0;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le32 stat_ctx_id;
++ u8 unused_0[4];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_stat_ctx_clr_stats_output (size:128b/16B) */
+ struct hwrm_stat_ctx_clr_stats_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- __le32 unused_0;
+- u8 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 unused_0[7];
++ u8 valid;
++};
++
++/* tx_port_stats (size:3264b/408B) */
++struct tx_port_stats {
++ __le64 tx_64b_frames;
++ __le64 tx_65b_127b_frames;
++ __le64 tx_128b_255b_frames;
++ __le64 tx_256b_511b_frames;
++ __le64 tx_512b_1023b_frames;
++ __le64 tx_1024b_1518_frames;
++ __le64 tx_good_vlan_frames;
++ __le64 tx_1519b_2047_frames;
++ __le64 tx_2048b_4095b_frames;
++ __le64 tx_4096b_9216b_frames;
++ __le64 tx_9217b_16383b_frames;
++ __le64 tx_good_frames;
++ __le64 tx_total_frames;
++ __le64 tx_ucast_frames;
++ __le64 tx_mcast_frames;
++ __le64 tx_bcast_frames;
++ __le64 tx_pause_frames;
++ __le64 tx_pfc_frames;
++ __le64 tx_jabber_frames;
++ __le64 tx_fcs_err_frames;
++ __le64 tx_control_frames;
++ __le64 tx_oversz_frames;
++ __le64 tx_single_dfrl_frames;
++ __le64 tx_multi_dfrl_frames;
++ __le64 tx_single_coll_frames;
++ __le64 tx_multi_coll_frames;
++ __le64 tx_late_coll_frames;
++ __le64 tx_excessive_coll_frames;
++ __le64 tx_frag_frames;
++ __le64 tx_err;
++ __le64 tx_tagged_frames;
++ __le64 tx_dbl_tagged_frames;
++ __le64 tx_runt_frames;
++ __le64 tx_fifo_underruns;
++ __le64 tx_pfc_ena_frames_pri0;
++ __le64 tx_pfc_ena_frames_pri1;
++ __le64 tx_pfc_ena_frames_pri2;
++ __le64 tx_pfc_ena_frames_pri3;
++ __le64 tx_pfc_ena_frames_pri4;
++ __le64 tx_pfc_ena_frames_pri5;
++ __le64 tx_pfc_ena_frames_pri6;
++ __le64 tx_pfc_ena_frames_pri7;
++ __le64 tx_eee_lpi_events;
++ __le64 tx_eee_lpi_duration;
++ __le64 tx_llfc_logical_msgs;
++ __le64 tx_hcfc_msgs;
++ __le64 tx_total_collisions;
++ __le64 tx_bytes;
++ __le64 tx_xthol_frames;
++ __le64 tx_stat_discard;
++ __le64 tx_stat_error;
++};
++
++/* rx_port_stats (size:4224b/528B) */
++struct rx_port_stats {
++ __le64 rx_64b_frames;
++ __le64 rx_65b_127b_frames;
++ __le64 rx_128b_255b_frames;
++ __le64 rx_256b_511b_frames;
++ __le64 rx_512b_1023b_frames;
++ __le64 rx_1024b_1518_frames;
++ __le64 rx_good_vlan_frames;
++ __le64 rx_1519b_2047b_frames;
++ __le64 rx_2048b_4095b_frames;
++ __le64 rx_4096b_9216b_frames;
++ __le64 rx_9217b_16383b_frames;
++ __le64 rx_total_frames;
++ __le64 rx_ucast_frames;
++ __le64 rx_mcast_frames;
++ __le64 rx_bcast_frames;
++ __le64 rx_fcs_err_frames;
++ __le64 rx_ctrl_frames;
++ __le64 rx_pause_frames;
++ __le64 rx_pfc_frames;
++ __le64 rx_unsupported_opcode_frames;
++ __le64 rx_unsupported_da_pausepfc_frames;
++ __le64 rx_wrong_sa_frames;
++ __le64 rx_align_err_frames;
++ __le64 rx_oor_len_frames;
++ __le64 rx_code_err_frames;
++ __le64 rx_false_carrier_frames;
++ __le64 rx_ovrsz_frames;
++ __le64 rx_jbr_frames;
++ __le64 rx_mtu_err_frames;
++ __le64 rx_match_crc_frames;
++ __le64 rx_promiscuous_frames;
++ __le64 rx_tagged_frames;
++ __le64 rx_double_tagged_frames;
++ __le64 rx_trunc_frames;
++ __le64 rx_good_frames;
++ __le64 rx_pfc_xon2xoff_frames_pri0;
++ __le64 rx_pfc_xon2xoff_frames_pri1;
++ __le64 rx_pfc_xon2xoff_frames_pri2;
++ __le64 rx_pfc_xon2xoff_frames_pri3;
++ __le64 rx_pfc_xon2xoff_frames_pri4;
++ __le64 rx_pfc_xon2xoff_frames_pri5;
++ __le64 rx_pfc_xon2xoff_frames_pri6;
++ __le64 rx_pfc_xon2xoff_frames_pri7;
++ __le64 rx_pfc_ena_frames_pri0;
++ __le64 rx_pfc_ena_frames_pri1;
++ __le64 rx_pfc_ena_frames_pri2;
++ __le64 rx_pfc_ena_frames_pri3;
++ __le64 rx_pfc_ena_frames_pri4;
++ __le64 rx_pfc_ena_frames_pri5;
++ __le64 rx_pfc_ena_frames_pri6;
++ __le64 rx_pfc_ena_frames_pri7;
++ __le64 rx_sch_crc_err_frames;
++ __le64 rx_undrsz_frames;
++ __le64 rx_frag_frames;
++ __le64 rx_eee_lpi_events;
++ __le64 rx_eee_lpi_duration;
++ __le64 rx_llfc_physical_msgs;
++ __le64 rx_llfc_logical_msgs;
++ __le64 rx_llfc_msgs_with_crc_err;
++ __le64 rx_hcfc_msgs;
++ __le64 rx_hcfc_msgs_with_crc_err;
++ __le64 rx_bytes;
++ __le64 rx_runt_bytes;
++ __le64 rx_runt_frames;
++ __le64 rx_stat_discard;
++ __le64 rx_stat_err;
+ };
+
+-/* hwrm_fw_reset */
+-/* Input (24 bytes) */
++/* hwrm_fw_reset_input (size:192b/24B) */
+ struct hwrm_fw_reset_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 embedded_proc_type;
+- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+- #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+- u8 selfrst_status;
+- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
+- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
+- #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+- u8 host_idx;
+- u8 unused_0[5];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 embedded_proc_type;
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
++ #define FW_RESET_REQ_EMBEDDED_PROC_TYPE_LAST FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
++ u8 selfrst_status;
++ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
++ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
++ #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
++ #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
++ u8 host_idx;
++ u8 unused_0[5];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_fw_reset_output (size:128b/16B) */
+ struct hwrm_fw_reset_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 selfrst_status;
+- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+- #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+- u8 unused_0;
+- __le16 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 selfrst_status;
++ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
++ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
++ #define FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
++ #define FW_RESET_RESP_SELFRST_STATUS_LAST FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST
++ u8 unused_0[6];
++ u8 valid;
+ };
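
hwrm_fw_reset selects which embedded processor to reset via embedded_proc_type, and the response's selfrst_status tells the host whether the chip self-resets immediately, at the next PCIe reset, or not at all. A minimal sketch of requesting a management-processor reset and checking for a deferred self-reset (the choice of processor is a placeholder):

/* Illustrative sketch only, assuming the definitions above. */
static void example_request_fw_reset(struct hwrm_fw_reset_input *req)
{
	/* Other _PROC_TYPE_ values select the boot, network-control,
	 * RoCE or host processor, or the whole chip. */
	req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
}

static bool example_needs_pcie_reset(const struct hwrm_fw_reset_output *resp)
{
	return resp->selfrst_status ==
	       FW_RESET_RESP_SELFRST_STATUS_SELFRSTPCIERST;
}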
+
+-/* hwrm_fw_qstatus */
+-/* Input (24 bytes) */
++/* hwrm_fw_qstatus_input (size:192b/24B) */
+ struct hwrm_fw_qstatus_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- u8 embedded_proc_type;
+- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
+- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
+- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
+- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
+- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
+- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
+- #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
+- u8 unused_0[7];
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ u8 embedded_proc_type;
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT 0x0UL
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_AP 0x5UL
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP 0x6UL
++ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_LAST FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_CHIP
++ u8 unused_0[7];
+ };
+
+-/* Output (16 bytes) */
++/* hwrm_fw_qstatus_output (size:128b/16B) */
+ struct hwrm_fw_qstatus_output {
+- __le16 error_code;
+- __le16 req_type;
+- __le16 seq_id;
+- __le16 resp_len;
+- u8 selfrst_status;
+- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
+- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
+- #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
+- u8 unused_0;
+- __le16 unused_1;
+- u8 unused_2;
+- u8 unused_3;
+- u8 unused_4;
+- u8 valid;
++ __le16 error_code;
++ __le16 req_type;
++ __le16 seq_id;
++ __le16 resp_len;
++ u8 selfrst_status;
++ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE 0x0UL
++ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP 0x1UL
++ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
++ #define FW_QSTATUS_RESP_SELFRST_STATUS_LAST FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST
++ u8 unused_0[6];
++ u8 valid;
+ };
+
+-/* hwrm_fw_set_time */
+-/* Input (32 bytes) */
++/* hwrm_fw_set_time_input (size:256b/32B) */
+ struct hwrm_fw_set_time_input {
+- __le16 req_type;
+- __le16 cmpl_ring;
+- __le16 seq_id;
+- __le16 target_id;
+- __le64 resp_addr;
+- __le16 year;
+- #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
+- u8 month;
+- u8 day;
+- u8 hour;
+- u8 minute;
+- u8 second;
+- u8 unused_0;
+- __le16 millisecond;
+- __le16 zone;
+- #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
+- #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
+- __le32 unused_1;
++ __le16 req_type;
++ __le16 cmpl_ring;
++ __le16 seq_id;
++ __le16 target_id;
++ __le64 resp_addr;
++ __le16 year;
++ #define FW_SET_TIME_REQ_YEAR_UNKNOWN 0x0UL
++ #define FW_SET_TIME_REQ_YEAR_LAST FW_SET_TIME_REQ_YEAR_UNKNOWN
++ u8 month;
++ u8 day;
++ u8 hour;
++ u8 minute;
++ u8 second;
++ u8 unused_0;
++ __le16 millisecond;
++ __le16 zone;
++ #define FW_SET_TIME_REQ_ZONE_UTC 0x0UL
++ #define FW_SET_TIME_REQ_ZONE_UNKNOWN 0xffffUL
++ #define FW_SET_TIME_REQ_ZONE_LAST FW_SET_TIME_REQ_ZONE_UNKNOWN
++ u8 unused_1[4];
+ };