Neterion: New driver: Traffic & alarm handler
author     Ramkrishna Vepa <ram.vepa@neterion.com>
           Wed, 1 Apr 2009 18:14:58 +0000 (18:14 +0000)
committer  David S. Miller <davem@davemloft.net>
           Thu, 2 Apr 2009 07:33:43 +0000 (00:33 -0700)
This patch takes care of the traffic handling related APIs:
- Interrupt Enable and disable
- Mask / Unmask Interrupt
- Traffic Interrupt handling.
- Alarm Interrupt handling.

- Changes in this submission -
        - General clean up - removed redundant includes, defines and macros.

- Changes in previous submissions -
 - General cleanup - removed unused functions and variables.
 - Use asserts where necessary - Reported by Andi Kleen
 - Fixed sparse warnings - Reported by Andi Kleen
 - Use a prefix, "__vxge" in front of hw functions to make them globally
   unique - Ben Hutchings

Signed-off-by: Sivakumar Subramani <sivakumar.subramani@neterion.com>
Signed-off-by: Rastapur Santosh <santosh.rastapur@neterion.com>
Signed-off-by: Ramkrishna Vepa <ram.vepa@neterion.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/vxge/vxge-traffic.c [new file with mode: 0644]
drivers/net/vxge/vxge-traffic.h [new file with mode: 0644]

diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
new file mode 100644 (file)
index 0000000..7be0ae1
--- /dev/null
@@ -0,0 +1,2528 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice.  This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+ *                 Virtualized Server Adapter.
+ * Copyright(c) 2002-2009 Neterion Inc.
+ ******************************************************************************/
+#include <linux/etherdevice.h>
+
+#include "vxge-traffic.h"
+#include "vxge-config.h"
+#include "vxge-main.h"
+
+/*
+ * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Enable vpath interrupts. The function is to be executed last in the
+ * vpath initialization sequence.
+ *
+ * See also: vxge_hw_vpath_intr_disable()
+ */
+enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+
+       struct __vxge_hw_virtualpath *vpath;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       vpath = vp->vpath;
+
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
+       }
+
+       vp_reg = vpath->vp_reg;
+
+       writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->general_errors_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->pci_config_errors_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->mrpcim_to_vpath_alarm_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->srpcim_to_vpath_alarm_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->vpath_ppif_int_status);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->srpcim_msg_to_vpath_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->vpath_pcipif_int_status);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->prc_alarm_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->wrdma_alarm_status);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->asic_ntwk_vp_err_reg);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->xgmac_vp_int_status);
+
+       val64 = readq(&vp_reg->vpath_general_int_status);
+
+       /* Mask unwanted interrupts */
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->vpath_pcipif_int_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->srpcim_msg_to_vpath_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->srpcim_to_vpath_alarm_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->mrpcim_to_vpath_alarm_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->pci_config_errors_mask);
+
+       /* Unmask the individual interrupts */
+
+       writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
+               VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
+               VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
+               VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
+               &vp_reg->general_errors_mask);
+
+       __vxge_hw_pio_mem_write32_upper(
+               (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
+               VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
+               VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
+               VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
+               VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
+               VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
+               &vp_reg->kdfcctl_errors_mask);
+
+       __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
+
+       __vxge_hw_pio_mem_write32_upper(
+               (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
+               &vp_reg->prc_alarm_mask);
+
+       __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
+       __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
+
+       if (vpath->hldev->first_vp_id != vpath->vp_id)
+               __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->asic_ntwk_vp_err_mask);
+       else
+               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
+               VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
+               VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
+               &vp_reg->asic_ntwk_vp_err_mask);
+
+       __vxge_hw_pio_mem_write32_upper(0,
+               &vp_reg->vpath_general_int_mask);
+exit:
+       return status;
+
+}
+
+/*
+ * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Disable vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_intr_enable()
+ */
+enum vxge_hw_status vxge_hw_vpath_intr_disable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       vpath = vp->vpath;
+
+       if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+               status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+               goto exit;
+       }
+       vp_reg = vpath->vp_reg;
+
+       __vxge_hw_pio_mem_write32_upper(
+               (u32)VXGE_HW_INTR_MASK_ALL,
+               &vp_reg->vpath_general_int_mask);
+
+       val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
+
+       writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->general_errors_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->pci_config_errors_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->mrpcim_to_vpath_alarm_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->srpcim_to_vpath_alarm_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->vpath_ppif_int_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->srpcim_msg_to_vpath_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->vpath_pcipif_int_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->wrdma_alarm_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->prc_alarm_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->xgmac_vp_int_mask);
+
+       __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
+                       &vp_reg->asic_ntwk_vp_err_mask);
+
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_channel_msix_mask - Mask MSIX Vector.
+ * @channel: Channel (rx or tx) handle
+ * @msix_id: MSIX ID
+ *
+ * The function masks the msix interrupt for the given msix_id
+ */
+void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
+{
+
+       __vxge_hw_pio_mem_write32_upper(
+               (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
+                       0, 32),
+               &channel->common_reg->set_msix_mask_vect[msix_id%4]);
+
+       return;
+}
+
+/**
+ * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
+ * @channel: Channel (rx or tx) handle
+ * @msix_id: MSIX ID
+ *
+ * The function unmasks the msix interrupt for the given msix_id
+ */
+void
+vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
+{
+
+       __vxge_hw_pio_mem_write32_upper(
+               (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)),
+                       0, 32),
+               &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
+
+       return;
+}
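/*
 * Editor's note: illustrative sketch, not part of this patch.  A per-channel
 * MSI-X handler would typically mask its vector on entry and unmask it once
 * the completions have been handed off.  "my_msix_handler", the msix_id value
 * and the use of dev_id are assumptions; only vxge_hw_channel_msix_mask()/
 * _unmask() above are real.  Assumes <linux/interrupt.h>.
 */
static irqreturn_t my_msix_handler(int irq, void *dev_id)
{
        struct __vxge_hw_channel *channel = dev_id;
        int msix_id = 0;        /* vector assigned to this channel (assumed) */

        vxge_hw_channel_msix_mask(channel, msix_id);

        /* ... process or defer the channel's completions (e.g. NAPI) ... */

        vxge_hw_channel_msix_unmask(channel, msix_id);
        return IRQ_HANDLED;
}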
+
+/**
+ * vxge_hw_device_set_intr_type - Updates the configuration
+ *             with new interrupt type.
+ * @hldev: HW device handle.
+ * @intr_mode: New interrupt type
+ */
+u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
+{
+
+       if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+          (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+          (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+          (intr_mode != VXGE_HW_INTR_MODE_DEF))
+               intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
+
+       hldev->config.intr_mode = intr_mode;
+       return intr_mode;
+}
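/*
 * Editor's note: illustrative sketch, not part of this patch.  A driver would
 * normally ask for MSI-X and let the validation in
 * vxge_hw_device_set_intr_type() fall back to INTA for anything it does not
 * recognise.  "my_select_intr_mode" is a hypothetical helper.
 */
static u32 my_select_intr_mode(struct __vxge_hw_device *hldev, int want_msix)
{
        u32 mode = want_msix ? VXGE_HW_INTR_MODE_MSIX :
                               VXGE_HW_INTR_MODE_IRQLINE;

        /* returns the mode actually recorded in hldev->config.intr_mode */
        return vxge_hw_device_set_intr_type(hldev, mode);
}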
+
+/**
+ * vxge_hw_device_intr_enable - Enable interrupts.
+ * @hldev: HW device handle.
+ *
+ * Enable Titan interrupts. The function is to be executed last in the
+ * Titan initialization sequence.
+ *
+ * See also: vxge_hw_device_intr_disable()
+ */
+void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
+{
+       u32 i;
+       u64 val64;
+       u32 val32;
+
+       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+               if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+                       continue;
+
+               vxge_hw_vpath_intr_enable(
+                       VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
+       }
+
+       if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
+               val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+                       hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
+
+               if (val64 != 0) {
+                       writeq(val64, &hldev->common_reg->tim_int_status0);
+
+                       writeq(~val64, &hldev->common_reg->tim_int_mask0);
+               }
+
+               val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+                       hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
+
+               if (val32 != 0) {
+                       __vxge_hw_pio_mem_write32_upper(val32,
+                                       &hldev->common_reg->tim_int_status1);
+
+                       __vxge_hw_pio_mem_write32_upper(~val32,
+                                       &hldev->common_reg->tim_int_mask1);
+               }
+       }
+
+       val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+       vxge_hw_device_unmask_all(hldev);
+
+       return;
+}
+
+/**
+ * vxge_hw_device_intr_disable - Disable Titan interrupts.
+ * @hldev: HW device handle.
+ *
+ * Disable Titan interrupts.
+ *
+ * See also: vxge_hw_device_intr_enable()
+ */
+void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
+{
+       u32 i;
+
+       vxge_hw_device_mask_all(hldev);
+
+       /* mask all the tim interrupts */
+       writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
+       __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
+               &hldev->common_reg->tim_int_mask1);
+
+       for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+               if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+                       continue;
+
+               vxge_hw_vpath_intr_disable(
+                       VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
+       }
+
+       return;
+}
+
+/**
+ * vxge_hw_device_mask_all - Mask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Mask all device interrupts.
+ *
+ * See also: vxge_hw_device_unmask_all()
+ */
+void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
+{
+       u64 val64;
+
+       val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
+               VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+                               &hldev->common_reg->titan_mask_all_int);
+
+       return;
+}
+
+/**
+ * vxge_hw_device_unmask_all - Unmask all device interrupts.
+ * @hldev: HW device handle.
+ *
+ * Unmask all device interrupts.
+ *
+ * See also: vxge_hw_device_mask_all()
+ */
+void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
+{
+       u64 val64 = 0;
+
+       if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
+               val64 =  VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
+
+       __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
+                       &hldev->common_reg->titan_mask_all_int);
+
+       return;
+}
+
+/**
+ * vxge_hw_device_flush_io - Flush io writes.
+ * @hldev: HW device handle.
+ *
+ * The function performs a read operation to flush I/O writes.
+ *
+ * Returns: void
+ */
+void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
+{
+       u32 val32;
+
+       val32 = readl(&hldev->common_reg->titan_general_int_status);
+}
+
+/**
+ * vxge_hw_device_begin_irq - Begin IRQ processing.
+ * @hldev: HW device handle.
+ * @skip_alarms: Do not clear the alarms
+ * @reason: "Reason" for the interrupt, the value of Titan's
+ *     general_int_status register.
+ *
+ * The function first checks whether (in the shared IRQ case) the interrupt
+ * was raised by the device and, if so, processes any pending alarm
+ * interrupts.
+ *
+ * Note:
+ * vxge_hw_device_begin_irq() does not flush MMIO writes through the
+ * bridge. Therefore, two back-to-back interrupts are potentially possible.
+ *
+ * Returns: VXGE_HW_ERR_WRONG_IRQ if the interrupt is not "ours" (note that
+ * in this case the device remains enabled).
+ * Otherwise, vxge_hw_device_begin_irq() returns a status code and the 64bit
+ * general adapter status is returned through @reason.
+ */
+enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
+                                            u32 skip_alarms, u64 *reason)
+{
+       u32 i;
+       u64 val64;
+       u64 adapter_status;
+       u64 vpath_mask;
+       enum vxge_hw_status ret = VXGE_HW_OK;
+
+       val64 = readq(&hldev->common_reg->titan_general_int_status);
+
+       if (unlikely(!val64)) {
+               /* not Titan interrupt  */
+               *reason = 0;
+               ret = VXGE_HW_ERR_WRONG_IRQ;
+               goto exit;
+       }
+
+       if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
+
+               adapter_status = readq(&hldev->common_reg->adapter_status);
+
+               if (adapter_status == VXGE_HW_ALL_FOXES) {
+
+                       __vxge_hw_device_handle_error(hldev,
+                               NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
+                       *reason = 0;
+                       ret = VXGE_HW_ERR_SLOT_FREEZE;
+                       goto exit;
+               }
+       }
+
+       hldev->stats.sw_dev_info_stats.total_intr_cnt++;
+
+       *reason = val64;
+
+       vpath_mask = hldev->vpaths_deployed >>
+                               (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
+
+       if (val64 &
+           VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
+               hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
+
+               return VXGE_HW_OK;
+       }
+
+       hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
+
+       if (unlikely(val64 &
+                       VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
+
+               enum vxge_hw_status error_level = VXGE_HW_OK;
+
+               hldev->stats.sw_dev_err_stats.vpath_alarms++;
+
+               for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+
+                       if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+                               continue;
+
+                       ret = __vxge_hw_vpath_alarm_process(
+                               &hldev->virtual_paths[i], skip_alarms);
+
+                       error_level = VXGE_HW_SET_LEVEL(ret, error_level);
+
+                       if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
+                               (ret == VXGE_HW_ERR_SLOT_FREEZE)))
+                               break;
+               }
+
+               ret = error_level;
+       }
+exit:
+       return ret;
+}
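/*
 * Editor's note: illustrative sketch, not part of this patch.  A minimal INTA
 * interrupt handler built from the APIs in this file: claim the interrupt
 * with vxge_hw_device_begin_irq(), mask further interrupts, acknowledge the
 * Tx/Rx condition, defer the real work (e.g. to NAPI) and unmask again.
 * "my_isr" and the deferral step are assumptions; assumes <linux/interrupt.h>.
 */
static irqreturn_t my_isr(int irq, void *dev_id)
{
        struct __vxge_hw_device *hldev = dev_id;
        u64 reason;

        if (vxge_hw_device_begin_irq(hldev, 0, &reason) ==
                                        VXGE_HW_ERR_WRONG_IRQ)
                return IRQ_NONE;        /* shared line, not our interrupt */

        vxge_hw_device_mask_all(hldev);
        vxge_hw_device_clear_tx_rx(hldev);

        /* ... schedule NAPI / bottom-half processing here ... */

        vxge_hw_device_unmask_all(hldev);
        return IRQ_HANDLED;
}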
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for a programmable amount of time.
+ */
+enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+       /*
+        * If the link is already up, return.
+        */
+       if (hldev->link_state == VXGE_HW_LINK_UP)
+               goto exit;
+
+       hldev->link_state = VXGE_HW_LINK_UP;
+
+       /* notify driver */
+       if (hldev->uld_callbacks.link_up)
+               hldev->uld_callbacks.link_up(hldev);
+exit:
+       return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+       /*
+        * If the link is already down, return.
+        */
+       if (hldev->link_state == VXGE_HW_LINK_DOWN)
+               goto exit;
+
+       hldev->link_state = VXGE_HW_LINK_DOWN;
+
+       /* notify driver */
+       if (hldev->uld_callbacks.link_down)
+               hldev->uld_callbacks.link_down(hldev);
+exit:
+       return VXGE_HW_OK;
+}
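/*
 * Editor's note: illustrative sketch, not part of this patch.  The two
 * handlers above simply forward link transitions to the upper layer driver
 * through hldev->uld_callbacks.link_up/link_down, whose return value is
 * ignored here.  Void-returning callbacks and the names "my_link_up"/
 * "my_link_down" are assumptions; the real prototypes live in vxge-config.h.
 */
static void my_link_up(struct __vxge_hw_device *hldev)
{
        /* e.g. netif_carrier_on() on the associated net_device */
}

static void my_link_down(struct __vxge_hw_device *hldev)
{
        /* e.g. netif_carrier_off() on the associated net_device */
}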
+
+/**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
+ */
+enum vxge_hw_status
+__vxge_hw_device_handle_error(
+               struct __vxge_hw_device *hldev,
+               u32 vp_id,
+               enum vxge_hw_event type)
+{
+       switch (type) {
+       case VXGE_HW_EVENT_UNKNOWN:
+               break;
+       case VXGE_HW_EVENT_RESET_START:
+       case VXGE_HW_EVENT_RESET_COMPLETE:
+       case VXGE_HW_EVENT_LINK_DOWN:
+       case VXGE_HW_EVENT_LINK_UP:
+               goto out;
+       case VXGE_HW_EVENT_ALARM_CLEARED:
+               goto out;
+       case VXGE_HW_EVENT_ECCERR:
+       case VXGE_HW_EVENT_MRPCIM_ECCERR:
+               goto out;
+       case VXGE_HW_EVENT_FIFO_ERR:
+       case VXGE_HW_EVENT_VPATH_ERR:
+       case VXGE_HW_EVENT_CRITICAL_ERR:
+       case VXGE_HW_EVENT_SERR:
+               break;
+       case VXGE_HW_EVENT_SRPCIM_SERR:
+       case VXGE_HW_EVENT_MRPCIM_SERR:
+               goto out;
+       case VXGE_HW_EVENT_SLOT_FREEZE:
+               break;
+       default:
+               vxge_assert(0);
+               goto out;
+       }
+
+       /* notify driver */
+       if (hldev->uld_callbacks.crit_err)
+               hldev->uld_callbacks.crit_err(
+                       (struct __vxge_hw_device *)hldev,
+                       type, vp_id);
+out:
+
+       return VXGE_HW_OK;
+}
+
+/**
+ * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
+ * condition that has caused the Tx and Rx interrupt.
+ * @hldev: HW device.
+ *
+ * Acknowledge (that is, clear) the condition that has caused
+ * the Tx and Rx interrupt.
+ * See also: vxge_hw_device_begin_irq(),
+ * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
+ */
+void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
+{
+
+       if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+                                hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
+                               &hldev->common_reg->tim_int_status0);
+       }
+
+       if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               __vxge_hw_pio_mem_write32_upper(
+                               (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+                                hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
+                               &hldev->common_reg->tim_int_status1);
+       }
+
+       return;
+}
+
+/*
+ * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
+ * @channel: Channel
+ * @dtrh: Buffer to return the DTR pointer
+ *
+ * Allocates a dtr from the reserve array. If the reserve array is empty,
+ * it swaps the reserve and free arrays.
+ *
+ */
+enum vxge_hw_status
+vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
+{
+       void **tmp_arr;
+
+       if (channel->reserve_ptr - channel->reserve_top > 0) {
+_alloc_after_swap:
+               *dtrh = channel->reserve_arr[--channel->reserve_ptr];
+
+               return VXGE_HW_OK;
+       }
+
+       /* switch between empty and full arrays */
+
+       /* The idea behind this design is that by keeping the free and
+        * reserve arrays separate we effectively separate the irq and
+        * non-irq parts, i.e. no additional locking is needed when a
+        * resource is freed. */
+
+       if (channel->length - channel->free_ptr > 0) {
+
+               tmp_arr = channel->reserve_arr;
+               channel->reserve_arr = channel->free_arr;
+               channel->free_arr = tmp_arr;
+               channel->reserve_ptr = channel->length;
+               channel->reserve_top = channel->free_ptr;
+               channel->free_ptr = channel->length;
+
+               channel->stats->reserve_free_swaps_cnt++;
+
+               goto _alloc_after_swap;
+       }
+
+       channel->stats->full_cnt++;
+
+       *dtrh = NULL;
+       return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
+}
+
+/*
+ * vxge_hw_channel_dtr_post - Post a dtr to the channel
+ * @channelh: Channel
+ * @dtrh: DTR pointer
+ *
+ * Posts a dtr to work array.
+ *
+ */
+void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
+{
+       vxge_assert(channel->work_arr[channel->post_index] == NULL);
+
+       channel->work_arr[channel->post_index++] = dtrh;
+
+       /* wrap-around */
+       if (channel->post_index == channel->length)
+               channel->post_index = 0;
+}
+
+/*
+ * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
+ * @channel: Channel
+ * @dtr: Buffer to return the next completed DTR pointer
+ *
+ * Returns the next completed dtr without removing it from the work array
+ *
+ */
+void
+vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
+{
+       vxge_assert(channel->compl_index < channel->length);
+
+       *dtrh = channel->work_arr[channel->compl_index];
+}
+
+/*
+ * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
+ * @channel: Channel handle
+ *
+ * Removes the next completed dtr from the work array
+ *
+ */
+void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
+{
+       channel->work_arr[channel->compl_index] = NULL;
+
+       /* wrap-around */
+       if (++channel->compl_index == channel->length)
+               channel->compl_index = 0;
+
+       channel->stats->total_compl_cnt++;
+}
+
+/*
+ * vxge_hw_channel_dtr_free - Frees a dtr
+ * @channel: Channel handle
+ * @dtr:  DTR pointer
+ *
+ * Returns the dtr to the free array
+ *
+ */
+void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
+{
+       channel->free_arr[--channel->free_ptr] = dtrh;
+}
+
+/*
+ * vxge_hw_channel_dtr_count
+ * @channel: Channel handle. Obtained via vxge_hw_channel_open().
+ *
+ * Retrieve the number of DTRs available. This function cannot be called
+ * from the data path. ring_initial_replenishi() is the only user.
+ */
+int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
+{
+       return (channel->reserve_ptr - channel->reserve_top) +
+               (channel->length - channel->free_ptr);
+}
+
+/**
+ * vxge_hw_ring_rxd_reserve    - Reserve ring descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
+ * with a valid handle.
+ *
+ * Reserve an Rx descriptor for subsequent filling-in by the driver
+ * and posting on the corresponding channel (@channelh)
+ * via vxge_hw_ring_rxd_post().
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
+ *
+ */
+enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
+       void **rxdh)
+{
+       enum vxge_hw_status status;
+       struct __vxge_hw_channel *channel;
+
+       channel = &ring->channel;
+
+       status = vxge_hw_channel_dtr_alloc(channel, rxdh);
+
+       if (status == VXGE_HW_OK) {
+               struct vxge_hw_ring_rxd_1 *rxdp =
+                       (struct vxge_hw_ring_rxd_1 *)*rxdh;
+
+               rxdp->control_0 = rxdp->control_1 = 0;
+       }
+
+       return status;
+}
+
+/**
+ * vxge_hw_ring_rxd_free - Free descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
+ * lifecycle.
+ *
+ * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
+ * be:
+ *
+ * - reserved (vxge_hw_ring_rxd_reserve);
+ *
+ * - posted    (vxge_hw_ring_rxd_post);
+ *
+ * - completed (vxge_hw_ring_rxd_next_completed);
+ *
+ * - and recycled again        (vxge_hw_ring_rxd_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
+ *
+ */
+void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
+{
+       struct __vxge_hw_channel *channel;
+
+       channel = &ring->channel;
+
+       vxge_hw_channel_dtr_free(channel, rxdh);
+
+}
+
+/**
+ * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * This routine prepares an rxd and posts it.
+ */
+void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+       struct __vxge_hw_channel *channel;
+
+       channel = &ring->channel;
+
+       vxge_hw_channel_dtr_post(channel, rxdh);
+}
+
+/**
+ * vxge_hw_ring_rxd_post_post - Process rxd after post.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Processes rxd after post
+ */
+void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+       struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+       struct __vxge_hw_channel *channel;
+
+       channel = &ring->channel;
+
+       rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+
+       if (ring->stats->common_stats.usage_cnt > 0)
+               ring->stats->common_stats.usage_cnt--;
+}
+
+/**
+ * vxge_hw_ring_rxd_post - Post descriptor on the ring.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
+ *
+ * Post descriptor on the ring.
+ * Prior to posting, the descriptor should be filled in accordance with the
+ * Host/Titan interface specification for a given service (LL, etc.).
+ *
+ */
+void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
+{
+       struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
+       struct __vxge_hw_channel *channel;
+
+       channel = &ring->channel;
+
+       wmb();
+       rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
+
+       vxge_hw_channel_dtr_post(channel, rxdh);
+
+       if (ring->stats->common_stats.usage_cnt > 0)
+               ring->stats->common_stats.usage_cnt--;
+}
+
+/**
+ * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ *
+ * Processes rxd after post with memory barrier.
+ */
+void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
+{
+       struct __vxge_hw_channel *channel;
+
+       channel = &ring->channel;
+
+       wmb();
+       vxge_hw_ring_rxd_post_post(ring, rxdh);
+}
+
+/**
+ * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle. Returned by HW.
+ * @t_code:    Transfer code, as per Titan User Guide,
+ *      Receive Descriptor Format. Returned by HW.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
+ * driver of new completed descriptors. After that
+ * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
+ * of the completions (the very first completion is passed by HW via
+ * vxge_hw_ring_callback_f).
+ *
+ * Implementation-wise, the driver is free to call
+ * vxge_hw_ring_rxd_next_completed either immediately from inside the
+ * ring callback, or in a deferred fashion and separate (from HW)
+ * context.
+ *
+ * Non-zero @t_code means failure to fill-in receive buffer(s)
+ * of the descriptor.
+ * For instance, a parity error detected during the data transfer.
+ * In this case Titan will complete the descriptor and indicate
+ * to the host that the received data is not to be used.
+ * For details please refer to Titan User Guide.
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ * See also: vxge_hw_ring_callback_f{},
+ * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
+ */
+enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
+       struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
+{
+       struct __vxge_hw_channel *channel;
+       struct vxge_hw_ring_rxd_1 *rxdp;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       channel = &ring->channel;
+
+       vxge_hw_channel_dtr_try_complete(channel, rxdh);
+
+       rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
+       if (rxdp == NULL) {
+               status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+               goto exit;
+       }
+
+       /* check whether it is not the end */
+       if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) {
+
+               vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
+                               0);
+
+               ++ring->cmpl_cnt;
+               vxge_hw_channel_dtr_complete(channel);
+
+               *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
+
+               vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
+
+               ring->stats->common_stats.usage_cnt++;
+               if (ring->stats->common_stats.usage_max <
+                               ring->stats->common_stats.usage_cnt)
+                       ring->stats->common_stats.usage_max =
+                               ring->stats->common_stats.usage_cnt;
+
+               status = VXGE_HW_OK;
+               goto exit;
+       }
+
+       /* Reset it, since we don't want to return
+        * garbage to the driver */
+       *rxdh = NULL;
+       status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_ring_handle_tcode - Handle transfer code.
+ * @ring: Handle to the ring object used for receive
+ * @rxdh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Titan user guide)
+ * "transfer codes".
+ *
+ * Handle descriptor's transfer code. The latter comes with each completed
+ * descriptor.
+ *
+ * Returns: one of the enum vxge_hw_status{} enumerated types.
+ * VXGE_HW_OK                  - for success.
+ * VXGE_HW_ERR_CRITICAL         - when a critical error is encountered.
+ */
+enum vxge_hw_status vxge_hw_ring_handle_tcode(
+       struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
+{
+       struct __vxge_hw_channel *channel;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       channel = &ring->channel;
+
+       /* A t_code of 0 is success; t_code 5 (an unparseable packet, such
+        * as an unknown IPv6 header) is also accepted.  Any other valid
+        * t_code is accounted as an error; t_codes above 0xF are invalid.
+        */
+
+       if (t_code == 0 || t_code == 5) {
+               status = VXGE_HW_OK;
+               goto exit;
+       }
+
+       if (t_code > 0xF) {
+               status = VXGE_HW_ERR_INVALID_TCODE;
+               goto exit;
+       }
+
+       ring->stats->rxd_t_code_err_cnt[t_code]++;
+exit:
+       return status;
+}
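/*
 * Editor's note: illustrative sketch, not part of this patch.  Rx completion
 * processing with the ring APIs above: drain completed descriptors, let
 * vxge_hw_ring_handle_tcode() account the transfer code, then give each rxd
 * back to the hardware.  Handing the buffer to the stack and refilling it
 * are only hinted at; "my_ring_poll" is a hypothetical helper.
 */
static void my_ring_poll(struct __vxge_hw_ring *ring)
{
        void *rxdh;
        u8 t_code;

        while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
                                                                VXGE_HW_OK) {
                vxge_hw_ring_handle_tcode(ring, rxdh, t_code);

                /* ... hand the received buffer to the stack, refill it ... */

                /* return the descriptor to the hardware */
                vxge_hw_ring_rxd_post(ring, rxdh);
        }
}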
+
+/**
+ * __vxge_hw_non_offload_db_post - Post non offload doorbell
+ *
+ * @fifo: Fifo handle
+ * @txdl_ptr: The starting location of the TxDL in host memory
+ * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
+ * @no_snoop: No snoop flags
+ *
+ * This function posts a non-offload doorbell to the doorbell FIFO
+ *
+ */
+static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
+       u64 txdl_ptr, u32 num_txds, u32 no_snoop)
+{
+       struct __vxge_hw_channel *channel;
+
+       channel = &fifo->channel;
+
+       writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
+               VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
+               VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
+               &fifo->nofl_db->control_0);
+
+       wmb();
+
+       writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
+       wmb();
+
+}
+
+/**
+ * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
+ * the fifo
+ * @fifoh: Handle to the fifo object used for non offload send
+ */
+u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
+{
+       return vxge_hw_channel_dtr_count(&fifoh->channel);
+}
+
+/**
+ * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
+ * @fifoh: Handle to the fifo object used for non offload send
+ * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
+ *        with a valid handle.
+ * @txdl_priv: Buffer to return the pointer to per txdl space
+ *
+ * Reserve a single TxDL (that is, fifo descriptor)
+ * for subsequent filling-in by the driver
+ * and posting on the corresponding channel (@channelh)
+ * via vxge_hw_fifo_txdl_post().
+ *
+ * Note: it is the responsibility of the driver to reserve multiple descriptors
+ * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
+ * carries up to configured number (fifo.max_frags) of contiguous buffers.
+ *
+ * Returns: VXGE_HW_OK - success;
+ * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
+ *
+ */
+enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
+       struct __vxge_hw_fifo *fifo,
+       void **txdlh, void **txdl_priv)
+{
+       struct __vxge_hw_channel *channel;
+       enum vxge_hw_status status;
+       int i;
+
+       channel = &fifo->channel;
+
+       status = vxge_hw_channel_dtr_alloc(channel, txdlh);
+
+       if (status == VXGE_HW_OK) {
+               struct vxge_hw_fifo_txd *txdp =
+                       (struct vxge_hw_fifo_txd *)*txdlh;
+               struct __vxge_hw_fifo_txdl_priv *priv;
+
+               priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
+
+               /* reset the TxDL's private */
+               priv->align_dma_offset = 0;
+               priv->align_vaddr_start = priv->align_vaddr;
+               priv->align_used_frags = 0;
+               priv->frags = 0;
+               priv->alloc_frags = fifo->config->max_frags;
+               priv->next_txdl_priv = NULL;
+
+               *txdl_priv = (void *)(size_t)txdp->host_control;
+
+               for (i = 0; i < fifo->config->max_frags; i++) {
+                       txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
+                       txdp->control_0 = txdp->control_1 = 0;
+               }
+       }
+
+       return status;
+}
+
+/**
+ * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
+ * descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ * @frag_idx: Index of the data buffer in the caller's scatter-gather list
+ *            (of buffers).
+ * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
+ * @size: Size of the data buffer (in bytes).
+ *
+ * This API is part of the preparation of the transmit descriptor for posting
+ * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
+ * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
+ * All three APIs fill in the fields of the fifo descriptor,
+ * in accordance with the Titan specification.
+ *
+ */
+void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
+                                 void *txdlh, u32 frag_idx,
+                                 dma_addr_t dma_pointer, u32 size)
+{
+       struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+       struct vxge_hw_fifo_txd *txdp, *txdp_last;
+       struct __vxge_hw_channel *channel;
+
+       channel = &fifo->channel;
+
+       txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
+       txdp = (struct vxge_hw_fifo_txd *)txdlh  +  txdl_priv->frags;
+
+       if (frag_idx != 0)
+               txdp->control_0 = txdp->control_1 = 0;
+       else {
+               txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
+                       VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
+               txdp->control_1 |= fifo->interrupt_type;
+               txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
+                       fifo->tx_intr_num);
+               if (txdl_priv->frags) {
+                       txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +
+                       (txdl_priv->frags - 1);
+                       txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
+                               VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
+               }
+       }
+
+       vxge_assert(frag_idx < txdl_priv->alloc_frags);
+
+       txdp->buffer_pointer = (u64)dma_pointer;
+       txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
+       fifo->stats->total_buffers++;
+       txdl_priv->frags++;
+}
+
+/**
+ * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
+ * @frags: Number of contiguous buffers that are part of a single
+ *         transmit operation.
+ *
+ * Post descriptor on the 'fifo' type channel for transmission.
+ * Prior to posting, the descriptor should be filled in accordance with the
+ * Host/Titan interface specification for a given service (LL, etc.).
+ *
+ */
+void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
+{
+       struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+       struct vxge_hw_fifo_txd *txdp_last;
+       struct vxge_hw_fifo_txd *txdp_first;
+       struct __vxge_hw_channel *channel;
+
+       channel = &fifo->channel;
+
+       txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
+       txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
+
+       txdp_last = (struct vxge_hw_fifo_txd *)txdlh  +  (txdl_priv->frags - 1);
+       txdp_last->control_0 |=
+             VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
+       txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
+
+       vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
+
+       __vxge_hw_non_offload_db_post(fifo,
+               (u64)(size_t)txdl_priv->dma_addr,
+               txdl_priv->frags - 1,
+               fifo->no_snoop_bits);
+
+       fifo->stats->total_posts++;
+       fifo->stats->common_stats.usage_cnt++;
+       if (fifo->stats->common_stats.usage_max <
+               fifo->stats->common_stats.usage_cnt)
+               fifo->stats->common_stats.usage_max =
+                       fifo->stats->common_stats.usage_cnt;
+}
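/*
 * Editor's note: illustrative sketch, not part of this patch.  Transmit path
 * with the fifo APIs above: reserve a TxDL, attach each DMA-mapped fragment
 * with vxge_hw_fifo_txdl_buffer_set(), then post.  The DMA mapping of the
 * fragments ("dma_addrs"/"lens") is assumed to have been done by the caller.
 */
static enum vxge_hw_status my_xmit(struct __vxge_hw_fifo *fifo,
                                   dma_addr_t *dma_addrs, u32 *lens,
                                   u32 nr_frags)
{
        enum vxge_hw_status status;
        void *txdlh, *txdl_priv;
        u32 i;

        status = vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv);
        if (status != VXGE_HW_OK)
                return status;  /* out of descriptors: stop the tx queue */

        for (i = 0; i < nr_frags; i++)
                vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, i,
                                             dma_addrs[i], lens[i]);

        vxge_hw_fifo_txdl_post(fifo, txdlh);
        return VXGE_HW_OK;
}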
+
+/**
+ * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle. Returned by HW.
+ * @t_code: Transfer code, as per Titan User Guide,
+ *          Transmit Descriptor Format.
+ *          Returned by HW.
+ *
+ * Retrieve the _next_ completed descriptor.
+ * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
+ * driver of new completed descriptors. After that
+ * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
+ * of the completions (the very first completion is passed by HW via
+ * vxge_hw_channel_callback_f).
+ *
+ * Implementation-wise, the driver is free to call
+ * vxge_hw_fifo_txdl_next_completed either immediately from inside the
+ * channel callback, or in a deferred fashion and separate (from HW)
+ * context.
+ *
+ * Non-zero @t_code means failure to process the descriptor.
+ * The failure could happen, for instance, when the link is
+ * down, in which case Titan completes the descriptor because it
+ * is not able to send the data out.
+ *
+ * For details please refer to Titan User Guide.
+ *
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
+ * are currently available for processing.
+ *
+ */
+enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
+       struct __vxge_hw_fifo *fifo, void **txdlh,
+       enum vxge_hw_fifo_tcode *t_code)
+{
+       struct __vxge_hw_channel *channel;
+       struct vxge_hw_fifo_txd *txdp;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       channel = &fifo->channel;
+
+       vxge_hw_channel_dtr_try_complete(channel, txdlh);
+
+       txdp = (struct vxge_hw_fifo_txd *)*txdlh;
+       if (txdp == NULL) {
+               status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+               goto exit;
+       }
+
+       /* check whether host owns it */
+       if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
+
+               vxge_assert(txdp->host_control != 0);
+
+               vxge_hw_channel_dtr_complete(channel);
+
+               *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
+
+               if (fifo->stats->common_stats.usage_cnt > 0)
+                       fifo->stats->common_stats.usage_cnt--;
+
+               status = VXGE_HW_OK;
+               goto exit;
+       }
+
+       /* no more completions */
+       *txdlh = NULL;
+       status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_fifo_handle_tcode - Handle transfer code.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ * @t_code: One of the enumerated (and documented in the Titan user guide)
+ *          "transfer codes".
+ *
+ * Handle descriptor's transfer code. The latter comes with each completed
+ * descriptor.
+ *
+ * Returns: one of the enum vxge_hw_status{} enumerated types.
+ * VXGE_HW_OK - for success.
+ * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
+ */
+enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
+                                             void *txdlh,
+                                             enum vxge_hw_fifo_tcode t_code)
+{
+       struct __vxge_hw_channel *channel;
+
+       enum vxge_hw_status status = VXGE_HW_OK;
+       channel = &fifo->channel;
+
+       if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
+               status = VXGE_HW_ERR_INVALID_TCODE;
+               goto exit;
+       }
+
+       fifo->stats->txd_t_code_err_cnt[t_code]++;
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_fifo_txdl_free - Free descriptor.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @txdlh: Descriptor handle.
+ *
+ * Free the reserved descriptor. This operation is "symmetrical" to
+ * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
+ * lifecycle.
+ *
+ * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
+ * be:
+ *
+ * - reserved (vxge_hw_fifo_txdl_reserve);
+ *
+ * - posted (vxge_hw_fifo_txdl_post);
+ *
+ * - completed (vxge_hw_fifo_txdl_next_completed);
+ *
+ * - and recycled again (vxge_hw_fifo_txdl_free).
+ *
+ * For alternative state transitions and more details please refer to
+ * the design doc.
+ *
+ */
+void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
+{
+       struct __vxge_hw_fifo_txdl_priv *txdl_priv;
+       u32 max_frags;
+       struct __vxge_hw_channel *channel;
+
+       channel = &fifo->channel;
+
+       txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
+                       (struct vxge_hw_fifo_txd *)txdlh);
+
+       max_frags = fifo->config->max_frags;
+
+       vxge_hw_channel_dtr_free(channel, txdlh);
+}
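/*
 * Editor's note: illustrative sketch, not part of this patch.  Tx completion:
 * walk the completed TxDLs, account the transfer code, unmap and free the
 * associated skb (not shown) and return each TxDL to the reserve pool with
 * vxge_hw_fifo_txdl_free().  "my_tx_complete" is a hypothetical helper.
 */
static void my_tx_complete(struct __vxge_hw_fifo *fifo)
{
        void *txdlh;
        enum vxge_hw_fifo_tcode t_code;

        while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
                                                                VXGE_HW_OK) {
                vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);

                /* ... dma_unmap and dev_kfree_skb for this TxDL ... */

                vxge_hw_fifo_txdl_free(fifo, txdlh);
        }
}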
+
+/**
+ * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
+ *               to MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: MAC address to be added for this vpath into the list
+ * @macaddr_mask: MAC address mask for macaddr
+ * @duplicate_mode: Duplicate MAC address add mode. Please see
+ *             enum vxge_hw_vpath_mac_addr_add_mode{}
+ *
+ * Adds the given mac address and mac address mask into the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_add(
+       struct __vxge_hw_vpath_handle *vp,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN],
+       enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
+{
+       u32 i;
+       u64 data1 = 0ULL;
+       u64 data2 = 0ULL;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               data1 <<= 8;
+               data1 |= (u8)macaddr[i];
+
+               data2 <<= 8;
+               data2 |= (u8)macaddr_mask[i];
+       }
+
+       switch (duplicate_mode) {
+       case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
+               i = 0;
+               break;
+       case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
+               i = 1;
+               break;
+       case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
+               i = 2;
+               break;
+       default:
+               i = 0;
+               break;
+       }
+
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0,
+                       VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+                       VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
+                       VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
+ *               from MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: First MAC address entry for this vpath in the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Returns the first mac address and mac address mask in the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get(
+       struct __vxge_hw_vpath_handle *vp,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN])
+{
+       u32 i;
+       u64 data1 = 0ULL;
+       u64 data2 = 0ULL;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       status = __vxge_hw_vpath_rts_table_get(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0, &data1, &data2);
+
+       if (status != VXGE_HW_OK)
+               goto exit;
+
+       data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
+
+       data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
+
+       for (i = ETH_ALEN; i > 0; i--) {
+               macaddr[i-1] = (u8)(data1 & 0xFF);
+               data1 >>= 8;
+
+               macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+               data2 >>= 8;
+       }
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
+ *               vpath from the MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: Next MAC address entry for this vpath in the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Returns the next mac address and mac address mask in the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_get
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get_next(
+       struct __vxge_hw_vpath_handle *vp,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN])
+{
+       u32 i;
+       u64 data1 = 0ULL;
+       u64 data2 = 0ULL;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       status = __vxge_hw_vpath_rts_table_get(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0, &data1, &data2);
+
+       if (status != VXGE_HW_OK)
+               goto exit;
+
+       data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
+
+       data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
+
+       for (i = ETH_ALEN; i > 0; i--) {
+               macaddr[i-1] = (u8)(data1 & 0xFF);
+               data1 >>= 8;
+
+               macaddr_mask[i-1] = (u8)(data2 & 0xFF);
+               data2 >>= 8;
+       }
+
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
+ *               from the MAC address table.
+ * @vp: Vpath handle.
+ * @macaddr: MAC address to be deleted for this vpath from the list
+ * @macaddr_mask: MAC address mask for macaddr
+ *
+ * Deletes the given mac address and mac address mask from the list for this
+ * vpath.
+ * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
+ * vxge_hw_vpath_mac_addr_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+       struct __vxge_hw_vpath_handle *vp,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN])
+{
+       u32 i;
+       u64 data1 = 0ULL;
+       u64 data2 = 0ULL;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               data1 <<= 8;
+               data1 |= (u8)macaddr[i];
+
+               data2 <<= 8;
+               data2 |= (u8)macaddr_mask[i];
+       }
+
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+                       0,
+                       VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
+                       VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
+exit:
+       return status;
+}
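/*
 * Editor's note: illustrative sketch, not part of this patch.  Walking the
 * vpath's MAC address table: fetch the first entry, then keep asking for the
 * next one until the table access fails.  "my_dump_mac_table" is a
 * hypothetical helper.
 */
static void my_dump_mac_table(struct __vxge_hw_vpath_handle *vp)
{
        u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
        enum vxge_hw_status status;

        status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask);
        while (status == VXGE_HW_OK) {
                /* ... use macaddr/macaddr_mask (e.g. print or compare) ... */
                status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
                                                         macaddr_mask);
        }
}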
+
+/**
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
+ *               to vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be added for this vpath into the list
+ *
+ * Adds the given vlan id into the list for this  vpath.
+ * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
+ *               from vlan id table.
+ * @vp: Vpath handle.
+ * @vid: Buffer to return vlan id
+ *
+ * Returns the first vlan id in the list for this vpath.
+ * see also: vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
+{
+       u64 data;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       status = __vxge_hw_vpath_rts_table_get(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, vid, &data);
+
+       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
+ *               from vlan id table.
+ * @vp: Vpath handle.
+ * @vid: Buffer to return vlan id
+ *
+ * Returns the next vlan id in the list for this vpath.
+ * see also: vxge_hw_vpath_vid_get
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
+{
+       u64 data;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       status = __vxge_hw_vpath_rts_table_get(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, vid, &data);
+
+       *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
+ *               from the vlan id table.
+ * @vp: Vpath handle.
+ * @vid: vlan id to be deleted for this vpath from the list
+ *
+ * Deletes the given vlan id from the list for this vpath.
+ * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
+ * vxge_hw_vpath_vid_get_next
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       status = __vxge_hw_vpath_rts_table_set(vp,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
+                       VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
+                       0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Enable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_disable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       vpath = vp->vpath;
+
+       /* Enable promiscuous mode for function 0 only */
+       if (!(vpath->hldev->access_rights &
+               VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
+               return VXGE_HW_OK;
+
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
+
+               val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+                        VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+                        VXGE_HW_RXMAC_VCFG0_BCAST_EN |
+                        VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
+
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
+ * @vp: Vpath handle.
+ *
+ * Disable promiscuous mode of Titan-e operation.
+ *
+ * See also: vxge_hw_vpath_promisc_enable().
+ */
+enum vxge_hw_status vxge_hw_vpath_promisc_disable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       vpath = vp->vpath;
+
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+       if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
+
+               val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
+                          VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
+                          VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
+
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
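+
+/*
+ * Illustrative usage sketch: mirroring a net device's IFF_PROMISC flag onto
+ * a vpath with the two helpers above. On functions without MRPCIM access
+ * rights the enable call is a no-op, as noted in
+ * vxge_hw_vpath_promisc_enable(). The helper name and "dev" parameter are
+ * assumptions for illustration only.
+ */
+static void vxge_example_sync_promisc(struct __vxge_hw_vpath_handle *vp,
+                                     struct net_device *dev)
+{
+       if (dev->flags & IFF_PROMISC)
+               vxge_hw_vpath_promisc_enable(vp);
+       else
+               vxge_hw_vpath_promisc_disable(vp);
+}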
+
+/**
+ * vxge_hw_vpath_bcast_enable - Enable broadcast
+ * @vp: Vpath handle.
+ *
+ * Enable receiving broadcasts.
+ */
+enum vxge_hw_status vxge_hw_vpath_bcast_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       vpath = vp->vpath;
+
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
+               val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Enable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK on success.
+ *
+ */
+enum vxge_hw_status vxge_hw_vpath_mcast_enable(
+                       struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       vpath = vp->vpath;
+
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+       if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
+               val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
+ * @vp: Vpath handle.
+ *
+ * Disable Titan-e multicast addresses.
+ * Returns: VXGE_HW_OK - success.
+ * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
+ *
+ */
+enum vxge_hw_status
+vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath;
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       vpath = vp->vpath;
+
+       val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
+
+       if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
+               val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
+               writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
+       }
+exit:
+       return status;
+}
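+
+/*
+ * Illustrative usage sketch: the multicast helpers above used in the same
+ * way as the promiscuous pair, keyed off IFF_ALLMULTI. The helper name and
+ * "dev" parameter are assumptions for illustration only.
+ */
+static void vxge_example_sync_allmulti(struct __vxge_hw_vpath_handle *vp,
+                                      struct net_device *dev)
+{
+       if (dev->flags & IFF_ALLMULTI)
+               vxge_hw_vpath_mcast_enable(vp);
+       else
+               vxge_hw_vpath_mcast_disable(vp);
+}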
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+enum vxge_hw_status __vxge_hw_vpath_alarm_process(
+                       struct __vxge_hw_virtualpath *vpath,
+                       u32 skip_alarms)
+{
+       u64 val64;
+       u64 alarm_status;
+       u64 pic_status;
+       struct __vxge_hw_device *hldev = NULL;
+       enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+       u64 mask64;
+       struct vxge_hw_vpath_stats_sw_info *sw_stats;
+       struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+       if (vpath == NULL) {
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+                       alarm_event);
+               goto out;
+       }
+
+       hldev = vpath->hldev;
+       vp_reg = vpath->vp_reg;
+       alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+       if (alarm_status == VXGE_HW_ALL_FOXES) {
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+                       alarm_event);
+               goto out;
+       }
+
+       sw_stats = vpath->sw_stats;
+
+       if (alarm_status & ~(
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+               VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+               sw_stats->error_stats.unknown_alarms++;
+
+               alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+                       alarm_event);
+               goto out;
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+               val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+               if (val64 &
+               VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+                       val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+                       if (((val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+                           (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+                           ((val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+                               && (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+                       ))) {
+                               sw_stats->error_stats.network_sustained_fault++;
+
+                               writeq(
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+                                       &vp_reg->asic_ntwk_vp_err_mask);
+
+                               __vxge_hw_device_handle_link_down_ind(hldev);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+                       }
+
+                       if (((val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+                           (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+                           ((val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+                               && (!(val64 &
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+                       ))) {
+
+                               sw_stats->error_stats.network_sustained_ok++;
+
+                               writeq(
+                               VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+                                       &vp_reg->asic_ntwk_vp_err_mask);
+
+                               __vxge_hw_device_handle_link_up_ind(hldev);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_LINK_UP, alarm_event);
+                       }
+
+                       writeq(VXGE_HW_INTR_MASK_ALL,
+                               &vp_reg->asic_ntwk_vp_err_reg);
+
+                       alarm_event = VXGE_HW_SET_LEVEL(
+                               VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+                       if (skip_alarms)
+                               return VXGE_HW_OK;
+               }
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+               pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+               if (pic_status &
+                   VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+                       val64 = readq(&vp_reg->general_errors_reg);
+                       mask64 = readq(&vp_reg->general_errors_mask);
+
+                       if ((val64 &
+                               VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+                               ~mask64) {
+                               sw_stats->error_stats.ini_serr_det++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_SERR, alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+                               ~mask64) {
+                               sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+                               ~mask64)
+                               sw_stats->error_stats.statsb_pif_chain_error++;
+
+                       if ((val64 &
+                          VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+                               ~mask64)
+                               sw_stats->error_stats.statsb_drop_timeout++;
+
+                       if ((val64 &
+                               VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+                               ~mask64)
+                               sw_stats->error_stats.target_illegal_access++;
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->general_errors_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_ALARM_CLEARED,
+                                       alarm_event);
+                       }
+               }
+
+               if (pic_status &
+                   VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+                       val64 = readq(&vp_reg->kdfcctl_errors_reg);
+                       mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 &
+                           VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+                               ~mask64) {
+                               sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_FIFO_ERR,
+                                       alarm_event);
+                       }
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->kdfcctl_errors_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_ALARM_CLEARED,
+                                       alarm_event);
+                       }
+               }
+
+       }
+
+       if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+               val64 = readq(&vp_reg->wrdma_alarm_status);
+
+               if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+                       val64 = readq(&vp_reg->prc_alarm_reg);
+                       mask64 = readq(&vp_reg->prc_alarm_mask);
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
+                               ~mask64)
+                               sw_stats->error_stats.prc_ring_bumps++;
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+                               ~mask64) {
+                               sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_VPATH_ERR,
+                                       alarm_event);
+                       }
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+                               & ~mask64) {
+                               sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                               VXGE_HW_EVENT_VPATH_ERR,
+                                               alarm_event);
+                       }
+
+                       if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+                                & ~mask64) {
+                               sw_stats->error_stats.prc_quanta_size_err++;
+
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                       VXGE_HW_EVENT_VPATH_ERR,
+                                       alarm_event);
+                       }
+
+                       if (!skip_alarms) {
+                               writeq(VXGE_HW_INTR_MASK_ALL,
+                                       &vp_reg->prc_alarm_reg);
+                               alarm_event = VXGE_HW_SET_LEVEL(
+                                               VXGE_HW_EVENT_ALARM_CLEARED,
+                                               alarm_event);
+                       }
+               }
+       }
+out:
+       /* vpath (and hence hldev) may be NULL when we jump here early */
+       if (hldev)
+               hldev->stats.sw_dev_err_stats.vpath_alarms++;
+
+       if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+               (alarm_event == VXGE_HW_EVENT_UNKNOWN))
+               return VXGE_HW_OK;
+
+       __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+       if (alarm_event == VXGE_HW_EVENT_SERR)
+               return VXGE_HW_ERR_CRITICAL;
+
+       return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+               VXGE_HW_ERR_SLOT_FREEZE :
+               (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+               VXGE_HW_ERR_VPATH;
+}
+
+/*
+ * vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vp: Virtual Path handle.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+enum vxge_hw_status vxge_hw_vpath_alarm_process(
+                       struct __vxge_hw_vpath_handle *vp,
+                       u32 skip_alarms)
+{
+       enum vxge_hw_status status = VXGE_HW_OK;
+
+       if (vp == NULL) {
+               status = VXGE_HW_ERR_INVALID_HANDLE;
+               goto exit;
+       }
+
+       status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
+exit:
+       return status;
+}
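+
+/*
+ * Illustrative usage sketch: how an alarm interrupt handler might call
+ * vxge_hw_vpath_alarm_process(). The handler name, the use of dev_id as the
+ * vpath handle, and the need for <linux/interrupt.h> are assumptions for
+ * illustration only.
+ */
+static irqreturn_t vxge_example_alarm_isr(int irq, void *dev_id)
+{
+       struct __vxge_hw_vpath_handle *vp = dev_id;
+
+       /* skip_alarms == 0: clear the alarm registers as they are handled */
+       vxge_hw_vpath_alarm_process(vp, 0);
+
+       return IRQ_HANDLED;
+}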
+
+/**
+ * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
+ *                            alarms
+ * @vp: Virtual Path handle.
+ * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
+ *             interrupts (can be repeated). If a fifo or ring is not enabled,
+ *             the MSIX vector for it should be set to 0.
+ * @alarm_msix_id: MSIX vector for alarm.
+ *
+ * This API associates the given MSIX vector numbers with the four TIM
+ * interrupts and the alarm interrupt.
+ */
+enum vxge_hw_status
+vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
+                      int alarm_msix_id)
+{
+       u64 val64;
+       struct __vxge_hw_virtualpath *vpath = vp->vpath;
+       struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
+       u32 first_vp_id = vpath->hldev->first_vp_id;
+
+       val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
+                       (first_vp_id * 4) + tim_msix_id[0]) |
+               VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
+                       (first_vp_id * 4) + tim_msix_id[1]) |
+               VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
+                       (first_vp_id * 4) + tim_msix_id[2]) |
+               VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
+                       (first_vp_id * 4) + tim_msix_id[3]);
+
+       writeq(val64, &vp_reg->interrupt_cfg0);
+
+       writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
+                       (first_vp_id * 4) + alarm_msix_id),
+                       &vp_reg->interrupt_cfg2);
+
+       if (vpath->hldev->config.intr_mode ==
+                                       VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
+               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+                               VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
+                               0, 32), &vp_reg->one_shot_vect1_en);
+
+               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+                               VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
+                               0, 32), &vp_reg->one_shot_vect2_en);
+
+               __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
+                               VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
+                               0, 32), &vp_reg->one_shot_vect3_en);
+       }
+
+       return VXGE_HW_OK;
+}
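+
+/*
+ * Illustrative usage sketch: programming the per-vpath MSI-X routing with
+ * vxge_hw_vpath_msix_set(). The particular vector numbering (0 for the Tx
+ * TIM, 1 for the Rx TIM, 2 for the alarm) and the helper name are
+ * assumptions chosen only for illustration.
+ */
+static void vxge_example_msix_route(struct __vxge_hw_vpath_handle *vp)
+{
+       int tim_msix_id[VXGE_HW_MAX_INTR_PER_VP] = {0, 1, 0, 0};
+
+       vxge_hw_vpath_msix_set(vp, tim_msix_id, 2 /* alarm vector */);
+}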
+
+/**
+ * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id:  MSIX ID
+ *
+ * The function masks the msix interrupt for the given msix_id
+ *
+ * Returns: None.
+ */
+void
+vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+       struct __vxge_hw_device *hldev = vp->vpath->hldev;
+       __vxge_hw_pio_mem_write32_upper(
+               (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+                       (msix_id  / 4)), 0, 32),
+               &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
+
+       return;
+}
+
+/**
+ * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function clears the msix interrupt for the given msix_id
+ *
+ * Returns: None.
+ */
+void
+vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+       struct __vxge_hw_device *hldev = vp->vpath->hldev;
+       if (hldev->config.intr_mode ==
+                       VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
+               __vxge_hw_pio_mem_write32_upper(
+                       (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+                               (msix_id/4)), 0, 32),
+                               &hldev->common_reg->
+                                       clr_msix_one_shot_vec[msix_id%4]);
+       } else {
+               __vxge_hw_pio_mem_write32_upper(
+                       (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+                               (msix_id/4)), 0, 32),
+                               &hldev->common_reg->
+                                       clear_msix_mask_vect[msix_id%4]);
+       }
+
+       return;
+}
+
+/**
+ * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
+ * @vp: Virtual Path handle.
+ * @msix_id: MSIX ID
+ *
+ * The function unmasks the msix interrupt for the given msix_id
+ *
+ * Returns: None.
+ */
+void
+vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
+{
+       struct __vxge_hw_device *hldev = vp->vpath->hldev;
+       __vxge_hw_pio_mem_write32_upper(
+                       (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id +
+                       (msix_id/4)), 0, 32),
+                       &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
+
+       return;
+}
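+
+/*
+ * Illustrative usage sketch: the usual flow for one per-vpath MSI-X vector
+ * with the helpers above - mask it, service the associated ring or fifo,
+ * then re-arm it. In one-shot interrupt mode the clear call is what re-arms
+ * the vector; otherwise the unmask alone is sufficient. The helper name is
+ * an assumption for illustration only.
+ */
+static void vxge_example_service_msix(struct __vxge_hw_vpath_handle *vp,
+                                     int msix_id)
+{
+       vxge_hw_vpath_msix_mask(vp, msix_id);
+
+       /* ... process completions for the ring/fifo behind this vector ... */
+
+       vxge_hw_vpath_msix_clear(vp, msix_id);
+       vxge_hw_vpath_msix_unmask(vp, msix_id);
+}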
+
+/**
+ * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
+ * @vp: Virtual Path handle.
+ *
+ * The function masks all msix interrupts for the given vpath
+ *
+ */
+void
+vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
+{
+
+       __vxge_hw_pio_mem_write32_upper(
+               (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
+               &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
+
+       return;
+}
+
+/**
+ * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Mask Tx and Rx vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_inta_unmask_tx_rx()
+ */
+void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
+{
+       u64     tim_int_mask0[4] = {[0 ...3] = 0};
+       u32     tim_int_mask1[4] = {[0 ...3] = 0};
+       u64     val64;
+       struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+       VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
+               tim_int_mask1, vp->vpath->vp_id);
+
+       val64 = readq(&hldev->common_reg->tim_int_mask0);
+
+       if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+               (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+                       tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
+                       &hldev->common_reg->tim_int_mask0);
+       }
+
+       val64 = readl(&hldev->common_reg->tim_int_mask1);
+
+       if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+               (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               __vxge_hw_pio_mem_write32_upper(
+                       (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+                       tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
+                       &hldev->common_reg->tim_int_mask1);
+       }
+
+       return;
+}
+
+/**
+ * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
+ * @vp: Virtual Path handle.
+ *
+ * Unmask Tx and Rx vpath interrupts.
+ *
+ * See also: vxge_hw_vpath_inta_mask_tx_rx()
+ */
+void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
+{
+       u64     tim_int_mask0[4] = {[0 ...3] = 0};
+       u32     tim_int_mask1[4] = {[0 ...3] = 0};
+       u64     val64;
+       struct __vxge_hw_device *hldev = vp->vpath->hldev;
+
+       VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
+               tim_int_mask1, vp->vpath->vp_id);
+
+       val64 = readq(&hldev->common_reg->tim_int_mask0);
+
+       if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
+                       tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
+                       &hldev->common_reg->tim_int_mask0);
+       }
+
+       if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
+          (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
+               __vxge_hw_pio_mem_write32_upper(
+                       (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
+                         tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
+                       &hldev->common_reg->tim_int_mask1);
+       }
+
+       return;
+}
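+
+/*
+ * Illustrative usage sketch: a typical INTA flow, with the Tx/Rx sources
+ * masked while a poll routine drains completions and unmasked once it is
+ * done. The helper name is an assumption for illustration only.
+ */
+static void vxge_example_inta_flow(struct __vxge_hw_vpath_handle *vp)
+{
+       vxge_hw_vpath_inta_mask_tx_rx(vp);
+
+       /* ... drain Rx/Tx completions, e.g. via the poll helpers below ... */
+
+       vxge_hw_vpath_inta_unmask_tx_rx(vp);
+}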
+
+/**
+ * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
+ * descriptors and process the same.
+ * @ring: Handle to the ring object used for receive
+ *
+ * The function polls the Rx for the completed descriptors and calls
+ * the driver via the supplied completion callback.
+ *
+ * Returns: VXGE_HW_OK, if the polling is completed successfully.
+ * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: vxge_hw_vpath_poll_tx()
+ */
+enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
+{
+       u8 t_code;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       void *first_rxdh;
+       u64 val64 = 0;
+       int new_count = 0;
+
+       ring->cmpl_cnt = 0;
+
+       status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
+       if (status == VXGE_HW_OK)
+               ring->callback(ring, first_rxdh,
+                       t_code, ring->channel.userdata);
+
+       if (ring->cmpl_cnt != 0) {
+               ring->doorbell_cnt += ring->cmpl_cnt;
+               if (ring->doorbell_cnt >= ring->rxds_limit) {
+                       /*
+                        * Each RxD is of 4 qwords, update the number of
+                        * qwords replenished
+                        */
+                       new_count = (ring->doorbell_cnt * 4);
+
+                       /* For each block add 4 more qwords */
+                       ring->total_db_cnt += ring->doorbell_cnt;
+                       if (ring->total_db_cnt >= ring->rxds_per_block) {
+                               new_count += 4;
+                               /* Reset total count */
+                               ring->total_db_cnt %= ring->rxds_per_block;
+                       }
+                       writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
+                               &ring->vp_reg->prc_rxd_doorbell);
+                       val64 =
+                         readl(&ring->common_reg->titan_general_int_status);
+                       ring->doorbell_cnt = 0;
+               }
+       }
+
+       return status;
+}
+
+/**
+ * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
+ * the same.
+ * @fifo: Handle to the fifo object used for non offload send
+ * @skb_ptr: Returned list of completed transmit buffers for the caller
+ *           to free.
+ *
+ * The function polls the Tx for the completed descriptors and calls
+ * the driver via the supplied completion callback.
+ *
+ * Returns: VXGE_HW_OK, if the polling is completed successfully.
+ * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
+ * descriptors available which are yet to be processed.
+ *
+ * See also: vxge_hw_vpath_poll_rx().
+ */
+enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
+                                       void **skb_ptr)
+{
+       enum vxge_hw_fifo_tcode t_code;
+       void *first_txdlh;
+       enum vxge_hw_status status = VXGE_HW_OK;
+       struct __vxge_hw_channel *channel;
+
+       channel = &fifo->channel;
+
+       status = vxge_hw_fifo_txdl_next_completed(fifo,
+                               &first_txdlh, &t_code);
+       if (status == VXGE_HW_OK)
+               if (fifo->callback(fifo, first_txdlh,
+                       t_code, channel->userdata, skb_ptr) != VXGE_HW_OK)
+                       status = VXGE_HW_COMPLETIONS_REMAIN;
+
+       return status;
+}
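+
+/*
+ * Illustrative usage sketch: draining one vpath's Rx and Tx completions,
+ * e.g. from a NAPI poll routine. The helper name, the ring and fifo
+ * pointers and the returned skb list are assumptions; completed descriptors
+ * are delivered through the callbacks registered when the channels were
+ * opened.
+ */
+static void vxge_example_poll_vpath(struct __vxge_hw_ring *ring,
+                                   struct __vxge_hw_fifo *fifo)
+{
+       void *completed_skbs = NULL;
+
+       vxge_hw_vpath_poll_rx(ring);
+       vxge_hw_vpath_poll_tx(fifo, &completed_skbs);
+
+       /* caller is expected to free the skbs handed back in completed_skbs */
+}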
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
new file mode 100644 (file)
index 0000000..7567a11
--- /dev/null
@@ -0,0 +1,2409 @@
+/******************************************************************************
+ * This software may be used and distributed according to the terms of
+ * the GNU General Public License (GPL), incorporated herein by reference.
+ * Drivers based on or derived from this code fall under the GPL and must
+ * retain the authorship, copyright and license notice.  This file is not
+ * a complete program and may only be used when the entire operating
+ * system is licensed under the GPL.
+ * See the file COPYING in this distribution for more information.
+ *
+ * vxge-traffic.h: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
+ *                 Virtualized Server Adapter.
+ * Copyright(c) 2002-2009 Neterion Inc.
+ ******************************************************************************/
+#ifndef VXGE_TRAFFIC_H
+#define VXGE_TRAFFIC_H
+
+#include "vxge-reg.h"
+#include "vxge-version.h"
+
+#define VXGE_HW_DTR_MAX_T_CODE         16
+#define VXGE_HW_ALL_FOXES              0xFFFFFFFFFFFFFFFFULL
+#define VXGE_HW_INTR_MASK_ALL          0xFFFFFFFFFFFFFFFFULL
+#define        VXGE_HW_MAX_VIRTUAL_PATHS       17
+
+#define VXGE_HW_MAC_MAX_MAC_PORT_ID    2
+
+#define VXGE_HW_DEFAULT_32             0xffffffff
+/* frame sizes */
+#define VXGE_HW_HEADER_802_2_SIZE      3
+#define VXGE_HW_HEADER_SNAP_SIZE       5
+#define VXGE_HW_HEADER_VLAN_SIZE       4
+#define VXGE_HW_MAC_HEADER_MAX_SIZE \
+                       (ETH_HLEN + \
+                       VXGE_HW_HEADER_802_2_SIZE + \
+                       VXGE_HW_HEADER_VLAN_SIZE + \
+                       VXGE_HW_HEADER_SNAP_SIZE)
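+
+/* Illustrative note: with ETH_HLEN = 14, the worst-case L2 header above
+ * works out to 14 + 3 + 4 + 5 = 26 bytes. */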
+
+#define VXGE_HW_TCPIP_HEADER_MAX_SIZE  (64 + 64)
+
+/* 32bit alignments */
+#define VXGE_HW_HEADER_ETHERNET_II_802_3_ALIGN         2
+#define VXGE_HW_HEADER_802_2_SNAP_ALIGN                        2
+#define VXGE_HW_HEADER_802_2_ALIGN                     3
+#define VXGE_HW_HEADER_SNAP_ALIGN                      1
+
+#define VXGE_HW_L3_CKSUM_OK                            0xFFFF
+#define VXGE_HW_L4_CKSUM_OK                            0xFFFF
+
+/* Forward declarations */
+struct __vxge_hw_device;
+struct __vxge_hw_vpath_handle;
+struct vxge_hw_vp_config;
+struct __vxge_hw_virtualpath;
+struct __vxge_hw_channel;
+struct __vxge_hw_fifo;
+struct __vxge_hw_ring;
+struct vxge_hw_ring_attr;
+struct vxge_hw_mempool;
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/*VXGE_HW_STATUS_H*/
+
+#define VXGE_HW_EVENT_BASE                     0
+#define VXGE_LL_EVENT_BASE                     100
+
+/**
+ * enum vxge_hw_event- Enumerates slow-path HW events.
+ * @VXGE_HW_EVENT_UNKNOWN: Unknown (and invalid) event.
+ * @VXGE_HW_EVENT_SERR: Serious vpath hardware error event.
+ * @VXGE_HW_EVENT_ECCERR: vpath ECC error event.
+ * @VXGE_HW_EVENT_VPATH_ERR: Error local to the respective vpath
+ * @VXGE_HW_EVENT_FIFO_ERR: FIFO Doorbell fifo error.
+ * @VXGE_HW_EVENT_SRPCIM_SERR: srpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_SERR: mrpcim hardware error event.
+ * @VXGE_HW_EVENT_MRPCIM_ECCERR: mrpcim ecc error event.
+ * @VXGE_HW_EVENT_RESET_START: Privileged entity is starting device reset
+ * @VXGE_HW_EVENT_RESET_COMPLETE: Device reset has been completed
+ * @VXGE_HW_EVENT_SLOT_FREEZE: Slot-freeze event. Driver tries to distinguish
+ * slot-freeze from the rest of the critical events (e.g. ECC) when it is
+ * impossible to PIO read "through" the bus, i.e. when getting all-foxes.
+ *
+ * enum vxge_hw_event enumerates slow-path HW events.
+ *
+ * See also: struct vxge_hw_uld_cbs{}, vxge_uld_link_up_f{},
+ * vxge_uld_link_down_f{}.
+ */
+enum vxge_hw_event {
+       VXGE_HW_EVENT_UNKNOWN           = 0,
+       /* HW events */
+       VXGE_HW_EVENT_RESET_START       = VXGE_HW_EVENT_BASE + 1,
+       VXGE_HW_EVENT_RESET_COMPLETE    = VXGE_HW_EVENT_BASE + 2,
+       VXGE_HW_EVENT_LINK_DOWN         = VXGE_HW_EVENT_BASE + 3,
+       VXGE_HW_EVENT_LINK_UP           = VXGE_HW_EVENT_BASE + 4,
+       VXGE_HW_EVENT_ALARM_CLEARED     = VXGE_HW_EVENT_BASE + 5,
+       VXGE_HW_EVENT_ECCERR            = VXGE_HW_EVENT_BASE + 6,
+       VXGE_HW_EVENT_MRPCIM_ECCERR     = VXGE_HW_EVENT_BASE + 7,
+       VXGE_HW_EVENT_FIFO_ERR          = VXGE_HW_EVENT_BASE + 8,
+       VXGE_HW_EVENT_VPATH_ERR         = VXGE_HW_EVENT_BASE + 9,
+       VXGE_HW_EVENT_CRITICAL_ERR      = VXGE_HW_EVENT_BASE + 10,
+       VXGE_HW_EVENT_SERR              = VXGE_HW_EVENT_BASE + 11,
+       VXGE_HW_EVENT_SRPCIM_SERR       = VXGE_HW_EVENT_BASE + 12,
+       VXGE_HW_EVENT_MRPCIM_SERR       = VXGE_HW_EVENT_BASE + 13,
+       VXGE_HW_EVENT_SLOT_FREEZE       = VXGE_HW_EVENT_BASE + 14,
+};
+
+#define VXGE_HW_SET_LEVEL(a, b) (((a) > (b)) ? (a) : (b))
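+
+/*
+ * Illustrative note: VXGE_HW_SET_LEVEL() keeps the most severe event seen
+ * so far, relying on the ordering of enum vxge_hw_event above. For example:
+ *
+ *     e = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_VPATH_ERR, VXGE_HW_EVENT_UNKNOWN);
+ *     e = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_ALARM_CLEARED, e);
+ *
+ * leaves e equal to VXGE_HW_EVENT_VPATH_ERR, since the "alarm cleared"
+ * event is ranked lower than the vpath error.
+ */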
+
+/*
+ * struct vxge_hw_mempool_dma - Represents DMA objects passed to the
+       caller.
+ */
+struct vxge_hw_mempool_dma {
+       dma_addr_t                      addr;
+       struct pci_dev *handle;
+       struct pci_dev *acc_handle;
+};
+
+/*
+ * vxge_hw_mempool_item_f  - Mempool item alloc/free callback
+ * @mempoolh: Memory pool handle.
+ * @memblock: Address of memory block
+ * @memblock_index: Index of memory block
+ * @item: Item that gets allocated or freed.
+ * @index: Item's index in the memory pool.
+ * @is_last: True, if this item is the last one in the pool; false - otherwise.
+ * @userdata: Per-pool user context.
+ *
+ * Memory pool allocation/deallocation callback.
+ */
+
+/*
+ * struct vxge_hw_mempool - Memory pool.
+ */
+struct vxge_hw_mempool {
+
+       void (*item_func_alloc)(
+       struct vxge_hw_mempool *mempoolh,
+       u32                     memblock_index,
+       struct vxge_hw_mempool_dma      *dma_object,
+       u32                     index,
+       u32                     is_last);
+
+       void            *userdata;
+       void            **memblocks_arr;
+       void            **memblocks_priv_arr;
+       struct vxge_hw_mempool_dma      *memblocks_dma_arr;
+       struct __vxge_hw_device *devh;
+       u32                     memblock_size;
+       u32                     memblocks_max;
+       u32                     memblocks_allocated;
+       u32                     item_size;
+       u32                     items_max;
+       u32                     items_initial;
+       u32                     items_current;
+       u32                     items_per_memblock;
+       void            **items_arr;
+       u32                     items_priv_size;
+};
+
+#define        VXGE_HW_MAX_INTR_PER_VP                         4
+#define        VXGE_HW_VPATH_INTR_TX                           0
+#define        VXGE_HW_VPATH_INTR_RX                           1
+#define        VXGE_HW_VPATH_INTR_EINTA                        2
+#define        VXGE_HW_VPATH_INTR_BMAP                         3
+
+#define VXGE_HW_BLOCK_SIZE                             4096
+
+/**
+ * struct vxge_hw_tim_intr_config - Titan Tim interrupt configuration.
+ * @intr_enable: Set to 1, if interrupt is enabled.
+ * @btimer_val: Boundary Timer Initialization value in units of 272 ns.
+ * @timer_ac_en: Timer Automatic Cancel. 1 : Automatic Canceling Enable: when
+ *             asserted, other interrupt-generating entities will cancel the
+ *             scheduled timer interrupt.
+ * @timer_ci_en: Timer Continuous Interrupt. 1 : Continuous Interrupting Enable:
+ *             When asserted, an interrupt will be generated every time the
+ *             boundary timer expires, even if no traffic has been transmitted
+ *             on this interrupt.
+ * @timer_ri_en: Timer Consecutive (Re-) Interrupt 1 : Consecutive
+ *             (Re-) Interrupt Enable: When asserted, an interrupt will be
+ *             generated the next time the timer expires, even if no traffic has
+ *             been transmitted on this interrupt. (This will only happen once
+ *             each time that this value is written to the TIM.) This bit is
+ *             cleared by H/W at the end of the current-timer-interval when
+ *             the interrupt is triggered.
+ * @rtimer_val: Restriction Timer Initialization value in units of 272 ns.
+ * @util_sel: Utilization Selector. Selects which of the workload approximations
+ *             to use (e.g. legacy Tx utilization, Tx/Rx utilization, host
+ *             specified utilization etc.), selects one of
+ *             the 17 host configured values.
+ *             0-Virtual Path 0
+ *             1-Virtual Path 1
+ *             ...
+ *             16-Virtual Path 16
+ *             17-Legacy Tx network utilization, provided by TPA
+ *             18-Legacy Rx network utilization, provided by FAU
+ *             19-Average of legacy Rx and Tx utilization calculated from link
+ *                utilization values.
+ *             20-31-Invalid configurations
+ *             32-Host utilization for Virtual Path 0
+ *             33-Host utilization for Virtual Path 1
+ *             ...
+ *             48-Host utilization for Virtual Path 16
+ *             49-Legacy Tx network utilization, provided by TPA
+ *             50-Legacy Rx network utilization, provided by FAU
+ *             51-Average of legacy Rx and Tx utilization calculated from
+ *                link utilization values.
+ *             52-63-Invalid configurations
+ * @ltimer_val: Latency Timer Initialization Value in units of 272 ns.
+ * @txd_cnt_en: TxD Return Event Count Enable. This configuration bit when set
+ *             to 1 enables counting of TxD0 returns (signalled by PCC's),
+ *             towards utilization event count values.
+ * @urange_a: Defines the upper limit (in percent) for this utilization range
+ *             to be active. This range is considered active
+ *             if 0 <= UTIL <= URNG_A
+ *             and the UEC_A field (below) is non-zero.
+ * @uec_a: Utilization Event Count A. If this range is active, the adapter will
+ *             wait until UEC_A events have occurred on the interrupt before
+ *             generating an interrupt.
+ * @urange_b: Link utilization range B.
+ * @uec_b: Utilization Event Count B.
+ * @urange_c: Link utilization range C.
+ * @uec_c: Utilization Event Count C.
+ * @urange_d: Link utilization range D.
+ * @uec_d: Utilization Event Count D.
+ * Traffic Interrupt Controller Module interrupt configuration.
+ */
+struct vxge_hw_tim_intr_config {
+
+       u32                             intr_enable;
+#define VXGE_HW_TIM_INTR_ENABLE                                1
+#define VXGE_HW_TIM_INTR_DISABLE                               0
+#define VXGE_HW_TIM_INTR_DEFAULT                               0
+
+       u32                             btimer_val;
+#define VXGE_HW_MIN_TIM_BTIMER_VAL                             0
+#define VXGE_HW_MAX_TIM_BTIMER_VAL                             67108864
+#define VXGE_HW_USE_FLASH_DEFAULT                              0xffffffff
+
+       u32                             timer_ac_en;
+#define VXGE_HW_TIM_TIMER_AC_ENABLE                            1
+#define VXGE_HW_TIM_TIMER_AC_DISABLE                           0
+
+       u32                             timer_ci_en;
+#define VXGE_HW_TIM_TIMER_CI_ENABLE                            1
+#define VXGE_HW_TIM_TIMER_CI_DISABLE                           0
+
+       u32                             timer_ri_en;
+#define VXGE_HW_TIM_TIMER_RI_ENABLE                            1
+#define VXGE_HW_TIM_TIMER_RI_DISABLE                           0
+
+       u32                             rtimer_val;
+#define VXGE_HW_MIN_TIM_RTIMER_VAL                             0
+#define VXGE_HW_MAX_TIM_RTIMER_VAL                             67108864
+
+       u32                             util_sel;
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL                17
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_RX_NET_UTIL                18
+#define VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_RX_AVE_NET_UTIL         19
+#define VXGE_HW_TIM_UTIL_SEL_PER_VPATH                         63
+
+       u32                             ltimer_val;
+#define VXGE_HW_MIN_TIM_LTIMER_VAL                             0
+#define VXGE_HW_MAX_TIM_LTIMER_VAL                             67108864
+
+       /* Line utilization interrupts */
+       u32                             urange_a;
+#define VXGE_HW_MIN_TIM_URANGE_A                               0
+#define VXGE_HW_MAX_TIM_URANGE_A                               100
+
+       u32                             uec_a;
+#define VXGE_HW_MIN_TIM_UEC_A                                  0
+#define VXGE_HW_MAX_TIM_UEC_A                                  65535
+
+       u32                             urange_b;
+#define VXGE_HW_MIN_TIM_URANGE_B                               0
+#define VXGE_HW_MAX_TIM_URANGE_B                               100
+
+       u32                             uec_b;
+#define VXGE_HW_MIN_TIM_UEC_B                                  0
+#define VXGE_HW_MAX_TIM_UEC_B                                  65535
+
+       u32                             urange_c;
+#define VXGE_HW_MIN_TIM_URANGE_C                               0
+#define VXGE_HW_MAX_TIM_URANGE_C                               100
+
+       u32                             uec_c;
+#define VXGE_HW_MIN_TIM_UEC_C                                  0
+#define VXGE_HW_MAX_TIM_UEC_C                                  65535
+
+       u32                             uec_d;
+#define VXGE_HW_MIN_TIM_UEC_D                                  0
+#define VXGE_HW_MAX_TIM_UEC_D                                  65535
+};
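+
+/*
+ * Illustrative sketch: one way the structure above could be filled in. The
+ * values and the variable name are assumptions chosen only to show the
+ * units (the timer fields are in multiples of 272 ns); real defaults live
+ * in the driver's configuration code, not in this header.
+ */
+static const struct vxge_hw_tim_intr_config vxge_example_tim_cfg = {
+       .intr_enable    = VXGE_HW_TIM_INTR_ENABLE,
+       .btimer_val     = 250,  /* boundary timer: ~250 * 272 ns */
+       .timer_ac_en    = VXGE_HW_TIM_TIMER_AC_ENABLE,
+       .timer_ci_en    = VXGE_HW_TIM_TIMER_CI_DISABLE,
+       .timer_ri_en    = VXGE_HW_TIM_TIMER_RI_DISABLE,
+       .rtimer_val     = 0,
+       .util_sel       = VXGE_HW_TIM_UTIL_SEL_LEGACY_TX_NET_UTIL,
+       .ltimer_val     = 100,  /* latency timer: ~100 * 272 ns */
+       .urange_a       = 5,
+       .uec_a          = 1,
+       .urange_b       = 10,
+       .uec_b          = 2,
+       .urange_c       = 50,
+       .uec_c          = 4,
+       .uec_d          = 8,
+};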
+
+#define        VXGE_HW_STATS_OP_READ                                   0
+#define        VXGE_HW_STATS_OP_CLEAR_STAT                             1
+#define        VXGE_HW_STATS_OP_CLEAR_ALL_VPATH_STATS                  2
+#define        VXGE_HW_STATS_OP_CLEAR_ALL_STATS_OF_LOC                 2
+#define        VXGE_HW_STATS_OP_CLEAR_ALL_STATS                        3
+
+#define        VXGE_HW_STATS_LOC_AGGR                                  17
+#define VXGE_HW_STATS_AGGRn_OFFSET                             0x00720
+
+#define VXGE_HW_STATS_VPATH_TX_OFFSET                          0x0
+#define VXGE_HW_STATS_VPATH_RX_OFFSET                          0x00090
+
+#define        VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET        (0x001d0 >> 3)
+#define        VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(bits) \
+                                               vxge_bVALn(bits, 0, 32)
+
+#define        VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(bits) \
+                                               vxge_bVALn(bits, 32, 32)
+
+#define        VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET        (0x001d8 >> 3)
+#define        VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(bits) \
+                                               vxge_bVALn(bits, 0, 32)
+
+#define        VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(bits) \
+                                               vxge_bVALn(bits, 32, 32)
+
+/**
+ * struct vxge_hw_xmac_aggr_stats - Per-Aggregator XMAC Statistics
+ *
+ * @tx_frms: Count of data frames transmitted on this Aggregator on all
+ *             its Aggregation ports. Does not include LACPDUs or Marker PDUs.
+ *             However, does include frames discarded by the Distribution
+ *             function.
+ * @tx_data_octets: Count of data and padding octets of frames transmitted
+ *             on this Aggregator on all its Aggregation ports. Does not include
+ *             octets of LACPDUs or Marker PDUs. However, does include octets of
+ *             frames discarded by the Distribution function.
+ * @tx_mcast_frms: Count of data frames transmitted (to a group destination
+ *             address other than the broadcast address) on this Aggregator on
+ *             all its Aggregation ports. Does not include LACPDUs or Marker
+ *             PDUs. However, does include frames discarded by the Distribution
+ *             function.
+ * @tx_bcast_frms: Count of broadcast data frames transmitted on this Aggregator
+ *             on all its Aggregation ports. Does not include LACPDUs or Marker
+ *             PDUs. However, does include frames discarded by the Distribution
+ *             function.
+ * @tx_discarded_frms: Count of data frames to be transmitted on this Aggregator
+ *             that are discarded by the Distribution function. This occurs when
+ *             conversations are allocated to different ports and have to be
+ *             flushed on old ports
+ * @tx_errored_frms: Count of data frames transmitted on this Aggregator that
+ *             experience transmission errors on its Aggregation ports.
+ * @rx_frms: Count of data frames received on this Aggregator on all its
+ *             Aggregation ports. Does not include LACPDUs or Marker PDUs.
+ *             Also, does not include frames discarded by the Collection
+ *             function.
+ * @rx_data_octets: Count of data and padding octets of frames received on this
+ *             Aggregator on all its Aggregation ports. Does not include octets
+ *             of LACPDUs or Marker PDUs. Also, does not include
+ *             octets of frames
+ *             discarded by the Collection function.
+ * @rx_mcast_frms: Count of data frames received (from a group destination
+ *             address other than the broadcast address) on this Aggregator on
+ *             all its Aggregation ports. Does not include LACPDUs or Marker
+ *             PDUs. Also, does not include frames discarded by the Collection
+ *             function.
+ * @rx_bcast_frms: Count of broadcast data frames received on this Aggregator on
+ *             all its Aggregation ports. Does not include LACPDUs or Marker
+ *             PDUs. Also, does not include frames discarded by the Collection
+ *             function.
+ * @rx_discarded_frms: Count of data frames received on this Aggregator that are
+ *             discarded by the Collection function because the Collection
+ *             function was disabled on the port on which the frames were
+ *             received.
+ * @rx_errored_frms: Count of data frames received on this Aggregator that are
+ *             discarded by its Aggregation ports, or are discarded by the
+ *             Collection function of the Aggregator, or that are discarded by
+ *             the Aggregator due to detection of an illegal Slow Protocols PDU.
+ * @rx_unknown_slow_proto_frms: Count of data frames received on this Aggregator
+ *             that are discarded by its Aggregation ports due to detection of
+ *             an unknown Slow Protocols PDU.
+ *
+ * Per-Aggregator XMAC statistics.
+ */
+struct vxge_hw_xmac_aggr_stats {
+/*0x000*/              u64     tx_frms;
+/*0x008*/              u64     tx_data_octets;
+/*0x010*/              u64     tx_mcast_frms;
+/*0x018*/              u64     tx_bcast_frms;
+/*0x020*/              u64     tx_discarded_frms;
+/*0x028*/              u64     tx_errored_frms;
+/*0x030*/              u64     rx_frms;
+/*0x038*/              u64     rx_data_octets;
+/*0x040*/              u64     rx_mcast_frms;
+/*0x048*/              u64     rx_bcast_frms;
+/*0x050*/              u64     rx_discarded_frms;
+/*0x058*/              u64     rx_errored_frms;
+/*0x060*/              u64     rx_unknown_slow_proto_frms;
+} __packed;
+
+/**
+ * struct vxge_hw_xmac_port_stats - XMAC Port Statistics
+ *
+ * @tx_ttl_frms: Count of successfully transmitted MAC frames
+ * @tx_ttl_octets: Count of total octets of transmitted frames, not including
+ *            framing characters (i.e. less framing bits). To determine the
+ *            total octets of transmitted frames, including framing characters,
+ *            multiply PORTn_TX_TTL_FRMS by 8 and add it to this stat (unless
+ *            otherwise configured, this stat only counts frames that have
+ *            8 bytes of preamble for each frame). This stat can be configured
+ *            (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything
+ *            including the preamble octets.
+ * @tx_data_octets: Count of data and padding octets of successfully transmitted
+ *            frames.
+ * @tx_mcast_frms: Count of successfully transmitted frames to a group address
+ *            other than the broadcast address.
+ * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
+ *            group address.
+ * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
+ *            Includes discarded frames that are not sent to the network.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
+ * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
+ * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
+ *            are passed to the network.
+ * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent
+ *            due to problems within ICMP.
+ * @tx_tcp: Count of transmitted TCP segments. Does not include segments
+ *            containing retransmitted octets.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
+ * @tx_udp: Count of transmitted UDP datagrams.
+ * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
+ *            generally occurs when a packet is corrupt somehow, including
+ *            packets that have IP version mismatches, invalid Layer 2 control
+ *            fields, etc. L3/L4 checksums are not offloaded, but the packet
+ *            is still transmitted.
+ * @tx_unknown_protocol: Increments when the TPA encounters an unknown
+ *            protocol, such as a new IPv6 extension header, or an unsupported
+ *            Routing Type. The packet still has a checksum calculated but it
+ *            may be incorrect.
+ * @tx_pause_ctrl_frms: Count of MAC PAUSE control frames that are transmitted.
+ *            Since the only control frames supported by this device are
+ *            PAUSE frames, this register is a count of all transmitted MAC
+ *            control frames.
+ * @tx_marker_pdu_frms: Count of Marker PDUs transmitted
+ * on this Aggregation port.
+ * @tx_lacpdu_frms: Count of LACPDUs transmitted on this Aggregation port.
+ * @tx_drop_ip: Count of transmitted IP datagrams that could not be passed to
+ *            the network. Increments because of:
+ *            1) An internal processing error
+ *            (such as an uncorrectable ECC error). 2) A frame parsing error
+ *            during IP checksum calculation.
+ * @tx_marker_resp_pdu_frms: Count of Marker Response PDUs transmitted on this
+ *            Aggregation port.
+ * @tx_xgmii_char2_match: Maintains a count of the number of transmitted XGMII
+ *            characters that match a pattern that is programmable through
+ *            register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
+ *            is set to /T/ (i.e. the terminate character), thus the statistic
+ *            tracks the number of transmitted Terminate characters.
+ * @tx_xgmii_char1_match: Maintains a count of the number of transmitted XGMII
+ *            characters that match a pattern that is programmable through
+ *            register XMAC_STATS_TX_XGMII_CHAR_PORTn. By default, the pattern
+ *            is set to /S/ (i.e. the start character),
+ *            thus the statistic tracks
+ *            the number of transmitted Start characters.
+ * @tx_xgmii_column2_match: Maintains a count of the number of transmitted XGMII
+ *            columns that match a pattern that is programmable through register
+ *            XMAC_STATS_TX_XGMII_COLUMN2_PORTn. By default, the pattern is set
+ *            to 4 x /E/ (i.e. a column containing all error characters), thus
+ *            the statistic tracks the number of Error columns transmitted at
+ *            any time. If XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is
+ *            set to 1, then this stat increments when COLUMN2 is found within
+ *            'n' clocks after COLUMN1. Here, 'n' is defined by
+ *            XMAC_STATS_TX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set
+ *            to 0, then it means to search anywhere for COLUMN2).
+ * @tx_xgmii_column1_match: Maintains a count of the number of transmitted XGMII
+ *            columns that match a pattern that is programmable through register
+ *            XMAC_STATS_TX_XGMII_COLUMN1_PORTn. By default, the pattern is set
+ *            to 4 x /I/ (i.e. a column containing all idle characters),
+ *            thus the statistic tracks the number of transmitted Idle columns.
+ * @tx_any_err_frms: Count of transmitted frames containing any error that
+ *            prevents them from being passed to the network. Increments if
+ *            there is an ECC while reading the frame out of the transmit
+ *            buffer. Also increments if the transmit protocol assist (TPA)
+ *            block determines that the frame should not be sent.
+ * @tx_drop_frms: Count of frames that could not be sent for no other reason
+ *            than internal MAC processing. Increments once whenever the
+ *            transmit buffer is flushed (due to an ECC error on a memory
+ *            descriptor).
+ * @rx_ttl_frms: Count of total received MAC frames, including frames received
+ *            with frame-too-long, FCS, or length errors. This stat can be
+ *            configured (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count
+ *            everything, even "frames" as small as one byte of preamble.
+ * @rx_vld_frms: Count of successfully received MAC frames. Does not include
+ *            frames received with frame-too-long, FCS, or length errors.
+ * @rx_offload_frms: Count of offloaded received frames that are passed to
+ *            the host.
+ * @rx_ttl_octets: Count of total octets of received frames, not including
+ *            framing characters (i.e. less framing bits). To determine the
+ *            total octets of received frames, including framing characters,
+ *            multiply PORTn_RX_TTL_FRMS by 8 and add it to this stat (unless
+ *            otherwise configured, this stat only counts frames that have 8
+ *            bytes of preamble for each frame). This stat can be configured
+ *            (see XMAC_STATS_GLOBAL_CFG.TTL_FRMS_HANDLING) to count everything,
+ *            even the preamble octets of "frames" as small as one byte of
+ *            preamble.
+ * @rx_data_octets: Count of data and padding octets of successfully received
+ *            frames. Does not include frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_offload_octets: Count of total octets, not including framing
+ *            characters, of offloaded received frames that are passed
+ *            to the host.
+ * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
+ *           nonbroadcast group address. Does not include frames received
+ *            with frame-too-long, FCS, or length errors.
+ * @rx_vld_bcast_frms: Count of successfully received MAC frames containing
+ *            the broadcast group address. Does not include frames received
+ *            with frame-too-long, FCS, or length errors.
+ * @rx_accepted_ucast_frms: Count of successfully received frames containing
+ *            a unicast address. Only includes frames that are passed to
+ *            the system.
+ * @rx_accepted_nucast_frms: Count of successfully received frames containing
+ *            a non-unicast (broadcast or multicast) address. Only includes
+ *            frames that are passed to the system. Could include, for instance,
+ *            non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
+ *            register is set to pass FCS-errored frames to the host.
+ * @rx_tagged_frms: Count of received frames containing a VLAN tag.
+ * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
+ *            + 18 bytes (+ 22 bytes if VLAN-tagged).
+ * @rx_usized_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) less than 64 octets, that are otherwise well-formed.
+ *            In other words, counts runts.
+ * @rx_osized_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) more than 1518 octets, that are otherwise
+ *            well-formed. Note: If register XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING
+ *            is set to 1, then "more than 1518 octets" becomes "more than 1518
+ *            (1522 if VLAN-tagged) octets".
+ * @rx_frag_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) less than 64 octets that had bad FCS. In other
+ *            words, counts fragments.
+ * @rx_jabber_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) more than 1518 octets that had bad FCS. In other
+ *            words, counts jabbers. Note: If register
+ *            XMAC_STATS_GLOBAL_CFG.VLAN_HANDLING is set to 1, then "more than
+ *            1518 octets" becomes "more than 1518 (1522 if VLAN-tagged)
+ *            octets".
+ * @rx_ttl_64_frms: Count of total received MAC frames with length (including
+ *            FCS, but not framing bits) of exactly 64 octets. Includes frames
+ *            received with frame-too-long, FCS, or length errors.
+ * @rx_ttl_65_127_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 65 and 127
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_128_255_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 128 and 255
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_256_511_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 256 and 511
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 512 and 1023
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 1024 and 1518
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 1519 and 4095
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 4096 and 8191
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 8192 and
+ *            RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
+ *            with frame-too-long, FCS, or length errors.
+ * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) exceeding
+ *            RX_MAX_PYLD_LEN+18 (+22 bytes if VLAN-tagged) octets.
+ *            Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
+ * @rx_accepted_ip: Count of received IP datagrams that
+ *             are passed to the system.
+ * @rx_ip_octets: Count of octets in received IP datagrams. Includes
+ *            errored IP datagrams.
+ * @rx_err_ip:         Count of received IP datagrams containing errors. For example,
+ *            bad IP checksum.
+ * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
+ * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
+ *            Note: This stat contains a count of all received TCP segments,
+ *            regardless of whether or not they pertain to an established
+ *            connection.
+ * @rx_udp: Count of received UDP datagrams.
+ * @rx_err_tcp: Count of received TCP segments containing errors. For example,
+ *            bad TCP checksum.
+ * @rx_pause_count: Count of the number of pause quanta for which the MAC has
+ *            been in the paused state. Recall, one pause quantum equates to 512
+ *            bit times.
+ * @rx_pause_ctrl_frms: Count of received MAC PAUSE control frames.
+ * @rx_unsup_ctrl_frms: Count of received MAC control frames that do not
+ *            contain the PAUSE opcode. The sum of RX_PAUSE_CTRL_FRMS and
+ *            this register is a count of all received MAC control frames.
+ *            Note: This stat may be configured to count all layer 2 errors
+ *            (i.e. length errors and FCS errors).
+ * @rx_fcs_err_frms: Count of received MAC frames that do not pass FCS. Does
+ *            not include frames received with frame-too-long or
+ *            frame-too-short error.
+ * @rx_in_rng_len_err_frms: Count of received frames with a length/type field
+ *            value between 46 (42 for VLAN-tagged frames) and 1500 (also 1500
+ *            for VLAN-tagged frames), inclusive, that does not match the
+ *            number of data octets (including pad) received. Also contains
+ *            a count of received frames with a length/type field less than
+ *            46 (42 for VLAN-tagged frames) and the number of data octets
+ *            (including pad) received is greater than 46 (42 for VLAN-tagged
+ *            frames).
+ * @rx_out_rng_len_err_frms:  Count of received frames with length/type field
+ *            between 1501 and 1535 decimal, inclusive.
+ * @rx_drop_frms: Count of received frames that could not be passed to the host.
+ *            See PORTn_RX_L2_MGMT_DISCARD, PORTn_RX_RPA_DISCARD,
+ *            PORTn_RX_TRASH_DISCARD, PORTn_RX_RTS_DISCARD, PORTn_RX_RED_DISCARD
+ *            for a list of reasons. Because the RMAC drops one frame at a time,
+ *            this stat also indicates the number of drop events.
+ * @rx_discarded_frms: Count of received frames containing
+ *             any error that prevents
+ *            them from being passed to the system. See PORTn_RX_FCS_DISCARD,
+ *            PORTn_RX_LEN_DISCARD, and PORTn_RX_SWITCH_DISCARD for a list of
+ *            reasons.
+ * @rx_drop_ip: Count of received IP datagrams that could not be passed to the
+ *            host. See PORTn_RX_DROP_FRMS for a list of reasons.
+ * @rx_drop_udp: Count of received UDP datagrams that are not delivered to the
+ *            host. See PORTn_RX_DROP_FRMS for a list of reasons.
+ * @rx_marker_pdu_frms: Count of valid Marker PDUs received on this Aggregation
+ *            port.
+ * @rx_lacpdu_frms: Count of valid LACPDUs received on this Aggregation port.
+ * @rx_unknown_pdu_frms: Count of received frames (on this Aggregation port)
+ *            that carry the Slow Protocols EtherType, but contain an unknown
+ *            PDU. Or frames that contain the Slow Protocols group MAC address,
+ *            but do not carry the Slow Protocols EtherType.
+ * @rx_marker_resp_pdu_frms: Count of valid Marker Response PDUs received on
+ *            this Aggregation port.
+ * @rx_fcs_discard: Count of received frames that are discarded because the
+ *            FCS check failed.
+ * @rx_illegal_pdu_frms: Count of received frames (on this Aggregation port)
+ *            that carry the Slow Protocols EtherType, but contain a badly
+ *            formed PDU. Or frames that carry the Slow Protocols EtherType,
+ *            but contain an illegal value of Protocol Subtype.
+ * @rx_switch_discard: Count of received frames that are discarded by the
+ *            internal switch because they did not have an entry in the
+ *            Filtering Database. This includes frames that had an invalid
+ *            destination MAC address or VLAN ID. It also includes frames that
+ *            are discarded because they did not satisfy the length requirements
+ *            of the target VPATH.
+ * @rx_len_discard: Count of received frames that are discarded because of an
+ *            invalid frame length (includes fragments, oversized frames and
+ *            mismatch between frame length and length/type field). This stat
+ *            can be configured
+ *            (see XMAC_STATS_GLOBAL_CFG.LEN_DISCARD_HANDLING).
+ * @rx_rpa_discard: Count of received frames that were discarded because the
+ *            receive protocol assist (RPA) discovered an error in the frame
+ *            or was unable to parse the frame.
+ * @rx_l2_mgmt_discard: Count of Layer 2 management frames (eg. pause frames,
+ *            Link Aggregation Control Protocol (LACP) frames, etc.) that are
+ *            discarded.
+ * @rx_rts_discard: Count of received frames that are discarded by the receive
+ *            traffic steering (RTS) logic. Includes frames discarded
+ *            because the SSC response contradicted the switch table, because
+ *            the SSC timed out, or because the target queue could not fit the
+ *            frame.
+ * @rx_trash_discard: Count of received frames that are discarded because
+ *            receive traffic steering (RTS) steered the frame to the trash
+ *            queue.
+ * @rx_buff_full_discard: Count of received frames that are discarded because
+ *            internal buffers are full. Includes frames discarded because the
+ *            RTS logic is waiting for an SSC lookup that has no timeout bound.
+ *            Also, includes frames that are dropped because the MAC2FAU buffer
+ *            is nearly full -- this can happen if the external receive buffer
+ *            is full and the receive path is backing up.
+ * @rx_red_discard: Count of received frames that are discarded because of RED
+ *            (Random Early Discard).
+ * @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
+ *            characters occurring between times of normal data transmission
+ *            (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
+ *            incremented when either -
+ *            1) The Reconciliation Sublayer (RS) is expecting one control
+ *               character and gets another (i.e. is expecting a Start
+ *               character, but gets another control character).
+ *            2) The Start control character is not in lane 0.
+ *            Only increments the count by one for each XGMII column.
+ * @rx_xgmii_data_err_cnt: Maintains a count of unexpected control characters
+ *            during normal data transmission. If the Reconciliation Sublayer
+ *            (RS) receives a control character, other than a terminate control
+ *            character, during receipt of data octets then this register is
+ *            incremented. Also increments if the start frame delimiter is not
+ *            found in the correct location. Only increments the count by one
+ *            for each XGMII column.
+ * @rx_xgmii_char1_match: Maintains a count of the number of XGMII characters
+ *            that match a pattern that is programmable through register
+ *            XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
+ *            to /E/ (i.e. the error character), thus the statistic tracks the
+ *            number of Error characters received at any time.
+ * @rx_xgmii_err_sym: Count of the number of symbol errors in the received
+ *            XGMII data (i.e. PHY indicates "Receive Error" on the XGMII).
+ *            Only includes symbol errors that are observed between the XGMII
+ *            Start Frame Delimiter and End Frame Delimiter, inclusive. And
+ *            only increments the count by one for each frame.
+ * @rx_xgmii_column1_match: Maintains a count of the number of XGMII columns
+ *            that match a pattern that is programmable through register
+ *            XMAC_STATS_RX_XGMII_COLUMN1_PORTn. By default, the pattern is set
+ *            to 4 x /E/ (i.e. a column containing all error characters), thus
+ *            the statistic tracks the number of Error columns received at any
+ *            time.
+ * @rx_xgmii_char2_match: Maintains a count of the number of XGMII characters
+ *            that match a pattern that is programmable through register
+ *            XMAC_STATS_RX_XGMII_CHAR_PORTn. By default, the pattern is set
+ *            to /E/ (i.e. the error character), thus the statistic tracks the
+ *            number of Error characters received at any time.
+ * @rx_local_fault: Maintains a count of the number of times that the link
+ *            transitioned from "up" to "down" due to a local fault.
+ * @rx_xgmii_column2_match: Maintains a count of the number of XGMII columns
+ *            that match a pattern that is programmable through register
+ *            XMAC_STATS_RX_XGMII_COLUMN2_PORTn. By default, the pattern is set
+ *            to 4 x /E/ (i.e. a column containing all error characters), thus
+ *            the statistic tracks the number of Error columns received at any
+ *            time. If XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NEAR_COL1 is set
+ *            to 1, then this stat increments when COLUMN2 is found within 'n'
+ *            clocks after COLUMN1. Here, 'n' is defined by
+ *            XMAC_STATS_RX_XGMII_BEHAV_COLUMN2_PORTn.NUM_COL (if 'n' is set to
+ *            0, then it means to search anywhere for COLUMN2).
+ * @rx_jettison: Count of received frames that are jettisoned because internal
+ *            buffers are full.
+ * @rx_remote_fault: Maintains a count of the number of times that the link
+ *            transitioned from "up" to "down" due to a remote fault.
+ *
+ * XMAC Port Statistics.
+ */
+struct vxge_hw_xmac_port_stats {
+/*0x000*/              u64     tx_ttl_frms;
+/*0x008*/              u64     tx_ttl_octets;
+/*0x010*/              u64     tx_data_octets;
+/*0x018*/              u64     tx_mcast_frms;
+/*0x020*/              u64     tx_bcast_frms;
+/*0x028*/              u64     tx_ucast_frms;
+/*0x030*/              u64     tx_tagged_frms;
+/*0x038*/              u64     tx_vld_ip;
+/*0x040*/              u64     tx_vld_ip_octets;
+/*0x048*/              u64     tx_icmp;
+/*0x050*/              u64     tx_tcp;
+/*0x058*/              u64     tx_rst_tcp;
+/*0x060*/              u64     tx_udp;
+/*0x068*/              u32     tx_parse_error;
+/*0x06c*/              u32     tx_unknown_protocol;
+/*0x070*/              u64     tx_pause_ctrl_frms;
+/*0x078*/              u32     tx_marker_pdu_frms;
+/*0x07c*/              u32     tx_lacpdu_frms;
+/*0x080*/              u32     tx_drop_ip;
+/*0x084*/              u32     tx_marker_resp_pdu_frms;
+/*0x088*/              u32     tx_xgmii_char2_match;
+/*0x08c*/              u32     tx_xgmii_char1_match;
+/*0x090*/              u32     tx_xgmii_column2_match;
+/*0x094*/              u32     tx_xgmii_column1_match;
+/*0x098*/              u32     unused1;
+/*0x09c*/              u16     tx_any_err_frms;
+/*0x09e*/              u16     tx_drop_frms;
+/*0x0a0*/              u64     rx_ttl_frms;
+/*0x0a8*/              u64     rx_vld_frms;
+/*0x0b0*/              u64     rx_offload_frms;
+/*0x0b8*/              u64     rx_ttl_octets;
+/*0x0c0*/              u64     rx_data_octets;
+/*0x0c8*/              u64     rx_offload_octets;
+/*0x0d0*/              u64     rx_vld_mcast_frms;
+/*0x0d8*/              u64     rx_vld_bcast_frms;
+/*0x0e0*/              u64     rx_accepted_ucast_frms;
+/*0x0e8*/              u64     rx_accepted_nucast_frms;
+/*0x0f0*/              u64     rx_tagged_frms;
+/*0x0f8*/              u64     rx_long_frms;
+/*0x100*/              u64     rx_usized_frms;
+/*0x108*/              u64     rx_osized_frms;
+/*0x110*/              u64     rx_frag_frms;
+/*0x118*/              u64     rx_jabber_frms;
+/*0x120*/              u64     rx_ttl_64_frms;
+/*0x128*/              u64     rx_ttl_65_127_frms;
+/*0x130*/              u64     rx_ttl_128_255_frms;
+/*0x138*/              u64     rx_ttl_256_511_frms;
+/*0x140*/              u64     rx_ttl_512_1023_frms;
+/*0x148*/              u64     rx_ttl_1024_1518_frms;
+/*0x150*/              u64     rx_ttl_1519_4095_frms;
+/*0x158*/              u64     rx_ttl_4096_8191_frms;
+/*0x160*/              u64     rx_ttl_8192_max_frms;
+/*0x168*/              u64     rx_ttl_gt_max_frms;
+/*0x170*/              u64     rx_ip;
+/*0x178*/              u64     rx_accepted_ip;
+/*0x180*/              u64     rx_ip_octets;
+/*0x188*/              u64     rx_err_ip;
+/*0x190*/              u64     rx_icmp;
+/*0x198*/              u64     rx_tcp;
+/*0x1a0*/              u64     rx_udp;
+/*0x1a8*/              u64     rx_err_tcp;
+/*0x1b0*/              u64     rx_pause_count;
+/*0x1b8*/              u64     rx_pause_ctrl_frms;
+/*0x1c0*/              u64     rx_unsup_ctrl_frms;
+/*0x1c8*/              u64     rx_fcs_err_frms;
+/*0x1d0*/              u64     rx_in_rng_len_err_frms;
+/*0x1d8*/              u64     rx_out_rng_len_err_frms;
+/*0x1e0*/              u64     rx_drop_frms;
+/*0x1e8*/              u64     rx_discarded_frms;
+/*0x1f0*/              u64     rx_drop_ip;
+/*0x1f8*/              u64     rx_drop_udp;
+/*0x200*/              u32     rx_marker_pdu_frms;
+/*0x204*/              u32     rx_lacpdu_frms;
+/*0x208*/              u32     rx_unknown_pdu_frms;
+/*0x20c*/              u32     rx_marker_resp_pdu_frms;
+/*0x210*/              u32     rx_fcs_discard;
+/*0x214*/              u32     rx_illegal_pdu_frms;
+/*0x218*/              u32     rx_switch_discard;
+/*0x21c*/              u32     rx_len_discard;
+/*0x220*/              u32     rx_rpa_discard;
+/*0x224*/              u32     rx_l2_mgmt_discard;
+/*0x228*/              u32     rx_rts_discard;
+/*0x22c*/              u32     rx_trash_discard;
+/*0x230*/              u32     rx_buff_full_discard;
+/*0x234*/              u32     rx_red_discard;
+/*0x238*/              u32     rx_xgmii_ctrl_err_cnt;
+/*0x23c*/              u32     rx_xgmii_data_err_cnt;
+/*0x240*/              u32     rx_xgmii_char1_match;
+/*0x244*/              u32     rx_xgmii_err_sym;
+/*0x248*/              u32     rx_xgmii_column1_match;
+/*0x24c*/              u32     rx_xgmii_char2_match;
+/*0x250*/              u32     rx_local_fault;
+/*0x254*/              u32     rx_xgmii_column2_match;
+/*0x258*/              u32     rx_jettison;
+/*0x25c*/              u32     rx_remote_fault;
+} __packed;
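+
+/*
+ * Illustrative sketch only (not part of the Titan register map or the driver
+ * API): deriving a wire-level octet count from the port statistics above, as
+ * described for @rx_ttl_octets (each counted frame normally carries 8 octets
+ * of preamble unless the device is configured otherwise). The helper name is
+ * hypothetical.
+ */
+static inline u64
+vxge_example_port_rx_wire_octets(const struct vxge_hw_xmac_port_stats *st)
+{
+        /* rx_ttl_octets excludes framing (preamble) octets. */
+        return st->rx_ttl_octets + st->rx_ttl_frms * 8;
+}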
+
+/**
+ * struct vxge_hw_xmac_vpath_tx_stats - XMAC Vpath Tx Statistics
+ *
+ * @tx_ttl_eth_frms: Count of successfully transmitted MAC frames.
+ * @tx_ttl_eth_octets: Count of total octets of transmitted frames,
+ *             not including framing characters (i.e. less framing bits).
+ *             To determine the total octets of transmitted frames, including
+ *             framing characters, multiply TX_TTL_ETH_FRMS by 8 and add it to
+ *             this stat (the device always prepends 8 bytes of preamble for
+ *             each frame)
+ * @tx_data_octets: Count of data and padding octets of successfully transmitted
+ *             frames.
+ * @tx_mcast_frms: Count of successfully transmitted frames to a group address
+ *             other than the broadcast address.
+ * @tx_bcast_frms: Count of successfully transmitted frames to the broadcast
+ *             group address.
+ * @tx_ucast_frms: Count of transmitted frames containing a unicast address.
+ *             Includes discarded frames that are not sent to the network.
+ * @tx_tagged_frms: Count of transmitted frames containing a VLAN tag.
+ * @tx_vld_ip: Count of transmitted IP datagrams that are passed to the network.
+ * @tx_vld_ip_octets: Count of total octets of transmitted IP datagrams that
+ *            are passed to the network.
+ * @tx_icmp: Count of transmitted ICMP messages. Includes messages not sent due
+ *            to problems within ICMP.
+ * @tx_tcp: Count of transmitted TCP segments. Does not include segments
+ *            containing retransmitted octets.
+ * @tx_rst_tcp: Count of transmitted TCP segments containing the RST flag.
+ * @tx_udp: Count of transmitted UDP datagrams.
+ * @tx_unknown_protocol: Increments when the TPA encounters an unknown protocol,
+ *            such as a new IPv6 extension header, or an unsupported Routing
+ *            Type. The packet still has a checksum calculated but it may be
+ *            incorrect.
+ * @tx_lost_ip: Count of transmitted IP datagrams that could not be passed
+ *            to the network. Increments because of: 1) An internal processing
+ *            error (such as an uncorrectable ECC error). 2) A frame parsing
+ *            error during IP checksum calculation.
+ * @tx_parse_error: Increments when the TPA is unable to parse a packet. This
+ *            generally occurs when a packet is corrupted in some way, including
+ *            packets that have IP version mismatches, invalid Layer 2 control
+ *            fields, etc. L3/L4 checksums are not offloaded, but the packet
+ *            is still transmitted.
+ * @tx_tcp_offload: For frames belonging to offloaded sessions only, a count
+ *            of transmitted TCP segments. Does not include segments containing
+ *            retransmitted octets.
+ * @tx_retx_tcp_offload: For frames belonging to offloaded sessions only, the
+ *            total number of segments retransmitted. Retransmitted segments
+ *            that are sourced by the host are counted by the host.
+ * @tx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
+ *            of transmitted IP datagrams that could not be passed to the
+ *            network.
+ *
+ * XMAC Vpath TX Statistics.
+ */
+struct vxge_hw_xmac_vpath_tx_stats {
+       u64     tx_ttl_eth_frms;
+       u64     tx_ttl_eth_octets;
+       u64     tx_data_octets;
+       u64     tx_mcast_frms;
+       u64     tx_bcast_frms;
+       u64     tx_ucast_frms;
+       u64     tx_tagged_frms;
+       u64     tx_vld_ip;
+       u64     tx_vld_ip_octets;
+       u64     tx_icmp;
+       u64     tx_tcp;
+       u64     tx_rst_tcp;
+       u64     tx_udp;
+       u32     tx_unknown_protocol;
+       u32     tx_lost_ip;
+       u32     unused1;
+       u32     tx_parse_error;
+       u64     tx_tcp_offload;
+       u64     tx_retx_tcp_offload;
+       u64     tx_lost_ip_offload;
+} __packed;
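+
+/*
+ * Illustrative sketch only: computing the change in the per-vpath TX counters
+ * above between two snapshots, e.g. to derive a transmit rate over a sampling
+ * interval. The helper name is hypothetical and assumes the counters do not
+ * wrap between the two samples.
+ */
+static inline void
+vxge_example_vpath_tx_delta(const struct vxge_hw_xmac_vpath_tx_stats *cur,
+                            const struct vxge_hw_xmac_vpath_tx_stats *prev,
+                            u64 *frms, u64 *octets)
+{
+        *frms = cur->tx_ttl_eth_frms - prev->tx_ttl_eth_frms;
+        *octets = cur->tx_ttl_eth_octets - prev->tx_ttl_eth_octets;
+}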
+
+/**
+ * struct vxge_hw_xmac_vpath_rx_stats - XMAC Vpath RX Statistics
+ *
+ * @rx_ttl_eth_frms: Count of successfully received MAC frames.
+ * @rx_vld_frms: Count of successfully received MAC frames. Does not include
+ *            frames received with frame-too-long, FCS, or length errors.
+ * @rx_offload_frms: Count of offloaded received frames that are passed to
+ *            the host.
+ * @rx_ttl_eth_octets: Count of total octets of received frames, not including
+ *            framing characters (i.e. less framing bits). Only counts octets
+ *            of frames that are at least 14 bytes (18 bytes for VLAN-tagged)
+ *            before FCS. To determine the total octets of received frames,
+ *            including framing characters, multiply RX_TTL_ETH_FRMS by 8 and
+ *            add it to this stat (the stat RX_TTL_ETH_FRMS only counts frames
+ *            that have the required 8 bytes of preamble).
+ * @rx_data_octets: Count of data and padding octets of successfully received
+ *            frames. Does not include frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_offload_octets: Count of total octets, not including framing characters,
+ *            of offloaded received frames that are passed to the host.
+ * @rx_vld_mcast_frms: Count of successfully received MAC frames containing a
+ *            nonbroadcast group address. Does not include frames received with
+ *            frame-too-long, FCS, or length errors.
+ * @rx_vld_bcast_frms: Count of successfully received MAC frames containing the
+ *            broadcast group address. Does not include frames received with
+ *            frame-too-long, FCS, or length errors.
+ * @rx_accepted_ucast_frms: Count of successfully received frames containing
+ *            a unicast address. Only includes frames that are passed to the
+ *            system.
+ * @rx_accepted_nucast_frms: Count of successfully received frames containing
+ *            a non-unicast (broadcast or multicast) address. Only includes
+ *            frames that are passed to the system. Could include, for instance,
+ *            non-unicast frames that contain FCS errors if the MAC_ERROR_CFG
+ *            register is set to pass FCS-errored frames to the host.
+ * @rx_tagged_frms: Count of received frames containing a VLAN tag.
+ * @rx_long_frms: Count of received frames that are longer than RX_MAX_PYLD_LEN
+ *            + 18 bytes (+ 22 bytes if VLAN-tagged).
+ * @rx_usized_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) less than 64 octets, that are otherwise well-formed.
+ *            In other words, counts runts.
+ * @rx_osized_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) more than 1518 octets, that are otherwise
+ *            well-formed.
+ * @rx_frag_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) less than 64 octets that had bad FCS.
+ *            In other words, counts fragments.
+ * @rx_jabber_frms: Count of received frames of length (including FCS, but not
+ *            framing bits) more than 1518 octets that had bad FCS. In other
+ *            words, counts jabbers.
+ * @rx_ttl_64_frms: Count of total received MAC frames with length (including
+ *            FCS, but not framing bits) of exactly 64 octets. Includes frames
+ *            received with frame-too-long, FCS, or length errors.
+ * @rx_ttl_65_127_frms: Count of total received MAC frames
+ *             with length (including
+ *            FCS, but not framing bits) of between 65 and 127 octets inclusive.
+ *            Includes frames received with frame-too-long, FCS,
+ *            or length errors.
+ * @rx_ttl_128_255_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits)
+ *            of between 128 and 255 octets
+ *            inclusive. Includes frames received with frame-too-long, FCS,
+ *            or length errors.
+ * @rx_ttl_256_511_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits)
+ *            of between 256 and 511 octets
+ *            inclusive. Includes frames received with frame-too-long, FCS, or
+ *            length errors.
+ * @rx_ttl_512_1023_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 512 and 1023
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_1024_1518_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 1024 and 1518
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_1519_4095_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 1519 and 4095
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_4096_8191_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 4096 and 8191
+ *            octets inclusive. Includes frames received with frame-too-long,
+ *            FCS, or length errors.
+ * @rx_ttl_8192_max_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) of between 8192 and
+ *            RX_MAX_PYLD_LEN+18 octets inclusive. Includes frames received
+ *            with frame-too-long, FCS, or length errors.
+ * @rx_ttl_gt_max_frms: Count of total received MAC frames with length
+ *            (including FCS, but not framing bits) exceeding RX_MAX_PYLD_LEN+18
+ *            (+22 bytes if VLAN-tagged) octets. Includes frames
+ *            received with frame-too-long, FCS, or length errors.
+ * @rx_ip: Count of received IP datagrams. Includes errored IP datagrams.
+ * @rx_accepted_ip: Count of received IP datagrams that
+ *             are passed to the system.
+ * @rx_ip_octets: Count of octets in received IP datagrams.
+ *            Includes errored IP datagrams.
+ * @rx_err_ip: Count of received IP datagrams containing errors. For example,
+ *            bad IP checksum.
+ * @rx_icmp: Count of received ICMP messages. Includes errored ICMP messages.
+ * @rx_tcp: Count of received TCP segments. Includes errored TCP segments.
+ *             Note: This stat contains a count of all received TCP segments,
+ *             regardless of whether or not they pertain to an established
+ *             connection.
+ * @rx_udp: Count of received UDP datagrams.
+ * @rx_err_tcp: Count of received TCP segments containing errors. For example,
+ *             bad TCP checksum.
+ * @rx_lost_frms: Count of received frames that could not be passed to the host.
+ *             See RX_QUEUE_FULL_DISCARD and RX_RED_DISCARD
+ *             for a list of reasons.
+ * @rx_lost_ip: Count of received IP datagrams that could not be passed to
+ *             the host. See RX_LOST_FRMS for a list of reasons.
+ * @rx_lost_ip_offload: For frames belonging to offloaded sessions only, a count
+ *             of received IP datagrams that could not be passed to the host.
+ *             See RX_LOST_FRMS for a list of reasons.
+ * @rx_various_discard: Count of received frames that are discarded because
+ *             the target receive queue is full.
+ * @rx_sleep_discard: Count of received frames that are discarded because the
+ *            target VPATH is asleep (a Wake-on-LAN magic packet can be used
+ *            to awaken the VPATH).
+ * @rx_red_discard: Count of received frames that are discarded because of RED
+ *            (Random Early Discard).
+ * @rx_queue_full_discard: Count of received frames that are discarded because
+ *             the target receive queue is full.
+ * @rx_mpa_ok_frms: Count of received frames that pass the MPA checks.
+ *
+ * XMAC Vpath RX Statistics.
+ */
+struct vxge_hw_xmac_vpath_rx_stats {
+       u64     rx_ttl_eth_frms;
+       u64     rx_vld_frms;
+       u64     rx_offload_frms;
+       u64     rx_ttl_eth_octets;
+       u64     rx_data_octets;
+       u64     rx_offload_octets;
+       u64     rx_vld_mcast_frms;
+       u64     rx_vld_bcast_frms;
+       u64     rx_accepted_ucast_frms;
+       u64     rx_accepted_nucast_frms;
+       u64     rx_tagged_frms;
+       u64     rx_long_frms;
+       u64     rx_usized_frms;
+       u64     rx_osized_frms;
+       u64     rx_frag_frms;
+       u64     rx_jabber_frms;
+       u64     rx_ttl_64_frms;
+       u64     rx_ttl_65_127_frms;
+       u64     rx_ttl_128_255_frms;
+       u64     rx_ttl_256_511_frms;
+       u64     rx_ttl_512_1023_frms;
+       u64     rx_ttl_1024_1518_frms;
+       u64     rx_ttl_1519_4095_frms;
+       u64     rx_ttl_4096_8191_frms;
+       u64     rx_ttl_8192_max_frms;
+       u64     rx_ttl_gt_max_frms;
+       u64     rx_ip;
+       u64     rx_accepted_ip;
+       u64     rx_ip_octets;
+       u64     rx_err_ip;
+       u64     rx_icmp;
+       u64     rx_tcp;
+       u64     rx_udp;
+       u64     rx_err_tcp;
+       u64     rx_lost_frms;
+       u64     rx_lost_ip;
+       u64     rx_lost_ip_offload;
+       u16     rx_various_discard;
+       u16     rx_sleep_discard;
+       u16     rx_red_discard;
+       u16     rx_queue_full_discard;
+       u64     rx_mpa_ok_frms;
+} __packed;
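+
+/*
+ * Illustrative sketch only: a rough per-vpath receive drop total built from
+ * the discard counters documented above. Which counters to include is a
+ * policy choice; this hypothetical helper simply sums the queue-full, sleep,
+ * RED and miscellaneous discards.
+ */
+static inline u64
+vxge_example_vpath_rx_discards(const struct vxge_hw_xmac_vpath_rx_stats *st)
+{
+        return (u64)st->rx_queue_full_discard + st->rx_sleep_discard +
+                st->rx_red_discard + st->rx_various_discard;
+}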
+
+/**
+ * struct vxge_hw_xmac_stats - XMAC Statistics
+ *
+ * @aggr_stats: Statistics on aggregate ports (port 0, port 1)
+ * @port_stats: Statistics on ports (wire 0, wire 1, lag)
+ * @vpath_tx_stats: Per vpath XMAC TX stats
+ * @vpath_rx_stats: Per vpath XMAC RX stats
+ *
+ * XMAC Statistics.
+ */
+struct vxge_hw_xmac_stats {
+       struct vxge_hw_xmac_aggr_stats
+                               aggr_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID];
+       struct vxge_hw_xmac_port_stats
+                               port_stats[VXGE_HW_MAC_MAX_MAC_PORT_ID+1];
+       struct vxge_hw_xmac_vpath_tx_stats
+                               vpath_tx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
+       struct vxge_hw_xmac_vpath_rx_stats
+                               vpath_rx_stats[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
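+
+/*
+ * Illustrative sketch only: aggregating the per-vpath XMAC counters held in
+ * struct vxge_hw_xmac_stats into device-wide totals. The loop bound matches
+ * the array sizes above; the helper itself is hypothetical and not part of
+ * the driver API.
+ */
+static inline void
+vxge_example_xmac_totals(const struct vxge_hw_xmac_stats *xmac,
+                         u64 *tx_frms, u64 *rx_frms)
+{
+        u32 i;
+
+        *tx_frms = 0;
+        *rx_frms = 0;
+        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+                *tx_frms += xmac->vpath_tx_stats[i].tx_ttl_eth_frms;
+                *rx_frms += xmac->vpath_rx_stats[i].rx_ttl_eth_frms;
+        }
+}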
+
+/**
+ * struct vxge_hw_vpath_stats_hw_info - Titan vpath hardware statistics.
+ * @ini_num_mwr_sent: The number of PCI memory writes initiated by the PIC block
+ *             for the given VPATH
+ * @ini_num_mrd_sent: The number of PCI memory reads initiated by the PIC block
+ * @ini_num_cpl_rcvd: The number of PCI read completions received by the
+ *             PIC block
+ * @ini_num_mwr_byte_sent: The number of PCI memory write bytes sent by the PIC
+ *             block to the host
+ * @ini_num_cpl_byte_rcvd: The number of PCI read completion bytes received by
+ *             the PIC block
+ * @wrcrdtarb_xoff: TBD
+ * @rdcrdtarb_xoff: TBD
+ * @vpath_genstats_count0: TBD
+ * @vpath_genstats_count1: TBD
+ * @vpath_genstats_count2: TBD
+ * @vpath_genstats_count3: TBD
+ * @vpath_genstats_count4: TBD
+ * @vpath_genstats_count5: TBD
+ * @tx_stats: Transmit stats
+ * @rx_stats: Receive stats
+ * @prog_event_vnum1: Programmable statistic. Increments when internal logic
+ *             detects a certain event. See register
+ *             XMAC_STATS_CFG.EVENT_VNUM1_CFG for more information.
+ * @prog_event_vnum0: Programmable statistic. Increments when internal logic
+ *             detects a certain event. See register
+ *             XMAC_STATS_CFG.EVENT_VNUM0_CFG for more information.
+ * @prog_event_vnum3: Programmable statistic. Increments when internal logic
+ *             detects a certain event. See register
+ *             XMAC_STATS_CFG.EVENT_VNUM3_CFG for more information.
+ * @prog_event_vnum2: Programmable statistic. Increments when internal logic
+ *             detects a certain event. See register
+ *             XMAC_STATS_CFG.EVENT_VNUM2_CFG for more information.
+ * @rx_multi_cast_frame_discard: TBD
+ * @rx_frm_transferred: TBD
+ * @rxd_returned: TBD
+ * @rx_mpa_len_fail_frms: Count of received frames
+ *             that fail the MPA length check
+ * @rx_mpa_mrk_fail_frms: Count of received frames
+ *             that fail the MPA marker check
+ * @rx_mpa_crc_fail_frms: Count of received frames that fail the MPA CRC check
+ * @rx_permitted_frms: Count of frames that pass through the FAU and on to the
+ *             frame buffer (and subsequently to the host).
+ * @rx_vp_reset_discarded_frms: Count of receive frames that are discarded
+ *             because the VPATH is in reset
+ * @rx_wol_frms: Count of received "magic packet" frames. Stat increments
+ *             whenever the received frame matches the VPATH's Wake-on-LAN
+ *             signature(s) CRC.
+ * @tx_vp_reset_discarded_frms: Count of transmit frames that are discarded
+ *             because the VPATH is in reset. Includes frames that are discarded
+ *             because the current VPIN does not match the VPIN of the frame.
+ *
+ * Titan vpath hardware statistics.
+ */
+struct vxge_hw_vpath_stats_hw_info {
+/*0x000*/      u32 ini_num_mwr_sent;
+/*0x004*/      u32 unused1;
+/*0x008*/      u32 ini_num_mrd_sent;
+/*0x00c*/      u32 unused2;
+/*0x010*/      u32 ini_num_cpl_rcvd;
+/*0x014*/      u32 unused3;
+/*0x018*/      u64 ini_num_mwr_byte_sent;
+/*0x020*/      u64 ini_num_cpl_byte_rcvd;
+/*0x028*/      u32 wrcrdtarb_xoff;
+/*0x02c*/      u32 unused4;
+/*0x030*/      u32 rdcrdtarb_xoff;
+/*0x034*/      u32 unused5;
+/*0x038*/      u32 vpath_genstats_count0;
+/*0x03c*/      u32 vpath_genstats_count1;
+/*0x040*/      u32 vpath_genstats_count2;
+/*0x044*/      u32 vpath_genstats_count3;
+/*0x048*/      u32 vpath_genstats_count4;
+/*0x04c*/      u32 unused6;
+/*0x050*/      u32 vpath_genstats_count5;
+/*0x054*/      u32 unused7;
+/*0x058*/      struct vxge_hw_xmac_vpath_tx_stats tx_stats;
+/*0x0e8*/      struct vxge_hw_xmac_vpath_rx_stats rx_stats;
+/*0x220*/      u64 unused9;
+/*0x228*/      u32 prog_event_vnum1;
+/*0x22c*/      u32 prog_event_vnum0;
+/*0x230*/      u32 prog_event_vnum3;
+/*0x234*/      u32 prog_event_vnum2;
+/*0x238*/      u16 rx_multi_cast_frame_discard;
+/*0x23a*/      u8 unused10[6];
+/*0x240*/      u32 rx_frm_transferred;
+/*0x244*/      u32 unused11;
+/*0x248*/      u16 rxd_returned;
+/*0x24a*/      u8 unused12[6];
+/*0x252*/      u16 rx_mpa_len_fail_frms;
+/*0x254*/      u16 rx_mpa_mrk_fail_frms;
+/*0x256*/      u16 rx_mpa_crc_fail_frms;
+/*0x258*/      u16 rx_permitted_frms;
+/*0x25c*/      u64 rx_vp_reset_discarded_frms;
+/*0x25e*/      u64 rx_wol_frms;
+/*0x260*/      u64 tx_vp_reset_discarded_frms;
+} __packed;
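+
+/*
+ * Illustrative sketch only: the hardware statistics block above embeds the
+ * per-vpath XMAC TX/RX statistics, so MAC counters can be reached through it
+ * directly. The hypothetical helper below returns the number of frames
+ * discarded, in either direction, while the vpath was held in reset.
+ */
+static inline u64
+vxge_example_vpath_reset_discards(const struct vxge_hw_vpath_stats_hw_info *hw)
+{
+        return hw->rx_vp_reset_discarded_frms + hw->tx_vp_reset_discarded_frms;
+}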
+
+
+/**
+ * struct vxge_hw_device_stats_mrpcim_info - Titan mrpcim hardware statistics.
+ * @pic.ini_rd_drop     0x0000          4       Number of DMA reads initiated
+ *  by the adapter that were discarded because the VPATH is out of service
+ * @pic.ini_wr_drop    0x0004  4       Number of DMA writes initiated by the
+ *  adapter that were discarded because the VPATH is out of service
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane0]    0x0008  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane1]    0x0010  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane2]    0x0018  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane3]    0x0020  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane4]    0x0028  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane5]    0x0030  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane6]    0x0038  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane7]    0x0040  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane8]    0x0048  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane9]    0x0050  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane10]   0x0058  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane11]   0x0060  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane12]   0x0068  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane13]   0x0070  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane14]   0x0078  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane15]   0x0080  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_ph_crdt_depleted[vplane16]   0x0088  4       Number of times
+ *  the posted header credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane0]    0x0090  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane1]    0x0098  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane2]    0x00a0  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane3]    0x00a8  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane4]    0x00b0  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane5]    0x00b8  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane6]    0x00c0  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane7]    0x00c8  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane8]    0x00d0  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane9]    0x00d8  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane10]   0x00e0  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane11]   0x00e8  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane12]   0x00f0  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane13]   0x00f8  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane14]   0x0100  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane15]   0x0108  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.wrcrdtarb_pd_crdt_depleted[vplane16]   0x0110  4       Number of times
+ *  the posted data credits for upstream PCI writes were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane0]   0x0118  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane1]   0x0120  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane2]   0x0128  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane3]   0x0130  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane4]   0x0138  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane5]   0x0140  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane6]   0x0148  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane7]   0x0150  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane8]   0x0158  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane9]   0x0160  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane10]  0x0168  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane11]  0x0170  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane12]  0x0178  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane13]  0x0180  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane14]  0x0188  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane15]  0x0190  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.rdcrdtarb_nph_crdt_depleted[vplane16]  0x0198  4       Number of times
+ *  the non-posted header credits for upstream PCI reads were depleted
+ * @pic.ini_rd_vpin_drop       0x01a0  4       Number of DMA reads initiated by
+ *  the adapter that were discarded because the VPATH instance number does
+ *  not match
+ * @pic.ini_wr_vpin_drop       0x01a4  4       Number of DMA writes initiated
+ *  by the adapter that were discarded because the VPATH instance number
+ *  does not match
+ * @pic.genstats_count0        0x01a8  4       Configurable statistic #1. Refer
+ *  to the GENSTATS0_CFG for information on configuring this statistic
+ * @pic.genstats_count1        0x01ac  4       Configurable statistic #2. Refer
+ *  to the GENSTATS1_CFG for information on configuring this statistic
+ * @pic.genstats_count2        0x01b0  4       Configurable statistic #3. Refer
+ *  to the GENSTATS2_CFG for information on configuring this statistic
+ * @pic.genstats_count3        0x01b4  4       Configurable statistic #4. Refer
+ *  to the GENSTATS3_CFG for information on configuring this statistic
+ * @pic.genstats_count4        0x01b8  4       Configurable statistic #5. Refer
+ *  to the GENSTATS4_CFG for information on configuring this statistic
+ * @pic.genstats_count5        0x01c0  4       Configurable statistic #6. Refer
+ *  to the GENSTATS5_CFG for information on configuring this statistic
+ * @pci.rstdrop_cpl    0x01c8  4
+ * @pci.rstdrop_msg    0x01cc  4
+ * @pci.rstdrop_client1        0x01d0  4
+ * @pci.rstdrop_client0        0x01d4  4
+ * @pci.rstdrop_client2        0x01d8  4
+ * @pci.depl_cplh[vplane0]     0x01e2  2       Number of times completion
+ *  header credits were depleted
+ * @pci.depl_nph[vplane0]      0x01e4  2       Number of times non posted
+ *  header credits were depleted
+ * @pci.depl_ph[vplane0]       0x01e6  2       Number of times the posted
+ *  header credits were depleted
+ * @pci.depl_cplh[vplane1]     0x01ea  2
+ * @pci.depl_nph[vplane1]      0x01ec  2
+ * @pci.depl_ph[vplane1]       0x01ee  2
+ * @pci.depl_cplh[vplane2]     0x01f2  2
+ * @pci.depl_nph[vplane2]      0x01f4  2
+ * @pci.depl_ph[vplane2]       0x01f6  2
+ * @pci.depl_cplh[vplane3]     0x01fa  2
+ * @pci.depl_nph[vplane3]      0x01fc  2
+ * @pci.depl_ph[vplane3]       0x01fe  2
+ * @pci.depl_cplh[vplane4]     0x0202  2
+ * @pci.depl_nph[vplane4]      0x0204  2
+ * @pci.depl_ph[vplane4]       0x0206  2
+ * @pci.depl_cplh[vplane5]     0x020a  2
+ * @pci.depl_nph[vplane5]      0x020c  2
+ * @pci.depl_ph[vplane5]       0x020e  2
+ * @pci.depl_cplh[vplane6]     0x0212  2
+ * @pci.depl_nph[vplane6]      0x0214  2
+ * @pci.depl_ph[vplane6]       0x0216  2
+ * @pci.depl_cplh[vplane7]     0x021a  2
+ * @pci.depl_nph[vplane7]      0x021c  2
+ * @pci.depl_ph[vplane7]       0x021e  2
+ * @pci.depl_cplh[vplane8]     0x0222  2
+ * @pci.depl_nph[vplane8]      0x0224  2
+ * @pci.depl_ph[vplane8]       0x0226  2
+ * @pci.depl_cplh[vplane9]     0x022a  2
+ * @pci.depl_nph[vplane9]      0x022c  2
+ * @pci.depl_ph[vplane9]       0x022e  2
+ * @pci.depl_cplh[vplane10]    0x0232  2
+ * @pci.depl_nph[vplane10]     0x0234  2
+ * @pci.depl_ph[vplane10]      0x0236  2
+ * @pci.depl_cplh[vplane11]    0x023a  2
+ * @pci.depl_nph[vplane11]     0x023c  2
+ * @pci.depl_ph[vplane11]      0x023e  2
+ * @pci.depl_cplh[vplane12]    0x0242  2
+ * @pci.depl_nph[vplane12]     0x0244  2
+ * @pci.depl_ph[vplane12]      0x0246  2
+ * @pci.depl_cplh[vplane13]    0x024a  2
+ * @pci.depl_nph[vplane13]     0x024c  2
+ * @pci.depl_ph[vplane13]      0x024e  2
+ * @pci.depl_cplh[vplane14]    0x0252  2
+ * @pci.depl_nph[vplane14]     0x0254  2
+ * @pci.depl_ph[vplane14]      0x0256  2
+ * @pci.depl_cplh[vplane15]    0x025a  2
+ * @pci.depl_nph[vplane15]     0x025c  2
+ * @pci.depl_ph[vplane15]      0x025e  2
+ * @pci.depl_cplh[vplane16]    0x0262  2
+ * @pci.depl_nph[vplane16]     0x0264  2
+ * @pci.depl_ph[vplane16]      0x0266  2
+ * @pci.depl_cpld[vplane0]     0x026a  2       Number of times completion data
+ *  credits were depleted
+ * @pci.depl_npd[vplane0]      0x026c  2       Number of times non posted data
+ *  credits were depleted
+ * @pci.depl_pd[vplane0]       0x026e  2       Number of times the posted data
+ *  credits were depleted
+ * @pci.depl_cpld[vplane1]     0x0272  2
+ * @pci.depl_npd[vplane1]      0x0274  2
+ * @pci.depl_pd[vplane1]       0x0276  2
+ * @pci.depl_cpld[vplane2]     0x027a  2
+ * @pci.depl_npd[vplane2]      0x027c  2
+ * @pci.depl_pd[vplane2]       0x027e  2
+ * @pci.depl_cpld[vplane3]     0x0282  2
+ * @pci.depl_npd[vplane3]      0x0284  2
+ * @pci.depl_pd[vplane3]       0x0286  2
+ * @pci.depl_cpld[vplane4]     0x028a  2
+ * @pci.depl_npd[vplane4]      0x028c  2
+ * @pci.depl_pd[vplane4]       0x028e  2
+ * @pci.depl_cpld[vplane5]     0x0292  2
+ * @pci.depl_npd[vplane5]      0x0294  2
+ * @pci.depl_pd[vplane5]       0x0296  2
+ * @pci.depl_cpld[vplane6]     0x029a  2
+ * @pci.depl_npd[vplane6]      0x029c  2
+ * @pci.depl_pd[vplane6]       0x029e  2
+ * @pci.depl_cpld[vplane7]     0x02a2  2
+ * @pci.depl_npd[vplane7]      0x02a4  2
+ * @pci.depl_pd[vplane7]       0x02a6  2
+ * @pci.depl_cpld[vplane8]     0x02aa  2
+ * @pci.depl_npd[vplane8]      0x02ac  2
+ * @pci.depl_pd[vplane8]       0x02ae  2
+ * @pci.depl_cpld[vplane9]     0x02b2  2
+ * @pci.depl_npd[vplane9]      0x02b4  2
+ * @pci.depl_pd[vplane9]       0x02b6  2
+ * @pci.depl_cpld[vplane10]    0x02ba  2
+ * @pci.depl_npd[vplane10]     0x02bc  2
+ * @pci.depl_pd[vplane10]      0x02be  2
+ * @pci.depl_cpld[vplane11]    0x02c2  2
+ * @pci.depl_npd[vplane11]     0x02c4  2
+ * @pci.depl_pd[vplane11]      0x02c6  2
+ * @pci.depl_cpld[vplane12]    0x02ca  2
+ * @pci.depl_npd[vplane12]     0x02cc  2
+ * @pci.depl_pd[vplane12]      0x02ce  2
+ * @pci.depl_cpld[vplane13]    0x02d2  2
+ * @pci.depl_npd[vplane13]     0x02d4  2
+ * @pci.depl_pd[vplane13]      0x02d6  2
+ * @pci.depl_cpld[vplane14]    0x02da  2
+ * @pci.depl_npd[vplane14]     0x02dc  2
+ * @pci.depl_pd[vplane14]      0x02de  2
+ * @pci.depl_cpld[vplane15]    0x02e2  2
+ * @pci.depl_npd[vplane15]     0x02e4  2
+ * @pci.depl_pd[vplane15]      0x02e6  2
+ * @pci.depl_cpld[vplane16]    0x02ea  2
+ * @pci.depl_npd[vplane16]     0x02ec  2
+ * @pci.depl_pd[vplane16]      0x02ee  2
+ * @xgmac_port[3];
+ * @xgmac_aggr[2];
+ * @xgmac.global_prog_event_gnum0      0x0ae0  8       Programmable statistic.
+ *  Increments when internal logic detects a certain event. See register
+ *  XMAC_STATS_GLOBAL_CFG.EVENT_GNUM0_CFG for more information.
+ * @xgmac.global_prog_event_gnum1      0x0ae8  8       Programmable statistic.
+ *  Increments when internal logic detects a certain event. See register
+ *  XMAC_STATS_GLOBAL_CFG.EVENT_GNUM1_CFG for more information.
+ * @xgmac.orp_lro_events       0x0af8  8
+ * @xgmac.orp_bs_events        0x0b00  8
+ * @xgmac.orp_iwarp_events     0x0b08  8
+ * @xgmac.tx_permitted_frms    0x0b14  4
+ * @xgmac.port2_tx_any_frms    0x0b1d  1
+ * @xgmac.port1_tx_any_frms    0x0b1e  1
+ * @xgmac.port0_tx_any_frms    0x0b1f  1
+ * @xgmac.port2_rx_any_frms    0x0b25  1
+ * @xgmac.port1_rx_any_frms    0x0b26  1
+ * @xgmac.port0_rx_any_frms    0x0b27  1
+ *
+ * Titan mrpcim hardware statistics.
+ */
+struct vxge_hw_device_stats_mrpcim_info {
+/*0x0000*/     u32     pic_ini_rd_drop;
+/*0x0004*/     u32     pic_ini_wr_drop;
+/*0x0008*/     struct {
+       /*0x0000*/      u32     pic_wrcrdtarb_ph_crdt_depleted;
+       /*0x0004*/      u32     unused1;
+               } pic_wrcrdtarb_ph_crdt_depleted_vplane[17];
+/*0x0090*/     struct {
+       /*0x0000*/      u32     pic_wrcrdtarb_pd_crdt_depleted;
+       /*0x0004*/      u32     unused2;
+               } pic_wrcrdtarb_pd_crdt_depleted_vplane[17];
+/*0x0118*/     struct {
+       /*0x0000*/      u32     pic_rdcrdtarb_nph_crdt_depleted;
+       /*0x0004*/      u32     unused3;
+               } pic_rdcrdtarb_nph_crdt_depleted_vplane[17];
+/*0x01a0*/     u32     pic_ini_rd_vpin_drop;
+/*0x01a4*/     u32     pic_ini_wr_vpin_drop;
+/*0x01a8*/     u32     pic_genstats_count0;
+/*0x01ac*/     u32     pic_genstats_count1;
+/*0x01b0*/     u32     pic_genstats_count2;
+/*0x01b4*/     u32     pic_genstats_count3;
+/*0x01b8*/     u32     pic_genstats_count4;
+/*0x01bc*/     u32     unused4;
+/*0x01c0*/     u32     pic_genstats_count5;
+/*0x01c4*/     u32     unused5;
+/*0x01c8*/     u32     pci_rstdrop_cpl;
+/*0x01cc*/     u32     pci_rstdrop_msg;
+/*0x01d0*/     u32     pci_rstdrop_client1;
+/*0x01d4*/     u32     pci_rstdrop_client0;
+/*0x01d8*/     u32     pci_rstdrop_client2;
+/*0x01dc*/     u32     unused6;
+/*0x01e0*/     struct {
+       /*0x0000*/      u16     unused7;
+       /*0x0002*/      u16     pci_depl_cplh;
+       /*0x0004*/      u16     pci_depl_nph;
+       /*0x0006*/      u16     pci_depl_ph;
+               } pci_depl_h_vplane[17];
+/*0x0268*/     struct {
+       /*0x0000*/      u16     unused8;
+       /*0x0002*/      u16     pci_depl_cpld;
+       /*0x0004*/      u16     pci_depl_npd;
+       /*0x0006*/      u16     pci_depl_pd;
+               } pci_depl_d_vplane[17];
+/*0x02f0*/     struct vxge_hw_xmac_port_stats xgmac_port[3];
+/*0x0a10*/     struct vxge_hw_xmac_aggr_stats xgmac_aggr[2];
+/*0x0ae0*/     u64     xgmac_global_prog_event_gnum0;
+/*0x0ae8*/     u64     xgmac_global_prog_event_gnum1;
+/*0x0af0*/     u64     unused7;
+/*0x0af8*/     u64     unused8;
+/*0x0b00*/     u64     unused9;
+/*0x0b08*/     u64     unused10;
+/*0x0b10*/     u32     unused11;
+/*0x0b14*/     u32     xgmac_tx_permitted_frms;
+/*0x0b18*/     u32     unused12;
+/*0x0b1c*/     u8      unused13;
+/*0x0b1d*/     u8      xgmac_port2_tx_any_frms;
+/*0x0b1e*/     u8      xgmac_port1_tx_any_frms;
+/*0x0b1f*/     u8      xgmac_port0_tx_any_frms;
+/*0x0b20*/     u32     unused14;
+/*0x0b24*/     u8      unused15;
+/*0x0b25*/     u8      xgmac_port2_rx_any_frms;
+/*0x0b26*/     u8      xgmac_port1_rx_any_frms;
+/*0x0b27*/     u8      xgmac_port0_rx_any_frms;
+} __packed;
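+
+/*
+ * Illustrative sketch only: scanning the 17 per-vplane entries above to see
+ * how often any virtual plane ran out of posted header credits for upstream
+ * PCI writes. The helper is hypothetical; the array length matches the
+ * declarations above.
+ */
+static inline u32
+vxge_example_mrpcim_ph_crdt_depletions(
+                const struct vxge_hw_device_stats_mrpcim_info *mrpcim)
+{
+        u32 i, total = 0;
+
+        for (i = 0; i < 17; i++)
+                total += mrpcim->pic_wrcrdtarb_ph_crdt_depleted_vplane[i]
+                                .pic_wrcrdtarb_ph_crdt_depleted;
+        return total;
+}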
+
+/**
+ * struct vxge_hw_device_stats_hw_info - Titan hardware statistics.
+ * @vpath_info: VPath statistics
+ * @vpath_info_sav: Saved vpath statistics
+ *
+ * Titan hardware statistics.
+ */
+struct vxge_hw_device_stats_hw_info {
+       struct vxge_hw_vpath_stats_hw_info
+               *vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
+       struct vxge_hw_vpath_stats_hw_info
+               vpath_info_sav[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
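+
+/*
+ * Illustrative sketch only: @vpath_info holds pointers to the live per-vpath
+ * hardware statistics while @vpath_info_sav holds saved copies. A snapshot
+ * pass (hypothetical helper, not part of the driver) might look like this,
+ * skipping vpaths that are not instantiated.
+ */
+static inline void
+vxge_example_save_hw_stats(struct vxge_hw_device_stats_hw_info *stats)
+{
+        u32 i;
+
+        for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+                if (!stats->vpath_info[i])
+                        continue;
+                stats->vpath_info_sav[i] = *stats->vpath_info[i];
+        }
+}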
+
+/**
+ * struct vxge_hw_vpath_stats_sw_common_info - HW common
+ * statistics for queues.
+ * @full_cnt: Number of times the queue was full
+ * @usage_cnt: Queue usage count.
+ * @usage_max: Maximum queue usage observed.
+ * @reserve_free_swaps_cnt: Reserve/free swap counter. Internal usage.
+ * @total_compl_cnt: Total descriptor completion count.
+ *
+ * Hw queue counters
+ * See also: struct vxge_hw_vpath_stats_sw_fifo_info{},
+ * struct vxge_hw_vpath_stats_sw_ring_info{},
+ */
+struct vxge_hw_vpath_stats_sw_common_info {
+       u32     full_cnt;
+       u32     usage_cnt;
+       u32     usage_max;
+       u32     reserve_free_swaps_cnt;
+       u32 total_compl_cnt;
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_fifo_info - HW fifo statistics
+ * @common_stats: Common counters for all queues
+ * @total_posts: Total number of postings on the queue.
+ * @total_buffers: Total number of buffers posted.
+ * @txd_t_code_err_cnt: Array of transmit transfer codes. The position
+ * (index) in this array reflects the transfer code type, for instance
+ * 0xA - "loss of link".
+ * Value txd_t_code_err_cnt[i] reflects the
+ * number of times the corresponding transfer code was encountered.
+ *
+ * HW fifo counters
+ * See also: struct vxge_hw_vpath_stats_sw_common_info{},
+ * struct vxge_hw_vpath_stats_sw_ring_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_fifo_info {
+       struct vxge_hw_vpath_stats_sw_common_info common_stats;
+       u32 total_posts;
+       u32 total_buffers;
+       u32 txd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_ring_info - HW ring statistics
+ * @common_stats: Common counters for all queues
+ * @rxd_t_code_err_cnt: Array of receive transfer codes. The position
+ *             (index) in this array reflects the transfer code type,
+ *             for instance
+ *             0x7 - for "invalid receive buffer size", or 0x8 - for ECC.
+ *             Value rxd_t_code_err_cnt[i] reflects the
+ *             number of times the corresponding transfer code was encountered.
+ *
+ * HW ring counters
+ * See also: struct vxge_hw_vpath_stats_sw_common_info{},
+ * struct vxge_hw_vpath_stats_sw_fifo_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_ring_info {
+       struct vxge_hw_vpath_stats_sw_common_info common_stats;
+       u32 rxd_t_code_err_cnt[VXGE_HW_DTR_MAX_T_CODE];
+
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_err - HW vpath error statistics
+ * @unknown_alarms: Number of alarm events that could not be classified.
+ * @network_sustained_fault: Number of sustained network fault alarms.
+ * @network_sustained_ok: Number of times the network recovered from a
+ *             sustained fault.
+ * @kdfcctl_fifo0_overwrite: Number of kdfcctl fifo0 overwrite alarms.
+ * @kdfcctl_fifo0_poison: Number of kdfcctl fifo0 poison alarms.
+ * @kdfcctl_fifo0_dma_error: Number of kdfcctl fifo0 DMA error alarms.
+ * @dblgen_fifo0_overflow: Number of doorbell generator fifo0 overflow alarms.
+ * @statsb_pif_chain_error: Number of statsb PIF chain error alarms.
+ * @statsb_drop_timeout: Number of statsb drop timeout alarms.
+ * @target_illegal_access: Number of illegal target access alarms.
+ * @ini_serr_det: Number of system errors (SERR) detected by the initiator.
+ * @prc_ring_bumps: Number of PRC ring bump events.
+ * @prc_rxdcm_sc_err: Number of PRC rxdcm_sc_err alarms.
+ * @prc_rxdcm_sc_abort: Number of PRC rxdcm_sc_abort alarms.
+ * @prc_quanta_size_err: Number of PRC quanta size error alarms.
+ *
+ * HW vpath error statistics
+ */
+struct vxge_hw_vpath_stats_sw_err {
+       u32     unknown_alarms;
+       u32     network_sustained_fault;
+       u32     network_sustained_ok;
+       u32     kdfcctl_fifo0_overwrite;
+       u32     kdfcctl_fifo0_poison;
+       u32     kdfcctl_fifo0_dma_error;
+       u32     dblgen_fifo0_overflow;
+       u32     statsb_pif_chain_error;
+       u32     statsb_drop_timeout;
+       u32     target_illegal_access;
+       u32     ini_serr_det;
+       u32     prc_ring_bumps;
+       u32     prc_rxdcm_sc_err;
+       u32     prc_rxdcm_sc_abort;
+       u32     prc_quanta_size_err;
+};
+
+/**
+ * struct vxge_hw_vpath_stats_sw_info - HW vpath sw statistics
+ * @soft_reset_cnt: Number of times soft reset is done on this vpath.
+ * @error_stats: error counters for the vpath
+ * @ring_stats: counters for ring belonging to the vpath
+ * @fifo_stats: counters for fifo belonging to the vpath
+ *
+ * HW vpath sw statistics
+ * See also: struct vxge_hw_device_info{}.
+ */
+struct vxge_hw_vpath_stats_sw_info {
+       u32    soft_reset_cnt;
+       struct vxge_hw_vpath_stats_sw_err       error_stats;
+       struct vxge_hw_vpath_stats_sw_ring_info ring_stats;
+       struct vxge_hw_vpath_stats_sw_fifo_info fifo_stats;
+};
+
+/**
+ * struct vxge_hw_device_stats_sw_info - HW own per-device statistics.
+ *
+ * @not_traffic_intr_cnt: Number of times the host was interrupted
+ *                        without new completions.
+ *                        "Non-traffic interrupt counter".
+ * @traffic_intr_cnt: Number of traffic interrupts for the device.
+ * @total_intr_cnt: Total number of interrupts for the device.
+ *                  @total_intr_cnt == @traffic_intr_cnt +
+ *                              @not_traffic_intr_cnt
+ * @soft_reset_cnt: Number of times soft reset is done on this device.
+ * @vpath_info: please see struct vxge_hw_vpath_stats_sw_info{}
+ * HW per-device statistics.
+ */
+struct vxge_hw_device_stats_sw_info {
+       u32     not_traffic_intr_cnt;
+       u32     traffic_intr_cnt;
+       u32     total_intr_cnt;
+       u32     soft_reset_cnt;
+       struct vxge_hw_vpath_stats_sw_info
+               vpath_info[VXGE_HW_MAX_VIRTUAL_PATHS];
+};
+
+/**
+ * struct vxge_hw_device_stats_sw_err - HW device error statistics.
+ * @vpath_alarms: Number of vpath alarms
+ *
+ * HW Device error stats
+ */
+struct vxge_hw_device_stats_sw_err {
+       u32     vpath_alarms;
+};
+
+/**
+ * struct vxge_hw_device_stats - Contains HW per-device statistics,
+ * including both the hardware-maintained and the driver-maintained
+ * ("soft") counters.
+ * @devh: HW device handle.
+ *
+ * @hw_dev_info_stats: Titan statistics maintained by the hardware.
+ * @sw_dev_info_stats: HW's "soft" device informational statistics, e.g. number
+ *                     of completions per interrupt.
+ * @sw_dev_err_stats: HW's "soft" device error statistics.
+ *
+ * Structure-container of HW per-device statistics. Note that per-channel
+ * statistics are kept in separate structures under HW's fifo and ring
+ * channels.
+ */
+struct vxge_hw_device_stats {
+       /* handles */
+       struct __vxge_hw_device *devh;
+
+       /* HW device hardware statistics */
+       struct vxge_hw_device_stats_hw_info     hw_dev_info_stats;
+
+       /* HW device "soft" stats */
+       struct vxge_hw_device_stats_sw_err   sw_dev_err_stats;
+       struct vxge_hw_device_stats_sw_info  sw_dev_info_stats;
+
+};
+
+enum vxge_hw_status vxge_hw_device_hw_stats_enable(
+                       struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_device_stats_get(
+                       struct __vxge_hw_device *devh,
+                       struct vxge_hw_device_stats_hw_info *hw_stats);
+
+enum vxge_hw_status vxge_hw_driver_stats_get(
+                       struct __vxge_hw_device *devh,
+                       struct vxge_hw_device_stats_sw_info *sw_stats);
+
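+/*
+ * Illustrative sketch only: one way a caller could read back the driver's
+ * "soft" statistics through the getter declared above. "devh" is a
+ * hypothetical device handle obtained elsewhere.
+ *
+ *        struct vxge_hw_device_stats_sw_info sw_stats;
+ *
+ *        if (vxge_hw_driver_stats_get(devh, &sw_stats) == VXGE_HW_OK)
+ *                pr_info("vxge: %u traffic interrupts so far\n",
+ *                        sw_stats.traffic_intr_cnt);
+ */
+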
+enum vxge_hw_status vxge_hw_mrpcim_stats_enable(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_mrpcim_stats_disable(struct __vxge_hw_device *devh);
+
+enum vxge_hw_status
+vxge_hw_mrpcim_stats_access(
+       struct __vxge_hw_device *devh,
+       u32 operation,
+       u32 location,
+       u32 offset,
+       u64 *stat);
+
+enum vxge_hw_status
+vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *devh, u32 port,
+                                  struct vxge_hw_xmac_aggr_stats *aggr_stats);
+
+enum vxge_hw_status
+vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *devh, u32 port,
+                                  struct vxge_hw_xmac_port_stats *port_stats);
+
+enum vxge_hw_status
+vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *devh,
+                             struct vxge_hw_xmac_stats *xmac_stats);
+
+/**
+ * enum vxge_hw_mgmt_reg_type - Register types.
+ *
+ * @vxge_hw_mgmt_reg_type_legacy: Legacy registers
+ * @vxge_hw_mgmt_reg_type_toc: TOC Registers
+ * @vxge_hw_mgmt_reg_type_common: Common Registers
+ * @vxge_hw_mgmt_reg_type_mrpcim: mrpcim registers
+ * @vxge_hw_mgmt_reg_type_srpcim: srpcim registers
+ * @vxge_hw_mgmt_reg_type_vpmgmt: vpath management registers
+ * @vxge_hw_mgmt_reg_type_vpath: vpath registers
+ *
+ * Register type enumeration
+ */
+enum vxge_hw_mgmt_reg_type {
+       vxge_hw_mgmt_reg_type_legacy = 0,
+       vxge_hw_mgmt_reg_type_toc = 1,
+       vxge_hw_mgmt_reg_type_common = 2,
+       vxge_hw_mgmt_reg_type_mrpcim = 3,
+       vxge_hw_mgmt_reg_type_srpcim = 4,
+       vxge_hw_mgmt_reg_type_vpmgmt = 5,
+       vxge_hw_mgmt_reg_type_vpath = 6
+};
+
+enum vxge_hw_status
+vxge_hw_mgmt_reg_read(struct __vxge_hw_device *devh,
+                     enum vxge_hw_mgmt_reg_type type,
+                     u32 index,
+                     u32 offset,
+                     u64 *value);
+
+enum vxge_hw_status
+vxge_hw_mgmt_reg_write(struct __vxge_hw_device *devh,
+                     enum vxge_hw_mgmt_reg_type type,
+                     u32 index,
+                     u32 offset,
+                     u64 value);
+
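+/*
+ * Illustrative sketch only: reading a single 64-bit register through the
+ * management interface above. "devh", "vp_id" and "offset" are hypothetical
+ * values supplied by the caller.
+ *
+ *        u64 val;
+ *
+ *        if (vxge_hw_mgmt_reg_read(devh, vxge_hw_mgmt_reg_type_vpath,
+ *                                  vp_id, offset, &val) != VXGE_HW_OK)
+ *                return;
+ *        ... use val ...
+ */
+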
+/**
+ * enum vxge_hw_rxd_state - Descriptor (RXD) state.
+ * @VXGE_HW_RXD_STATE_NONE: Invalid state.
+ * @VXGE_HW_RXD_STATE_AVAIL: Descriptor is available for reservation.
+ * @VXGE_HW_RXD_STATE_POSTED: Descriptor is posted for processing by the
+ * device.
+ * @VXGE_HW_RXD_STATE_FREED: Descriptor is free and can be reused for
+ * filling-in and posting later.
+ *
+ * Titan/HW descriptor states.
+ *
+ */
+enum vxge_hw_rxd_state {
+       VXGE_HW_RXD_STATE_NONE          = 0,
+       VXGE_HW_RXD_STATE_AVAIL         = 1,
+       VXGE_HW_RXD_STATE_POSTED        = 2,
+       VXGE_HW_RXD_STATE_FREED         = 3
+};
+
+/**
+ * struct vxge_hw_ring_rxd_info - Extended information associated with a
+ * completed ring descriptor.
+ * @syn_flag: SYN flag
+ * @is_icmp: Set if the frame is ICMP
+ * @fast_path_eligible: Fast Path Eligible flag
+ * @l3_cksum_valid: Set if the L3 (IP) checksum in @l3_cksum is valid.
+ * @l3_cksum: Result of the IP checksum check (by Titan hardware).
+ *            A value of VXGE_HW_L3_CKSUM_OK means the checksum is
+ *            correct; otherwise the datagram is corrupted.
+ * @l4_cksum_valid: Set if the L4 (TCP/UDP) checksum in @l4_cksum is valid.
+ * @l4_cksum: Result of the TCP/UDP checksum check (by Titan hardware).
+ *            A value of VXGE_HW_L4_CKSUM_OK means the checksum is
+ *            correct; otherwise the packet is corrupted.
+ * @frame: Zero or more of enum vxge_hw_frame_type flags.
+ *             See enum vxge_hw_frame_type{}.
+ * @proto: Zero or more of enum vxge_hw_frame_proto flags. Reporting bits for
+ *            various higher-layer protocols, including (but not restricted to)
+ *            TCP and UDP. See enum vxge_hw_frame_proto{}.
+ * @is_vlan: Set if the VLAN tag in @vlan is valid.
+ * @vlan: VLAN tag extracted from the received frame.
+ * @rth_bucket: RTH bucket
+ * @rth_it_hit: Set if the RTH hash value calculated by the Titan hardware
+ *             has a matching entry in the Indirection Table.
+ * @rth_spdm_hit: Set if the RTH hash value calculated by the Titan hardware
+ *             has a matching entry in the Socket Pair Direct Match table.
+ * @rth_hash_type: RTH hash type, i.e. the hash function used to calculate
+ *             the hash value. See enum vxge_hw_ring_hash_type{}.
+ * @rth_value: Receive Traffic Hashing (RTH) hash value. Produced by the Titan
+ *             hardware if RTH is enabled.
+ */
+struct vxge_hw_ring_rxd_info {
+       u32     syn_flag;
+       u32     is_icmp;
+       u32     fast_path_eligible;
+       u32     l3_cksum_valid;
+       u32     l3_cksum;
+       u32     l4_cksum_valid;
+       u32     l4_cksum;
+       u32     frame;
+       u32     proto;
+       u32     is_vlan;
+       u32     vlan;
+       u32     rth_bucket;
+       u32     rth_it_hit;
+       u32     rth_spdm_hit;
+       u32     rth_hash_type;
+       u32     rth_value;
+};
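+
+/*
+ * Illustrative sketch only: how a receive handler might consume this
+ * extended information, e.g. to report hardware checksum verification.
+ * "ext_info" and "skb" are hypothetical locals owned by the caller.
+ *
+ *        if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
+ *            ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
+ *            ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
+ *                skb->ip_summed = CHECKSUM_UNNECESSARY;
+ *        else
+ *                skb->ip_summed = CHECKSUM_NONE;
+ */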
+
+/**
+ * enum vxge_hw_ring_hash_type - RTH hash types
+ * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
+ * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
+ * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
+ * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
+ * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
+ * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
+ * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
+ * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
+ * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
+ * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
+ *
+ * RTH hash types
+ */
+enum vxge_hw_ring_hash_type {
+       VXGE_HW_RING_HASH_TYPE_NONE                     = 0x0,
+       VXGE_HW_RING_HASH_TYPE_TCP_IPV4         = 0x1,
+       VXGE_HW_RING_HASH_TYPE_UDP_IPV4         = 0x2,
+       VXGE_HW_RING_HASH_TYPE_IPV4                     = 0x3,
+       VXGE_HW_RING_HASH_TYPE_TCP_IPV6         = 0x4,
+       VXGE_HW_RING_HASH_TYPE_UDP_IPV6         = 0x5,
+       VXGE_HW_RING_HASH_TYPE_IPV6                     = 0x6,
+       VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX      = 0x7,
+       VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX      = 0x8,
+       VXGE_HW_RING_HASH_TYPE_IPV6_EX          = 0x9
+};
+
+enum vxge_hw_status vxge_hw_ring_rxd_reserve(
+       struct __vxge_hw_ring *ring_handle,
+       void **rxdh);
+
+void
+vxge_hw_ring_rxd_pre_post(
+       struct __vxge_hw_ring *ring_handle,
+       void *rxdh);
+
+void
+vxge_hw_ring_rxd_post_post(
+       struct __vxge_hw_ring *ring_handle,
+       void *rxdh);
+
+enum vxge_hw_status
+vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag);
+
+void
+vxge_hw_ring_rxd_post_post_wmb(
+       struct __vxge_hw_ring *ring_handle,
+       void *rxdh);
+
+void vxge_hw_ring_rxd_post(
+       struct __vxge_hw_ring *ring_handle,
+       void *rxdh);
+
+enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
+       struct __vxge_hw_ring *ring_handle,
+       void **rxdh,
+       u8 *t_code);
+
+enum vxge_hw_status vxge_hw_ring_handle_tcode(
+       struct __vxge_hw_ring *ring_handle,
+       void *rxdh,
+       u8 t_code);
+
+void vxge_hw_ring_rxd_free(
+       struct __vxge_hw_ring *ring_handle,
+       void *rxdh);
+
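+/*
+ * Illustrative sketch only: a receive completion loop built from the ring
+ * API above. "ring" is a hypothetical ring handle; buffer handling and
+ * descriptor replenishing are omitted.
+ *
+ *        void *rxd;
+ *        u8 t_code;
+ *
+ *        while (vxge_hw_ring_rxd_next_completed(ring, &rxd, &t_code) ==
+ *                                                VXGE_HW_OK) {
+ *                if (t_code)
+ *                        vxge_hw_ring_handle_tcode(ring, rxd, t_code);
+ *                ... hand the buffer to the stack ...
+ *                vxge_hw_ring_rxd_free(ring, rxd);
+ *        }
+ */
+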
+/**
+ * enum vxge_hw_frame_proto - Higher-layer ethernet protocols.
+ * @VXGE_HW_FRAME_PROTO_VLAN_TAGGED: VLAN.
+ * @VXGE_HW_FRAME_PROTO_IPV4: IPv4.
+ * @VXGE_HW_FRAME_PROTO_IPV6: IPv6.
+ * @VXGE_HW_FRAME_PROTO_IP_FRAG: IP fragmented.
+ * @VXGE_HW_FRAME_PROTO_TCP: TCP.
+ * @VXGE_HW_FRAME_PROTO_UDP: UDP.
+ * @VXGE_HW_FRAME_PROTO_TCP_OR_UDP: TCP or UDP.
+ *
+ * Higher layer ethernet protocols and options.
+ */
+enum vxge_hw_frame_proto {
+       VXGE_HW_FRAME_PROTO_VLAN_TAGGED = 0x80,
+       VXGE_HW_FRAME_PROTO_IPV4                = 0x10,
+       VXGE_HW_FRAME_PROTO_IPV6                = 0x08,
+       VXGE_HW_FRAME_PROTO_IP_FRAG             = 0x04,
+       VXGE_HW_FRAME_PROTO_TCP                 = 0x02,
+       VXGE_HW_FRAME_PROTO_UDP                 = 0x01,
+       VXGE_HW_FRAME_PROTO_TCP_OR_UDP  = (VXGE_HW_FRAME_PROTO_TCP | \
+                                                  VXGE_HW_FRAME_PROTO_UDP)
+};
+
+/**
+ * enum vxge_hw_fifo_gather_code - Gather codes used in fifo TxD
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST: First TxD in the TxD list
+ * @VXGE_HW_FIFO_GATHER_CODE_MIDDLE: Middle TxD in the TxD list
+ * @VXGE_HW_FIFO_GATHER_CODE_LAST: Last TxD in the TxD list
+ * @VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST: Only TxD in the TxD list (both first
+ *             and last).
+ *
+ * These gather codes are used to indicate the position of a TxD in a TxD list
+ */
+enum vxge_hw_fifo_gather_code {
+       VXGE_HW_FIFO_GATHER_CODE_FIRST          = 0x2,
+       VXGE_HW_FIFO_GATHER_CODE_MIDDLE         = 0x0,
+       VXGE_HW_FIFO_GATHER_CODE_LAST           = 0x1,
+       VXGE_HW_FIFO_GATHER_CODE_FIRST_LAST     = 0x3
+};
+
+/**
+ * enum vxge_hw_fifo_tcode - tcodes used in fifo
+ * @VXGE_HW_FIFO_T_CODE_OK: Transfer OK
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT: PCI read transaction (either TxD or
+ *             frame data) returned with corrupt data.
+ * @VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL: PCI read transaction was returned
+ *             with no data.
+ * @VXGE_HW_FIFO_T_CODE_INVALID_MSS: The host attempted to send either a
+ *             frame or LSO MSS that was too long (>9800B).
+ * @VXGE_HW_FIFO_T_CODE_LSO_ERROR: Error detected during TCP/UDP Large Send
+ *             Offload operation, due to improper header template,
+ *             unsupported protocol, etc.
+ * @VXGE_HW_FIFO_T_CODE_UNUSED: Unused
+ * @VXGE_HW_FIFO_T_CODE_MULTI_ERROR: Set to 1 by the adapter if multiple
+ *             data buffer transfer errors are encountered (see below).
+ *             Otherwise it is set to 0.
+ *
+ * These tcodes are returned by various APIs to report TxD status
+ */
+enum vxge_hw_fifo_tcode {
+       VXGE_HW_FIFO_T_CODE_OK                  = 0x0,
+       VXGE_HW_FIFO_T_CODE_PCI_READ_CORRUPT    = 0x1,
+       VXGE_HW_FIFO_T_CODE_PCI_READ_FAIL       = 0x2,
+       VXGE_HW_FIFO_T_CODE_INVALID_MSS         = 0x3,
+       VXGE_HW_FIFO_T_CODE_LSO_ERROR           = 0x4,
+       VXGE_HW_FIFO_T_CODE_UNUSED              = 0x7,
+       VXGE_HW_FIFO_T_CODE_MULTI_ERROR         = 0x8
+};
+
+enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
+       struct __vxge_hw_fifo *fifoh,
+       void **txdlh,
+       void **txdl_priv);
+
+void vxge_hw_fifo_txdl_buffer_set(
+                       struct __vxge_hw_fifo *fifo_handle,
+                       void *txdlh,
+                       u32 frag_idx,
+                       dma_addr_t dma_pointer,
+                       u32 size);
+
+void vxge_hw_fifo_txdl_post(
+                       struct __vxge_hw_fifo *fifo_handle,
+                       void *txdlh);
+
+u32 vxge_hw_fifo_free_txdl_count_get(
+                       struct __vxge_hw_fifo *fifo_handle);
+
+enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
+       struct __vxge_hw_fifo *fifoh,
+       void **txdlh,
+       enum vxge_hw_fifo_tcode *t_code);
+
+enum vxge_hw_status vxge_hw_fifo_handle_tcode(
+       struct __vxge_hw_fifo *fifoh,
+       void *txdlh,
+       enum vxge_hw_fifo_tcode t_code);
+
+void vxge_hw_fifo_txdl_free(
+       struct __vxge_hw_fifo *fifoh,
+       void *txdlh);
+
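+/*
+ * Illustrative sketch only: posting a single-fragment transmit and reaping
+ * its completion with the fifo API above. "fifo", "dma_addr" and "len" are
+ * hypothetical values supplied by the caller.
+ *
+ *        void *txdl, *txdl_priv;
+ *        enum vxge_hw_fifo_tcode t_code;
+ *
+ *        if (vxge_hw_fifo_txdl_reserve(fifo, &txdl, &txdl_priv) == VXGE_HW_OK) {
+ *                vxge_hw_fifo_txdl_buffer_set(fifo, txdl, 0, dma_addr, len);
+ *                vxge_hw_fifo_txdl_post(fifo, txdl);
+ *        }
+ *
+ *        ... later, when transmit completions are processed ...
+ *
+ *        while (vxge_hw_fifo_txdl_next_completed(fifo, &txdl, &t_code) ==
+ *                                                VXGE_HW_OK) {
+ *                if (t_code != VXGE_HW_FIFO_T_CODE_OK)
+ *                        vxge_hw_fifo_handle_tcode(fifo, txdl, t_code);
+ *                vxge_hw_fifo_txdl_free(fifo, txdl);
+ *        }
+ */
+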
+/*
+ * Device
+ */
+
+#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
+#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET               (VXGE_HW_BLOCK_SIZE-16)
+#define VXGE_HW_RING_MIN_BUFF_ALLOCATION               64
+
+/*
+ * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
+ * @dma_addr: DMA (mapped) address of _this_ descriptor.
+ * @dma_handle: DMA handle used to map the descriptor onto device.
+ * @dma_offset: Descriptor's offset in the memory block. HW allocates
+ *              descriptors in memory blocks of %VXGE_HW_BLOCK_SIZE
+ *              bytes. Each memblock is contiguous DMA-able memory. Each
+ *              memblock contains 1 or more 4KB RxD blocks visible to the
+ *              Titan hardware.
+ * @dma_object: DMA address and handle of the memory block that contains
+ *              the descriptor. This member is used only in the "checked"
+ *              version of the HW (to enforce certain assertions);
+ *              otherwise it gets compiled out.
+ *
+ * Per-receive descriptor HW-private data. HW uses the space to keep DMA
+ * information associated with the descriptor. Note that the driver can ask HW
+ * to allocate additional per-descriptor space for its own (driver-specific)
+ * purposes.
+ */
+struct __vxge_hw_ring_rxd_priv {
+       dma_addr_t      dma_addr;
+       struct pci_dev *dma_handle;
+       ptrdiff_t       dma_offset;
+#ifdef VXGE_DEBUG_ASSERT
+       struct vxge_hw_mempool_dma      *dma_object;
+#endif
+};
+
+/* ========================= RING PRIVATE API ============================= */
+u64
+__vxge_hw_ring_first_block_address_get(
+       struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status
+__vxge_hw_ring_create(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       struct vxge_hw_ring_attr *attr);
+
+enum vxge_hw_status
+__vxge_hw_ring_abort(
+       struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status
+__vxge_hw_ring_reset(
+       struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status
+__vxge_hw_ring_delete(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+/* ========================= FIFO PRIVATE API ============================= */
+
+struct vxge_hw_fifo_attr;
+
+enum vxge_hw_status
+__vxge_hw_fifo_create(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       struct vxge_hw_fifo_attr *attr);
+
+enum vxge_hw_status
+__vxge_hw_fifo_abort(
+       struct __vxge_hw_fifo *fifoh);
+
+enum vxge_hw_status
+__vxge_hw_fifo_reset(
+       struct __vxge_hw_fifo *fifoh);
+
+enum vxge_hw_status
+__vxge_hw_fifo_delete(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+struct vxge_hw_mempool_cbs {
+       void (*item_func_alloc)(
+                       struct vxge_hw_mempool *mempoolh,
+                       u32                     memblock_index,
+                       struct vxge_hw_mempool_dma      *dma_object,
+                       u32                     index,
+                       u32                     is_last);
+};
+
+void
+__vxge_hw_mempool_destroy(
+       struct vxge_hw_mempool *mempool);
+
+#define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath)                             \
+               ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
+
+enum vxge_hw_status
+__vxge_hw_vpath_rts_table_get(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u32                     action,
+       u32                     rts_table,
+       u32                     offset,
+       u64                     *data1,
+       u64                     *data2);
+
+enum vxge_hw_status
+__vxge_hw_vpath_rts_table_set(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u32                     action,
+       u32                     rts_table,
+       u32                     offset,
+       u64                     data1,
+       u64                     data2);
+
+enum vxge_hw_status
+__vxge_hw_vpath_reset(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_sw_reset(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_enable(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+void
+__vxge_hw_vpath_prc_configure(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_kdfc_configure(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_mac_configure(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_tim_configure(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_initialize(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vp_initialize(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id,
+       struct vxge_hw_vp_config        *config);
+
+void
+__vxge_hw_vp_terminate(
+       struct __vxge_hw_device *devh,
+       u32                     vp_id);
+
+enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(
+       struct __vxge_hw_virtualpath    *vpath,
+       u32                     skip_alarms);
+
+void vxge_hw_device_intr_enable(
+       struct __vxge_hw_device *devh);
+
+u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *devh, u32 intr_mode);
+
+void vxge_hw_device_intr_disable(
+       struct __vxge_hw_device *devh);
+
+void vxge_hw_device_mask_all(
+       struct __vxge_hw_device *devh);
+
+void vxge_hw_device_unmask_all(
+       struct __vxge_hw_device *devh);
+
+enum vxge_hw_status vxge_hw_device_begin_irq(
+       struct __vxge_hw_device *devh,
+       u32 skip_alarms,
+       u64 *reason);
+
+void vxge_hw_device_clear_tx_rx(
+       struct __vxge_hw_device *devh);
+
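+/*
+ * Illustrative sketch only: skeleton of a legacy (INTA) interrupt handler
+ * built from the device-level calls above. "hldev" is a hypothetical device
+ * handle; the actual traffic processing is left out.
+ *
+ *        u64 reason;
+ *
+ *        if (vxge_hw_device_begin_irq(hldev, 0, &reason) != VXGE_HW_OK)
+ *                return IRQ_NONE;
+ *
+ *        vxge_hw_device_mask_all(hldev);
+ *        ... poll the rings and fifos, e.g. by scheduling NAPI ...
+ *        vxge_hw_device_clear_tx_rx(hldev);
+ *        vxge_hw_device_unmask_all(hldev);
+ *        return IRQ_HANDLED;
+ */
+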
+/*
+ *  Virtual Paths
+ */
+
+u32 vxge_hw_vpath_id(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_vpath_mac_addr_add_mode {
+       VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE = 0,
+       VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE = 1,
+       VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE = 2
+};
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_add(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN],
+       enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN]);
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_get_next(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN]);
+
+enum vxge_hw_status
+vxge_hw_vpath_mac_addr_delete(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u8 (macaddr)[ETH_ALEN],
+       u8 (macaddr_mask)[ETH_ALEN]);
+
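+/*
+ * Illustrative sketch only: programming one unicast address into a vpath
+ * with the calls above. "vp" is a hypothetical vpath handle; "macaddr" and
+ * "macaddr_mask" are assumed to have been filled in by the caller.
+ *
+ *        u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
+ *
+ *        if (vxge_hw_vpath_mac_addr_add(vp, macaddr, macaddr_mask,
+ *                        VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE) != VXGE_HW_OK)
+ *                ... handle the failure ...
+ */
+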
+enum vxge_hw_status
+vxge_hw_vpath_vid_add(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     vid);
+
+enum vxge_hw_status
+vxge_hw_vpath_vid_get(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     *vid);
+
+enum vxge_hw_status
+vxge_hw_vpath_vid_get_next(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     *vid);
+
+enum vxge_hw_status
+vxge_hw_vpath_vid_delete(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     vid);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_add(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     etype);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_get(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     *etype);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_get_next(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     *etype);
+
+enum vxge_hw_status
+vxge_hw_vpath_etype_delete(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u64                     etype);
+
+enum vxge_hw_status vxge_hw_vpath_promisc_enable(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_promisc_disable(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_bcast_enable(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_mcast_enable(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_mcast_disable(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_poll_rx(
+       struct __vxge_hw_ring *ringh);
+
+enum vxge_hw_status vxge_hw_vpath_poll_tx(
+       struct __vxge_hw_fifo *fifoh,
+       void **skb_ptr);
+
+enum vxge_hw_status vxge_hw_vpath_alarm_process(
+       struct __vxge_hw_vpath_handle *vpath_handle,
+       u32 skip_alarms);
+
+enum vxge_hw_status
+vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
+                      int *tim_msix_id, int alarm_msix_id);
+
+void
+vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vpath_handle,
+                       int msix_id);
+
+void vxge_hw_device_flush_io(struct __vxge_hw_device *devh);
+
+void
+vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vpath_handle,
+                        int msix_id);
+
+void
+vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vpath_handle,
+                         int msix_id);
+
+void
+vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_intr_enable(
+                               struct __vxge_hw_vpath_handle *vpath_handle);
+
+enum vxge_hw_status vxge_hw_vpath_intr_disable(
+                               struct __vxge_hw_vpath_handle *vpath_handle);
+
+void vxge_hw_vpath_inta_mask_tx_rx(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+void vxge_hw_vpath_inta_unmask_tx_rx(
+       struct __vxge_hw_vpath_handle *vpath_handle);
+
+void
+vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channelh, int msix_id);
+
+void
+vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channelh, int msix_id);
+
+enum vxge_hw_status
+vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh);
+
+void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh);
+
+void
+vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel,
+                                void **dtrh);
+
+void
+vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel);
+
+void
+vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
+
+int
+vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
+
+/* ========================== PRIVATE API ================================= */
+
+enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+__vxge_hw_device_handle_error(
+               struct __vxge_hw_device *hldev,
+               u32 vp_id,
+               enum vxge_hw_event type);
+
+#endif